diff --git a/go.mod b/go.mod
index e6089f27bc..95efc4d33a 100644
--- a/go.mod
+++ b/go.mod
@@ -3,103 +3,107 @@ module github.com/GoogleCloudPlatform/prometheus-engine
 go 1.24

 require (
-	cloud.google.com/go/compute/metadata v0.3.0
-	cloud.google.com/go/monitoring v1.18.0
+	cloud.google.com/go/compute/metadata v0.6.0
+	cloud.google.com/go/monitoring v1.20.4
 	github.com/alecthomas/kingpin/v2 v2.4.0
 	github.com/efficientgo/e2e v0.14.1-0.20230710114240-c316eb95ae5b
 	github.com/go-kit/log v0.2.1
 	github.com/go-logr/logr v1.4.2
 	github.com/gogo/protobuf v1.3.2
-	github.com/google/go-cmp v0.6.0
+	github.com/google/go-cmp v0.7.0
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
-	github.com/googleapis/gax-go/v2 v2.12.0
+	github.com/googleapis/gax-go/v2 v2.14.0
 	github.com/grafana/grafana-api-golang-client v0.27.0
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/hashicorp/go-cleanhttp v0.5.2
 	github.com/oklog/run v1.1.0
 	github.com/oklog/ulid v1.3.1
-	github.com/prometheus/alertmanager v0.26.0
-	github.com/prometheus/client_golang v1.18.0
-	github.com/prometheus/client_model v0.5.0
-	github.com/prometheus/common v0.47.0
+	github.com/prometheus/alertmanager v0.27.0
+	github.com/prometheus/client_golang v1.21.1
+	github.com/prometheus/client_model v0.6.1
+	github.com/prometheus/common v0.62.0
 	github.com/prometheus/common/assets v0.2.0
 	github.com/prometheus/prometheus v1.8.2-0.20211119115433-692a54649ed7
-	github.com/stretchr/testify v1.9.0
-	github.com/thanos-io/thanos v0.34.2-0.20240314081355-f731719f9515
+	github.com/stretchr/testify v1.10.0
+	github.com/thanos-io/thanos v0.38.0
 	go.uber.org/zap v1.26.0
-	golang.org/x/mod v0.17.0
-	golang.org/x/oauth2 v0.17.0
-	golang.org/x/time v0.5.0
-	google.golang.org/api v0.162.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014
-	google.golang.org/grpc v1.61.0
-	google.golang.org/protobuf v1.33.0
+	golang.org/x/mod v0.24.0
+	golang.org/x/oauth2 v0.24.0
+	golang.org/x/time v0.8.0
+	google.golang.org/api v0.213.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f
+	google.golang.org/grpc v1.69.4
+	google.golang.org/protobuf v1.36.3
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.30.1
+	k8s.io/api v0.31.3
 	k8s.io/apiextensions-apiserver v0.30.1
-	k8s.io/apimachinery v0.30.2
+	k8s.io/apimachinery v0.31.3
 	k8s.io/autoscaler/vertical-pod-autoscaler v1.2.2
-	k8s.io/client-go v0.30.1
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
+	k8s.io/client-go v0.31.3
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
 	sigs.k8s.io/controller-runtime v0.18.7
 )

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
+	cloud.google.com/go/auth v0.13.0 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
-	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
+	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.50.0 // indirect
+	github.com/aws/aws-sdk-go v1.55.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dennwc/varint v1.0.0 // indirect
-	github.com/edsrzf/mmap-go v1.1.0 // indirect
-	github.com/efficientgo/core v1.0.0-rc.2 // indirect
+	github.com/edsrzf/mmap-go v1.2.0 // indirect
+	github.com/efficientgo/core v1.0.0-rc.3 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/fsnotify/fsnotify v1.8.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
 	github.com/go-openapi/analysis v0.22.2 // indirect
-	github.com/go-openapi/errors v0.21.0 // indirect
-	github.com/go-openapi/jsonpointer v0.20.2 // indirect
+	github.com/go-openapi/errors v0.22.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.4 // indirect
 	github.com/go-openapi/loads v0.21.5 // indirect
 	github.com/go-openapi/spec v0.20.14 // indirect
-	github.com/go-openapi/strfmt v0.22.0 // indirect
-	github.com/go-openapi/swag v0.22.9 // indirect
-	github.com/go-openapi/validate v0.22.6 // indirect
+	github.com/go-openapi/strfmt v0.23.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/validate v0.23.0 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/google/s2a-go v0.1.8 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/julienschmidt/httprouter v1.3.0 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/mitchellh/go-ps v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/moby/spdystream v0.4.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -109,35 +113,36 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
-	go.mongodb.org/mongo-driver v1.13.1 // indirect
-	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
-	go.opentelemetry.io/otel v1.22.0 // indirect
-	go.opentelemetry.io/otel/metric v1.22.0 // indirect
-	go.opentelemetry.io/otel/trace v1.22.0 // indirect
+	go.mongodb.org/mongo-driver v1.14.0 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+	go.opentelemetry.io/otel v1.35.0 // indirect
+	go.opentelemetry.io/otel/metric v1.35.0 // indirect
+	go.opentelemetry.io/otel/trace v1.35.0 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/goleak v1.3.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.35.0 // indirect
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/net v0.36.0 // indirect
-	golang.org/x/sync v0.11.0 // indirect
-	golang.org/x/sys v0.30.0 // indirect
-	golang.org/x/term v0.29.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
-	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
+	golang.org/x/crypto v0.36.0 // indirect
+	golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
+	golang.org/x/net v0.37.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0 // indirect
+	golang.org/x/tools v0.31.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect
+	google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	k8s.io/code-generator v0.30.2 // indirect
 	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
-	k8s.io/klog/v2 v2.120.1 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
diff --git a/go.sum b/go.sum
index 80ccb86b5b..f232ddb81c 100644
--- a/go.sum
+++ b/go.sum
@@ -98,6 +98,10 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo
 cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
 cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
 cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
+cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -178,8 +182,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -368,8 +372,8 @@ cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhI cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.18.0 h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4= -cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg= +cloud.google.com/go/monitoring v1.20.4 h1:zwcViK7mT9SV0kzKqLOI3spRadvsmvw/R9z1MHNeC0E= +cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -602,12 +606,14 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= 
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= @@ -624,6 +630,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -640,8 +648,8 @@ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGW github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= @@ -654,8 +662,8 @@ github.com/armon/go-socks5 
v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.50.0 h1:HBtrLeO+QyDKnc3t1+5DR1RxodOHCGr8ZcrHudpv7jI= -github.com/aws/aws-sdk-go v1.50.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -665,8 +673,9 @@ github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -684,8 +693,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -693,6 +702,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod 
h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= @@ -707,10 +718,10 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= -github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= -github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= +github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= +github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/efficientgo/core v1.0.0-rc.3 h1:X6CdgycYWDcbYiJr1H1+lQGzx13o7bq3EUkbB9DsSPc= +github.com/efficientgo/core v1.0.0-rc.3/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= github.com/efficientgo/e2e v0.14.1-0.20230710114240-c316eb95ae5b h1:8VX23BNufsa4KCqnnEonvI3yrou2Pjp8JLcbdVn0Fs8= github.com/efficientgo/e2e v0.14.1-0.20230710114240-c316eb95ae5b/go.mod h1:plsKU0YHE9uX+7utvr7SiDtVBSHJyEfHRO4UnUgDmts= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -726,14 +737,14 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= @@ -744,8 +755,10 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= @@ -768,28 +781,29 @@ github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= -github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= -github.com/go-openapi/errors v0.21.0/go.mod 
h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= -github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= -github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= -github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= -github.com/go-openapi/validate v0.22.6 h1:+NhuwcEYpWdO5Nm4bmvhGLW0rt1Fcc532Mu3wpypXfo= -github.com/go-openapi/validate v0.22.6/go.mod h1:eaddXSqKeTg5XpSmj1dYyFTK/95n/XHwcOY+BMxKMyM= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= +github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-resty/resty/v2 v2.10.0 h1:Qla4W/+TMmv0fOeeRqzEpXPLfTUnR5HZ1+lGs+CkiCo= github.com/go-resty/resty/v2 v2.10.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobs/pretty v0.0.0-20180724170744-09732c25a95b h1:/vQ+oYKu+JoyaMPDsv5FzwuL2wwWBgBbtj/YLCi4LuA= @@ -839,7 +853,6 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -863,8 +876,9 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -890,14 +904,14 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8= -github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -909,8 +923,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy 
v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -925,19 +939,18 @@ github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38 github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= github.com/gophercloud/gophercloud v1.4.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/grafana-api-golang-client v0.27.0 h1:zIwMXcbCB4n588i3O2N6HfNcQogCNTd/vPkEXTr7zX8= github.com/grafana/grafana-api-golang-client v0.27.0/go.mod h1:uNLZEmgKtTjHBtCQMwNn3qsx2mpMb8zU+7T4Xv3NR9Y= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -996,14 +1009,17 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf 
v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -1033,8 +1049,8 @@ github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPn github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= @@ -1045,15 +1061,14 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream 
v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= @@ -1065,10 +1080,10 @@ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DV github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= +github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -1087,36 +1102,43 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= -github.com/prometheus/alertmanager 
v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= +github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= +github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= +github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.45.2 h1:a3GSH0uDc0cSIxbrPYExbzEkioSUhS/NozTDoBZ6osc= github.com/prometheus/prometheus v0.45.2/go.mod h1:tIeln2qi5gcislHLHDBAfahsQ8mfPF5ttuHcK4v5UtU= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= @@ -1142,18 +1164,18 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/thanos-io/thanos v0.34.2-0.20240314081355-f731719f9515 h1:ua9li61Ke2KuN88BWuZN6UKuJSNM1xa6JW2PdQEnozA= -github.com/thanos-io/thanos v0.34.2-0.20240314081355-f731719f9515/go.mod h1:bXHn+l9BZHRRz+pcUnRSNNg5FyWEkbofxmNgXM+A5J4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/thanos-io/thanos v0.38.0 h1:rw+wBKmTG1XZcg6AR+NxEou6WUP4f2xlD0ML2WdqXD0= +github.com/thanos-io/thanos v0.38.0/go.mod h1:k92PFaWEiFhvI03q6ZjepRxVw6KMKvEXwFW1DWtnuaE= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1163,8 +1185,8 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= -go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1172,18 +1194,23 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod 
h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1205,15 +1232,14 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= @@ -1253,8 +1279,8 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1318,8 +1344,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1352,8 +1378,8 @@ golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4 golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1372,8 +1398,8 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1457,8 +1483,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1472,8 +1498,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.29.0 
h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1492,16 +1518,16 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1562,8 +1588,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1643,8 +1669,8 @@ 
google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjY google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.162.0 h1:Vhs54HkaEpkMBdgGdOT2P6F0csGG/vxDS0hWHJzmmps= -google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0= +google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ= +google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1652,8 +1678,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1789,21 +1813,21 @@ google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= 
+google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1845,8 +1869,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1867,13 +1891,15 @@ google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -1895,26 +1921,26 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= -k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= -k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= -k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/autoscaler/vertical-pod-autoscaler v1.2.2 h1:d6nrlgROIvGJrBZnmyTibA2CvXIylet/vBE1EicilRo= k8s.io/autoscaler/vertical-pod-autoscaler v1.2.2/go.mod h1:9ywHbt0kTrLyeNGgTNm7WEns34PmBMEr+9bDKTxW6wQ= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/code-generator v0.30.2 h1:ZY1+aGkqZVwKIyGsOzquaeZ5rSfE6wZHur8z3jQAaiw= k8s.io/code-generator v0.30.2/go.mod h1:RQP5L67QxqgkVquk704CyvWFIq0e6RCMmLTXxjE8dVA= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md new file mode 100644 index 0000000000..39a47c85eb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -0,0 +1,368 @@ +# Changelog + +## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13) + + +### Features + +* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d)) +* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f)) + + +### Bug Fixes + +* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90)) + +## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10) + + +### Bug Fixes + +* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1)) + +## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04) + + +### Features + +* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005) + + +### Bug Fixes + +* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188) + +## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21) + + +### Features + +* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344)) + +## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12) + + +### Bug Fixes + +* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) 
([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556) + +## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06) + + +### Bug Fixes + +* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2)) +* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6)) + +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30) + + +### Features + +* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b)) + +## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22) + + +### Bug Fixes + +* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844) +* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114)) + +## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09) + + +### Bug Fixes + +* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962) +* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287)) + +## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01) + + +### Bug Fixes + +* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore 
support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + +## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) + + +### Bug Fixes + +* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948)) + +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16) + + +### Features + +* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45)) + +## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13) + + +### Bug Fixes + +* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) 
([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638) + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07) + + +### Features + +* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b)) + +## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) +* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8)) +* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22) + + +### Bug Fixes + +* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544) + +## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10) + + +### Bug Fixes + +* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09) + + +### Features + +* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942)) + +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01) + + +### Bug Fixes + +* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c)) +* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2)) + +## 
[0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25) + + +### Features + +* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e)) + + +### Bug Fixes + +* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24) + + +### Bug Fixes + +* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31) + + +### Bug Fixes + +* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28) + + +### Features + +* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150)) + +## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16) + + +### Bug Fixes + +* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15)) +* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159) +* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8)) +* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea)) + +## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09) + + +### Bug Fixes + +* **auth:** Don't try to detect default creds it opt configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07) + + +### Features + +* **auth:** Enable client certificates by default 
([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c)) + + +### Bug Fixes + +* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23) + + +### Features + +* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814) + + +### Bug Fixes + +* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19) + + +### Bug Fixes + +* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823) +* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15) + +### Breaking Changes + +In the below mentioned commits there were a few large breaking changes since the +last release of the module. + +1. The `Credentials` type has been moved to the root of the module as it is + becoming the core abstraction for the whole module. +2. Because of the above mentioned change many functions that previously + returned a `TokenProvider` now return `Credentials`. Similarly, these + functions have been renamed to be more specific. +3. Most places that used to take an optional `TokenProvider` now accept + `Credentials`. You can make a `Credentials` from a `TokenProvider` using the + constructor found in the `auth` package. +4. The `detect` package has been renamed to `credentials`. With this change some + function signatures were also updated for better readability. +5. Derivative auth flows like `impersonate` and `downscope` have been moved to + be under the new `credentials` package. + +Although these changes are disruptive we think that they are for the best of the +long-term health of the module. 
We do not expect any more large breaking changes +like these in future revisions, even before 1.0.0. This version will be the +first version of the auth library that our client libraries start to use and +depend on. + +### Features + +* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6)) +* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d)) +* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670) +* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501)) +* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac)) +* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d)) +* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) +* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b)) +* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd)) +* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824)) +* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9)) +* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10) + + +### Bug Fixes + +* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136) +* **auth:** Update grpc-go to 
v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## 0.1.0 (2023-10-18) + + +### Features + +* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e)) +* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993)) +* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8)) +* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7)) +* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c)) +* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5)) +* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff)) +* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe)) +* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04)) +* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507)) +* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE similarity index 100% rename from vendor/google.golang.org/appengine/LICENSE rename to vendor/cloud.google.com/go/auth/LICENSE diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md new file mode 100644 index 0000000000..6fe4f0763e --- /dev/null +++ b/vendor/cloud.google.com/go/auth/README.md @@ -0,0 +1,40 @@ +# Google Auth Library for Go + +[![Go 
Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create a authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create a authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go new file mode 100644 index 0000000000..cd5e988684 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -0,0 +1,618 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package auth provides utilities for managing Google Cloud credentials, +// including functionality for creating, caching, and refreshing OAuth2 tokens. +// It offers customizable options for different OAuth2 flows, such as 2-legged +// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic +// token management. +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" + + // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes, + // so we give it 15 seconds to refresh it's cache before attempting to refresh a token. + defaultExpiryDelta = 225 * time.Second + + universeDomainDefault = "googleapis.com" +) + +// tokenState represents different states for a [Token]. +type tokenState int + +const ( + // fresh indicates that the [Token] is valid. 
It is not expired or close to + // expired, or the token has no expiry. + fresh tokenState = iota + // stale indicates that the [Token] is close to expired, and should be + // refreshed. The token can be used normally. + stale + // invalid indicates that the [Token] is expired or invalid. The token + // cannot be used for a normal operation. + invalid +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} + + // for testing + timeNow = time.Now +) + +// TokenProvider specifies an interface for anything that can return a token. +type TokenProvider interface { + // Token returns a Token or an error. + // The Token returned must be safe to use + // concurrently. + // The returned Token must not be modified. + // The context provided must be sent along to any requests that are made in + // the implementing code. + Token(context.Context) (*Token, error) +} + +// Token holds the credential token used to authorized requests. All fields are +// considered read-only. +type Token struct { + // Value is the token used to authorize requests. It is usually an access + // token but may be other types of tokens such as ID tokens in some flows. + Value string + // Type is the type of token Value is. If uninitialized, it should be + // assumed to be a "Bearer" token. + Type string + // Expiry is the time the token is set to expire. + Expiry time.Time + // Metadata may include, but is not limited to, the body of the token + // response returned by the server. + Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values +} + +// IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not +// expired. A token is considered expired if [Token.Expiry] has passed or will +// pass in the next 225 seconds. +func (t *Token) IsValid() bool { + return t.isValidWithEarlyExpiry(defaultExpiryDelta) +} + +// MetadataString is a convenience method for accessing string values in the +// token's metadata. Returns an empty string if the metadata is nil or the value +// for the given key cannot be cast to a string. +func (t *Token) MetadataString(k string) string { + if t.Metadata == nil { + return "" + } + s, ok := t.Metadata[k].(string) + if !ok { + return "" + } + return s +} + +func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { + if t.isEmpty() { + return false + } + if t.Expiry.IsZero() { + return true + } + return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) +} + +func (t *Token) isEmpty() bool { + return t == nil || t.Value == "" +} + +// Credentials holds Google credentials, including +// [Application Default Credentials]. +// +// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + json []byte + projectID CredentialsPropertyProvider + quotaProjectID CredentialsPropertyProvider + // universeDomain is the default service domain for a given Cloud universe. + universeDomain CredentialsPropertyProvider + + TokenProvider +} + +// JSON returns the bytes associated with the the file used to source +// credentials if one was used. +func (c *Credentials) JSON() []byte { + return c.json +} + +// ProjectID returns the associated project ID from the underlying file or +// environment. 
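+//
+// Illustrative sketch: the DetectDefault call and its options below are
+// assumed from the sibling credentials package and are not defined in this
+// file; a Credentials value obtained that way can then be queried for its
+// project ID:
+//
+//	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+//		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	project, err := creds.ProjectID(ctx)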
+func (c *Credentials) ProjectID(ctx context.Context) (string, error) { + if c.projectID == nil { + return internal.GetProjectID(c.json, ""), nil + } + v, err := c.projectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetProjectID(c.json, v), nil +} + +// QuotaProjectID returns the associated quota project ID from the underlying +// file or environment. +func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) { + if c.quotaProjectID == nil { + return internal.GetQuotaProject(c.json, ""), nil + } + v, err := c.quotaProjectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetQuotaProject(c.json, v), nil +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// The default value is "googleapis.com". +func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) { + if c.universeDomain == nil { + return universeDomainDefault, nil + } + v, err := c.universeDomain.GetProperty(ctx) + if err != nil { + return "", err + } + if v == "" { + return universeDomainDefault, nil + } + return v, err +} + +// CredentialsPropertyProvider provides an implementation to fetch a property +// value for [Credentials]. +type CredentialsPropertyProvider interface { + GetProperty(context.Context) (string, error) +} + +// CredentialsPropertyFunc is a type adapter to allow the use of ordinary +// functions as a [CredentialsPropertyProvider]. +type CredentialsPropertyFunc func(context.Context) (string, error) + +// GetProperty loads the properly value provided the given context. +func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) { + return p(ctx) +} + +// CredentialsOptions are used to configure [Credentials]. +type CredentialsOptions struct { + // TokenProvider is a means of sourcing a token for the credentials. Required. + TokenProvider TokenProvider + // JSON is the raw contents of the credentials file if sourced from a file. + JSON []byte + // ProjectIDProvider resolves the project ID associated with the + // credentials. + ProjectIDProvider CredentialsPropertyProvider + // QuotaProjectIDProvider resolves the quota project ID associated with the + // credentials. + QuotaProjectIDProvider CredentialsPropertyProvider + // UniverseDomainProvider resolves the universe domain with the credentials. + UniverseDomainProvider CredentialsPropertyProvider +} + +// NewCredentials returns new [Credentials] from the provided options. +func NewCredentials(opts *CredentialsOptions) *Credentials { + creds := &Credentials{ + TokenProvider: opts.TokenProvider, + json: opts.JSON, + projectID: opts.ProjectIDProvider, + quotaProjectID: opts.QuotaProjectIDProvider, + universeDomain: opts.UniverseDomainProvider, + } + + return creds +} + +// CachedTokenProviderOptions provides options for configuring a cached +// [TokenProvider]. +type CachedTokenProviderOptions struct { + // DisableAutoRefresh makes the TokenProvider always return the same token, + // even if it is expired. The default is false. Optional. + DisableAutoRefresh bool + // ExpireEarly configures the amount of time before a token expires, that it + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds. Optional. + ExpireEarly time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // tokens in a blocking manner. The default is false. Optional. 
+ DisableAsyncRefresh bool +} + +func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { + if ctpo == nil { + return true + } + return !ctpo.DisableAutoRefresh +} + +func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { + if ctpo == nil || ctpo.ExpireEarly == 0 { + return defaultExpiryDelta + } + return ctpo.ExpireEarly +} + +func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { + if ctpo == nil { + return false + } + return ctpo.DisableAsyncRefresh +} + +// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned +// by the underlying provider. By default it will refresh tokens asynchronously +// a few minutes before they expire. +func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { + if ctp, ok := tp.(*cachedTokenProvider); ok { + return ctp + } + return &cachedTokenProvider{ + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + blockingRefresh: opts.blockingRefresh(), + } +} + +type cachedTokenProvider struct { + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + blockingRefresh bool + + mu sync.Mutex + cachedToken *Token + // isRefreshRunning ensures that the non-blocking refresh will only be + // attempted once, even if multiple callers enter the Token method. + isRefreshRunning bool + // isRefreshErr ensures that the non-blocking refresh will only be attempted + // once per refresh window if an error is encountered. + isRefreshErr bool +} + +func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + if c.blockingRefresh { + return c.tokenBlocking(ctx) + } + return c.tokenNonBlocking(ctx) +} + +func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) { + switch c.tokenState() { + case fresh: + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + case stale: + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) + // Return the stale token immediately to not block customer requests to Cloud services. + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + default: // invalid + return c.tokenBlocking(ctx) + } +} + +// tokenState reports the token's validity. +func (c *cachedTokenProvider) tokenState() tokenState { + c.mu.Lock() + defer c.mu.Unlock() + t := c.cachedToken + now := timeNow() + if t == nil || t.Value == "" { + return invalid + } else if t.Expiry.IsZero() { + return fresh + } else if now.After(t.Expiry.Round(0)) { + return invalid + } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) { + return stale + } + return fresh +} + +// tokenAsync uses a bool to ensure that only one non-blocking token refresh +// happens at a time, even if multiple callers have entered this function +// concurrently. This avoids creating an arbitrary number of concurrent +// goroutines. Retries should be attempted and managed within the Token method. +// If the refresh attempt fails, no further attempts are made until the refresh +// window expires and the token enters the invalid state, at which point the +// blocking call to Token should likely return the same error on the main goroutine. 
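+//
+// Illustrative sketch of how a caller typically constructs the cache
+// described above; baseProvider is an assumed placeholder implementing
+// TokenProvider, and the ExpireEarly value is arbitrary:
+//
+//	tp := auth.NewCachedTokenProvider(baseProvider, &auth.CachedTokenProviderOptions{
+//		ExpireEarly: 2 * time.Minute,
+//	})
+//	tok, err := tp.Token(ctx)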
+func (c *cachedTokenProvider) tokenAsync(ctx context.Context) {
+	fn := func() {
+		c.mu.Lock()
+		c.isRefreshRunning = true
+		c.mu.Unlock()
+		t, err := c.tp.Token(ctx)
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		c.isRefreshRunning = false
+		if err != nil {
+			// Discard errors from the non-blocking refresh, but prevent further
+			// attempts.
+			c.isRefreshErr = true
+			return
+		}
+		c.cachedToken = t
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.isRefreshRunning && !c.isRefreshErr {
+		go fn()
+	}
+}
+
+func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.isRefreshErr = false
+	if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) {
+		return c.cachedToken, nil
+	}
+	t, err := c.tp.Token(ctx)
+	if err != nil {
+		return nil, err
+	}
+	c.cachedToken = t
+	return t, nil
+}
+
+// Error is an error associated with retrieving a [Token]. It can hold useful
+// additional details for debugging.
+type Error struct {
+	// Response is the HTTP response associated with the error. The body will
+	// always be already closed and consumed.
+	Response *http.Response
+	// Body is the HTTP response body.
+	Body []byte
+	// Err is the underlying wrapped error.
+	Err error
+
+	// code returned in the token response
+	code string
+	// description returned in the token response
+	description string
+	// uri returned in the token response
+	uri string
+}
+
+func (e *Error) Error() string {
+	if e.code != "" {
+		s := fmt.Sprintf("auth: %q", e.code)
+		if e.description != "" {
+			s += fmt.Sprintf(" %q", e.description)
+		}
+		if e.uri != "" {
+			s += fmt.Sprintf(" %q", e.uri)
+		}
+		return s
+	}
+	return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
+}
+
+// Temporary returns true if the error is considered temporary and may be able
+// to be retried.
+func (e *Error) Temporary() bool {
+	if e.Response == nil {
+		return false
+	}
+	sc := e.Response.StatusCode
+	return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
+}
+
+func (e *Error) Unwrap() error {
+	return e.Err
+}
+
+// Style describes how the token endpoint wants to receive the ClientID and
+// ClientSecret.
+type Style int
+
+const (
+	// StyleUnknown means the value has not been initialized. Sending this in
+	// a request will cause the token exchange to fail.
+	StyleUnknown Style = iota
+	// StyleInParams sends client info in the body of a POST request.
+	StyleInParams
+	// StyleInHeader sends client info using Basic Authorization header.
+	StyleInHeader
+)
+
+// Options2LO contains the configuration settings for a 2-legged JWT OAuth2 flow.
+type Options2LO struct {
+	// Email is the OAuth2 client ID. This value is set as the "iss" in the
+	// JWT.
+	Email string
+	// PrivateKey contains the contents of an RSA private key or the
+	// contents of a PEM file that contains a private key. It is used to sign
+	// the JWT created.
+	PrivateKey []byte
+	// TokenURL is the URL the JWT is sent to. Required.
+	TokenURL string
+	// PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
+	// "kid" in the JWT header. Optional.
+	PrivateKeyID string
+	// Subject is the user to impersonate. It is used as the "sub" in the JWT.
+	// Optional.
+	Subject string
+	// Scopes specifies requested permissions for the token. Optional.
+	Scopes []string
+	// Expires specifies the lifetime of the token. Optional.
+ Expires time.Duration + // Audience specifies the "aud" in the JWT. Optional. + Audience string + // PrivateClaims allows specifying any custom claims for the JWT. Optional. + PrivateClaims map[string]interface{} + + // Client is the client to be used to make the underlying token requests. + // Optional. + Client *http.Client + // UseIDToken requests that the token returned be an ID token if one is + // returned from the server. Optional. + UseIDToken bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger +} + +func (o *Options2LO) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.DefaultClient() +} + +func (o *Options2LO) validate() error { + if o == nil { + return errors.New("auth: options must be provided") + } + if o.Email == "" { + return errors.New("auth: email must be provided") + } + if len(o.PrivateKey) == 0 { + return errors.New("auth: private key must be provided") + } + if o.TokenURL == "" { + return errors.New("auth: token URL must be provided") + } + return nil +} + +// New2LOTokenProvider returns a [TokenProvider] from the provided options. +func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil +} + +type tokenProvider2LO struct { + opts *Options2LO + Client *http.Client + logger *slog.Logger +} + +func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { + pk, err := internal.ParseKey(tp.opts.PrivateKey) + if err != nil { + return nil, err + } + claimSet := &jwt.Claims{ + Iss: tp.opts.Email, + Scope: strings.Join(tp.opts.Scopes, " "), + Aud: tp.opts.TokenURL, + AdditionalClaims: tp.opts.PrivateClaims, + Sub: tp.opts.Subject, + } + if t := tp.opts.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := tp.opts.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = tp.opts.PrivateKeyID + payload, err := jwt.EncodeJWS(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) + resp, body, err := internal.DoRequest(tp.Client, req) + if err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, &Error{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. 
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + token := &Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + token.Metadata = make(map[string]interface{}) + json.Unmarshal(body, &token.Metadata) // no error checks for optional fields + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jwt.DecodeJWS(v) + if err != nil { + return nil, fmt.Errorf("auth: error decoding JWT token: %w", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if tp.opts.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("auth: response doesn't have JWT token") + } + token.Value = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go new file mode 100644 index 0000000000..8afd0472ea --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -0,0 +1,90 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/compute/metadata" +) + +var ( + computeTokenMetadata = map[string]interface{}{ + "auth.google.tokenSource": "compute-metadata", + "auth.google.serviceAccount": "default", + } + computeTokenURI = "instance/service-accounts/default/token" +) + +// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that +// uses the metadata service to retrieve tokens. +func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider { + return auth.NewCachedTokenProvider(&computeProvider{ + scopes: opts.Scopes, + client: client, + }, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, + }) +} + +// computeProvider fetches tokens from the google cloud metadata service. 
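// Editor's illustrative sketch, not part of the vendored diff: constructing the
// 2-legged JWT flow (Options2LO / New2LOTokenProvider) defined earlier in this diff
// from service-account key material. The email, key bytes, and scope are placeholders.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth"
)

func main() {
	tp, err := auth.New2LOTokenProvider(&auth.Options2LO{
		Email:      "sa@example-project.iam.gserviceaccount.com",          // becomes the "iss" claim
		PrivateKey: []byte("-----BEGIN PRIVATE KEY-----\n...placeholder"), // PEM-encoded key material
		TokenURL:   "https://oauth2.googleapis.com/token",
		Scopes:     []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := tp.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token type:", tok.Type)
}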
+type computeProvider struct { + scopes []string + client *metadata.Client +} + +type metadataTokenResp struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` +} + +func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { + tokenURI, err := url.Parse(computeTokenURI) + if err != nil { + return nil, err + } + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI.RawQuery = v.Encode() + } + tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String()) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + var res metadataTokenResp + if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil { + return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, errors.New("credentials: incomplete token received from metadata") + } + return &auth.Token{ + Value: res.AccessToken, + Type: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + Metadata: computeTokenMetadata, + }, nil + +} diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go new file mode 100644 index 0000000000..a1b5a93188 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -0,0 +1,279 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/compute/metadata" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow. + jwtTokenURL = "https://oauth2.googleapis.com/token" + + // Google's OAuth 2.0 default endpoints. + googleAuthURL = "https://accounts.google.com/o/oauth2/auth" + googleTokenURL = "https://oauth2.googleapis.com/token" + + // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint. + GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + + // Help on default credentials + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +) + +var ( + // for testing + allowOnGCECheck = true +) + +// OnGCE reports whether this process is running in Google Cloud. +func OnGCE() bool { + // TODO(codyoss): once all libs use this auth lib move metadata check here + return allowOnGCECheck && metadata.OnGCE() +} + +// DetectDefault searches for "Application Default Credentials" and returns +// a credential based on the [DetectOptions] provided. 
+// +// It looks for credentials in the following places, preferring the first +// location found: +// +// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS +// environment variable. For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation +// on how to generate the JSON configuration file for on-prem/non-Google +// cloud platforms. +// - A JSON file in a location known to the gcloud command-line tool. On +// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On +// other systems, $HOME/.config/gcloud/application_default_credentials.json. +// - On Google Compute Engine, Google App Engine standard second generation +// runtimes, and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if len(opts.CredentialsJSON) > 0 { + return readCredentialsFileJSON(opts.CredentialsJSON, opts) + } + if opts.CredentialsFile != "" { + return readCredentialsFile(opts.CredentialsFile, opts) + } + if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err + } + return creds, nil + } + + fileName := credsfile.GetWellKnownFileName() + if b, err := os.ReadFile(fileName); err == nil { + return readCredentialsFileJSON(b, opts) + } + + if OnGCE() { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: opts.logger(), + }) + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: computeTokenProvider(opts, metadataClient), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadataClient.ProjectIDWithContext(ctx) + }), + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{ + MetadataClient: metadataClient, + }, + }), nil + } + + return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL) +} + +// DetectOptions provides configuration for [DetectDefault]. +type DetectOptions struct { + // Scopes that credentials tokens should have. Example: + // https://www.googleapis.com/auth/cloud-platform. Required if Audience is + // not provided. + Scopes []string + // Audience that credentials tokens should have. Only applicable for 2LO + // flows with service accounts. If specified, scopes should not be provided. + Audience string + // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + // EarlyTokenRefresh configures how early before a token expires that it + // should be refreshed. Once the token’s time until expiration has entered + // this refresh window the token is considered valid but stale. If unset, + // the default value is 3 minutes and 45 seconds. Optional. + EarlyTokenRefresh time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool + // AuthHandlerOptions configures an authorization handler and other options + // for 3LO flows. It is required, and only used, for client credential + // flows. + AuthHandlerOptions *auth.AuthorizationHandlerOptions + // TokenURL allows to set the token endpoint for user credential flows. 
If + // unset the default value is: https://oauth2.googleapis.com/token. + // Optional. + TokenURL string + // STSAudience is the audience sent to when retrieving an STS token. + // Currently this only used for GDCH auth flow, for which it is required. + STSAudience string + // CredentialsFile overrides detection logic and sources a credential file + // from the provided filepath. If provided, CredentialsJSON must not be. + // Optional. + CredentialsFile string + // CredentialsJSON overrides detection logic and uses the JSON bytes as the + // source for the credential. If provided, CredentialsFile must not be. + // Optional. + CredentialsJSON []byte + // UseSelfSignedJWT directs service account based credentials to create a + // self-signed JWT with the private key found in the file, skipping any + // network requests that would normally be made. Optional. + UseSelfSignedJWT bool + // Client configures the underlying client used to make network requests + // when fetching tokens. Optional. + Client *http.Client + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This option is ignored for + // authentication flows that do not support universe domain. Optional. + UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger +} + +func (o *DetectOptions) validate() error { + if o == nil { + return errors.New("credentials: options must be provided") + } + if len(o.Scopes) > 0 && o.Audience != "" { + return errors.New("credentials: both scopes and audience were provided") + } + if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" { + return errors.New("credentials: both credentials file and JSON were provided") + } + return nil +} + +func (o *DetectOptions) tokenURL() string { + if o.TokenURL != "" { + return o.TokenURL + } + return googleTokenURL +} + +func (o *DetectOptions) scopes() []string { + scopes := make([]string, len(o.Scopes)) + copy(scopes, o.Scopes) + return scopes +} + +func (o *DetectOptions) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.DefaultClient() +} + +func (o *DetectOptions) logger() *slog.Logger { + return internallog.New(o.Logger) +} + +func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + return readCredentialsFileJSON(b, opts) +} + +func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + // attempt to parse jsonData as a Google Developers Console client_credentials.json. 
+ config := clientCredConfigFromJSON(b, opts) + if config != nil { + if config.AuthHandlerOpts == nil { + return nil, errors.New("credentials: auth handler must be specified for this credential filetype") + } + tp, err := auth.New3LOTokenProvider(config) + if err != nil { + return nil, err + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: tp, + JSON: b, + }), nil + } + return fileCredentials(b, opts) +} + +func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { + var creds credsfile.ClientCredentialsFile + var c *credsfile.Config3LO + if err := json.Unmarshal(b, &creds); err != nil { + return nil + } + switch { + case creds.Web != nil: + c = creds.Web + case creds.Installed != nil: + c = creds.Installed + default: + return nil + } + if len(c.RedirectURIs) < 1 { + return nil + } + var handleOpts *auth.AuthorizationHandlerOptions + if opts.AuthHandlerOptions != nil { + handleOpts = &auth.AuthorizationHandlerOptions{ + Handler: opts.AuthHandlerOptions.Handler, + State: opts.AuthHandlerOptions.State, + PKCEOpts: opts.AuthHandlerOptions.PKCEOpts, + } + } + return &auth.Options3LO{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: opts.scopes(), + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + Client: opts.client(), + Logger: opts.logger(), + EarlyTokenExpiry: opts.EarlyTokenRefresh, + AuthHandlerOpts: handleOpts, + // TODO(codyoss): refactor this out. We need to add in auto-detection + // for this use case. + AuthStyle: auth.StyleInParams, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go new file mode 100644 index 0000000000..1dbb2866b9 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/doc.go @@ -0,0 +1,45 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. It supports the Web server flow, +// client-side credentials, service accounts, Google Compute Engine service +// accounts, Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. +// +// # Credentials +// +// The [cloud.google.com/go/auth.Credentials] type represents Google +// credentials, including Application Default Credentials. +// +// Use [DetectDefault] to obtain Application Default Credentials. 
+// +// Application Default Credentials support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage, and +// store service account private keys locally. +// +// # Workforce Identity Federation +// +// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount]. +package credentials diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go new file mode 100644 index 0000000000..e5243e6cfb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -0,0 +1,231 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "errors" + "fmt" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/externalaccount" + "cloud.google.com/go/auth/credentials/internal/externalaccountuser" + "cloud.google.com/go/auth/credentials/internal/gdch" + "cloud.google.com/go/auth/credentials/internal/impersonate" + internalauth "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + fileType, err := credsfile.ParseFileType(b) + if err != nil { + return nil, err + } + + var projectID, universeDomain string + var tp auth.TokenProvider + switch fileType { + case credsfile.ServiceAccountKey: + f, err := credsfile.ParseServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.ProjectID + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.UserCredentialsKey: + f, err := credsfile.ParseUserCredentials(b) + if err != nil { + return nil, err + } + tp, err = handleUserCredential(f, opts) + if err != nil { + return nil, err + } + universeDomain = f.UniverseDomain + case credsfile.ExternalAccountKey: + f, err := credsfile.ParseExternalAccount(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccount(f, opts) + if err != nil { + return nil, err + } + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.ExternalAccountAuthorizedUserKey: + f, err := credsfile.ParseExternalAccountAuthorizedUser(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccountAuthorizedUser(f, opts) + if err != nil { + return nil, err + } + universeDomain = f.UniverseDomain + case credsfile.ImpersonatedServiceAccountKey: + f, err := credsfile.ParseImpersonatedServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleImpersonatedServiceAccount(f, opts) + if err != nil { + return nil, err + } + 
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.GDCHServiceAccountKey: + f, err := credsfile.ParseGDCHServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleGDCHServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.Project + universeDomain = f.UniverseDomain + default: + return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType) + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + }), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override + UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), + }), nil +} + +// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to +// support configuring universe-specific credentials in code. Auth flows +// unsupported for universe domain should not use this func, but should instead +// simply set the file universe domain on the credentials. +func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string { + if optsUniverseDomain != "" { + return optsUniverseDomain + } + return fileUniverseDomain +} + +func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + if opts.UseSelfSignedJWT { + return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. 
+ opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) + } + opts2LO := &auth.Options2LO{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: opts.scopes(), + TokenURL: f.TokenURL, + Subject: opts.Subject, + Client: opts.client(), + Logger: opts.logger(), + } + if opts2LO.TokenURL == "" { + opts2LO.TokenURL = jwtTokenURL + } + return auth.New2LOTokenProvider(opts2LO) +} + +func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) { + opts3LO := &auth.Options3LO{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + AuthURL: googleAuthURL, + TokenURL: opts.tokenURL(), + AuthStyle: auth.StyleInParams, + EarlyTokenExpiry: opts.EarlyTokenRefresh, + RefreshToken: f.RefreshToken, + Client: opts.client(), + Logger: opts.logger(), + } + return auth.New3LOTokenProvider(opts3LO) +} + +func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccount.Options{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: opts.scopes(), + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + Client: opts.client(), + Logger: opts.logger(), + IsDefaultClient: opts.Client == nil, + } + if f.ServiceAccountImpersonation != nil { + externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds + } + return externalaccount.NewTokenProvider(externalOpts) +} + +func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccountuser.Options{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + Client: opts.client(), + Logger: opts.logger(), + } + return externalaccountuser.NewTokenProvider(externalOpts) +} + +func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + tp, err := fileCredentials(f.CredSource, opts) + if err != nil { + return nil, err + } + return impersonate.NewTokenProvider(&impersonate.Options{ + URL: f.ServiceAccountImpersonationURL, + Scopes: opts.scopes(), + Tp: tp, + Delegates: f.Delegates, + Client: opts.client(), + Logger: opts.logger(), + }) +} + +func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + return gdch.NewTokenProvider(f, &gdch.Options{ + STSAudience: opts.STSAudience, + Client: opts.client(), + Logger: opts.logger(), + }) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go new file mode 100644 index 0000000000..9ecd1f64bd --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -0,0 
+1,531 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +var ( + // getenv aliases os.Getenv for testing + getenv = os.Getenv +) + +const ( + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTTL = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // Supported AWS configuration environment variables. 
+ awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + awsRegionEnvVar = "AWS_REGION" + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" + awsProviderType = "aws" +) + +type awsSubjectProvider struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + CredVerificationURL string + IMDSv2SessionTokenURL string + TargetResource string + requestSigner *awsRequestSigner + region string + securityCredentialsProvider AwsSecurityCredentialsProvider + reqOpts *RequestOptions + + Client *http.Client + logger *slog.Logger +} + +func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { + // Set Defaults + if sp.RegionalCredVerificationURL == "" { + sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL + } + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if sp.TargetResource != "" { + req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource) + } + sp.requestSigner.signRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (sp *awsSubjectProvider) providerType() string { + if sp.securityCredentialsProvider != nil { + return programmaticProviderType + } + return awsProviderType +} + +func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) { + if sp.IMDSv2SessionTokenURL == "" { + return "", nil + } + req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + + sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil)) + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body)) + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) + } + return string(body), nil +} + +func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) { + if sp.securityCredentialsProvider != nil { + return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts) + } + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv(awsDefaultRegionEnvVar), nil + } + + if sp.RegionURL == "" { + return "", errors.New("credentials: unable to determine AWS region") + } + + req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil)) + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body)) + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
+ bodyLen := len(body) + if bodyLen == 0 { + return "", nil + } + return string(body[:bodyLen-1]), nil +} + +func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) { + if sp.securityCredentialsProvider != nil { + return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts) + } + if canRetrieveSecurityCredentialFromEnvironment() { + return &AwsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyIDEnvVar), + SecretAccessKey: getenv(awsSecretAccessKeyEnvVar), + SessionToken: getenv(awsSessionTokenEnvVar), + }, nil + } + + roleName, err := sp.getMetadataRoleName(ctx, headers) + if err != nil { + return + } + credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("credentials: missing AccessKeyId credential") + } + if credentials.SecretAccessKey == "" { + return result, errors.New("credentials: missing SecretAccessKey credential") + } + + return credentials, nil +} + +func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) { + var result *AwsSecurityCredentials + + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil) + if err != nil { + return result, err + } + for name, value := range headers { + req.Header.Add(name, value) + } + sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil)) + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return result, err + } + sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body)) + if resp.StatusCode != http.StatusOK { + return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) + } + if err := json.Unmarshal(body, &result); err != nil { + return nil, err + } + return result, nil +} + +func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) { + if sp.CredVerificationURL == "" { + return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint") + } + req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil) + if err != nil { + return "", err + } + for name, value := range headers { + req.Header.Add(name, value) + } + + sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil)) + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body)) + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) + } + return string(body), nil +} + +// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. +type awsRequestSigner struct { + RegionName string + AwsSecurityCredentials *AwsSecurityCredentials +} + +// signRequest adds the appropriate headers to an http.Request +// or returns an error if something prevented this. 
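// Editor's illustrative sketch, not part of the vendored diff: the SigV4 signing-key
// derivation performed by generateAuthentication below — an HMAC-SHA256 chain over
// the date stamp, region, service name, and the "aws4_request" terminator, with the
// final HMAC taken over the string to sign. All inputs here are placeholders.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "placeholder-secret-access-key"
	stringToSign := "AWS4-HMAC-SHA256\n20240101T000000Z\n20240101/us-east-1/sts/aws4_request\n<request-hash>"

	key := []byte("AWS4" + secret)
	for _, input := range []string{"20240101", "us-east-1", "sts", "aws4_request"} {
		key = hmacSHA256(key, []byte(input))
	}
	signature := hex.EncodeToString(hmacSHA256(key, []byte(stringToSign)))
	fmt.Println("Signature=" + signature)
}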
+func (rs *awsRequestSigner) signRequest(req *http.Request) error { + // req is assumed non-nil + signedRequest := cloneRequest(req) + timestamp := Now() + signedRequest.Header.Set("host", requestHost(req)) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/") + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n") + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. 
+ var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) + } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = internal.ReadAll(requestBody) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is +// required. +func canRetrieveRegionFromEnvironment() bool { + return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != "" +} + +// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. +func canRetrieveSecurityCredentialFromEnvironment() bool { + return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != "" +} + +func (sp *awsSubjectProvider) shouldUseMetadataServer() bool { + return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go new file mode 100644 index 0000000000..d5765c4749 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go @@ -0,0 +1,284 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
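// Editor's illustrative sketch, not part of the vendored diff: the shape of the
// subject token that awsSubjectProvider.subjectToken (above) produces — a URL-escaped
// JSON description of the signed GetCallerIdentity request, mirroring the awsRequest
// and awsRequestHeader types. All values are placeholders.
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

type awsRequestHeader struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}

type awsRequest struct {
	URL     string             `json:"url"`
	Method  string             `json:"method"`
	Headers []awsRequestHeader `json:"headers"`
}

func main() {
	req := awsRequest{
		URL:    "https://sts.us-east-1.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
		Method: "POST",
		Headers: []awsRequestHeader{
			{Key: "Authorization", Value: "AWS4-HMAC-SHA256 Credential=placeholder, SignedHeaders=host;x-amz-date, Signature=placeholder"},
			{Key: "Host", Value: "sts.us-east-1.amazonaws.com"},
			{Key: "X-Amz-Date", Value: "20240101T000000Z"},
		},
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(url.QueryEscape(string(b)))
}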
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "regexp" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +const ( + executableSupportedMaxVersion = 1 + executableDefaultTimeout = 30 * time.Second + executableSource = "response" + executableProviderType = "executable" + outputFileSource = "output file" + + allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" + idTokenType = "urn:ietf:params:oauth:token-type:id_token" + saml2TokenType = "urn:ietf:params:oauth:token-type:saml2" +) + +var ( + serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`) +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +// environment is a contract for testing +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) + cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError) + } + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableSubjectProvider struct { + Command string + Timeout time.Duration + OutputFile string + client *http.Client + opts *Options + env environment +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IDToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + // Validate + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + if result.Success == nil { + return "", missingFieldError(source, "success") + } + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + if result.ExpirationTime == 0 && 
sp.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + switch result.TokenType { + case jwtTokenType, idTokenType: + if result.IDToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IDToken, nil + case saml2TokenType: + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + default: + return "", tokenTypeError(source) + } +} + +func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) { + if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + return sp.getTokenFromExecutableCommand(ctx) +} + +func (sp *executableSubjectProvider) providerType() string { + return executableProviderType +} + +func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) { + if sp.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(sp.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := internal.ReadAll(file) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (sp *executableSubjectProvider) executableEnvironment() []string { + result := sp.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if sp.opts.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if sp.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile)) + } + return result +} + +func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. 
+ if sp.env.getenv(allowExecutablesEnvVar) != "1" { + return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") + } + + ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout)) + defer cancel() + + output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment()) + if err != nil { + return "", err + } + return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix()) +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("credentials: %q missing %q field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("credentials: unable to parse %q: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"credentials: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("credentials: %v contains unsupported token type", source) +} + +func exitCodeError(err *exec.ExitError) error { + return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err) +} + +func executableError(err error) error { + return fmt.Errorf("credentials: executable command failed: %w", err) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go new file mode 100644 index 0000000000..a822064234 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -0,0 +1,428 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
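// Editor's illustrative sketch, not part of the vendored diff: a minimal success
// response that an external-credential executable could print for the provider above
// to parse — version, success, token_type, the token itself, and expiration_time
// (required whenever an output file is configured). Values are placeholders.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	resp := map[string]interface{}{
		"version":         1,
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:jwt",
		"id_token":        "placeholder.jwt.value",
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	out, err := json.Marshal(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}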
+ +package externalaccount + +import ( + "context" + "errors" + "fmt" + "log/slog" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/impersonate" + "cloud.google.com/go/auth/credentials/internal/stsexchange" + "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token" + defaultUniverseDomain = "googleapis.com" +) + +var ( + // Now aliases time.Now for testing + Now = func() time.Time { + return time.Now().UTC() + } + validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) +) + +// Options stores the configuration for fetching tokens with external credentials. +type Options struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. + Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec + // e.g. `urn:ietf:params:oauth:token-type:jwt`. + SubjectTokenType string + // TokenURL is the STS token exchange endpoint. + TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. + TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. + ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. + ServiceAccountImpersonationLifetimeSeconds int + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using client_id as username and client_secret as password. + ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. + CredentialSource *credsfile.CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project which overrides the project associated with the credentials. + QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. + Scopes []string + // WorkforcePoolUserProject should be set when it is a workforce pool and + // not a workload identity pool. The underlying principal must still have + // serviceusage.services.use IAM permission to use the project for + // billing/quota. Optional. + WorkforcePoolUserProject string + // UniverseDomain is the default service domain for a given Cloud universe. + // This value will be used in the default STS token URL. The default value + // is "googleapis.com". It will not be used if TokenURL is set. Optional. 
+ UniverseDomain string + // SubjectTokenProvider is an optional token provider for OIDC/SAML + // credentials. One of SubjectTokenProvider, AWSSecurityCredentialProvider + // or CredentialSource must be provided. Optional. + SubjectTokenProvider SubjectTokenProvider + // AwsSecurityCredentialsProvider is an AWS Security Credential provider + // for AWS credentials. One of SubjectTokenProvider, + // AWSSecurityCredentialProvider or CredentialSource must be provided. Optional. + AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider + // Client for token request. + Client *http.Client + // IsDefaultClient marks whether the client passed in is a default client that can be overriden. + // This is important for X509 credentials which should create a new client if the default was used + // but should respect a client explicitly passed in by the user. + IsDefaultClient bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger +} + +// SubjectTokenProvider can be used to supply a subject token to exchange for a +// GCP access token. +type SubjectTokenProvider interface { + // SubjectToken should return a valid subject token or an error. + // The external account token provider does not cache the returned subject + // token, so caching logic should be implemented in the provider to prevent + // multiple requests for the same subject token. + SubjectToken(ctx context.Context, opts *RequestOptions) (string, error) +} + +// RequestOptions contains information about the requested subject token or AWS +// security credentials from the Google external account credential. +type RequestOptions struct { + // Audience is the requested audience for the external account credential. + Audience string + // Subject token type is the requested subject token type for the external + // account credential. Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + SubjectTokenType string +} + +// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials +// and an AWS Region to exchange for a GCP access token. +type AwsSecurityCredentialsProvider interface { + // AwsRegion should return the AWS region or an error. + AwsRegion(ctx context.Context, opts *RequestOptions) (string, error) + // GetAwsSecurityCredentials should return a valid set of + // AwsSecurityCredentials or an error. The external account token provider + // does not cache the returned security credentials, so caching logic should + // be implemented in the provider to prevent multiple requests for the + // same security credentials. + AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error) +} + +// AwsSecurityCredentials models AWS security credentials. +type AwsSecurityCredentials struct { + // AccessKeyId is the AWS Access Key ID - Required. + AccessKeyID string `json:"AccessKeyID"` + // SecretAccessKey is the AWS Secret Access Key - Required. + SecretAccessKey string `json:"SecretAccessKey"` + // SessionToken is the AWS Session token. This should be provided for + // temporary AWS security credentials - Optional. 
+ SessionToken string `json:"Token"` +} + +func (o *Options) validate() error { + if o.Audience == "" { + return fmt.Errorf("externalaccount: Audience must be set") + } + if o.SubjectTokenType == "" { + return fmt.Errorf("externalaccount: Subject token type must be set") + } + if o.WorkforcePoolUserProject != "" { + if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid { + return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials") + } + } + count := 0 + if o.CredentialSource != nil { + count++ + } + if o.SubjectTokenProvider != nil { + count++ + } + if o.AwsSecurityCredentialsProvider != nil { + count++ + } + if count == 0 { + return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + if count > 1 { + return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + return nil +} + +// client returns the http client that should be used for the token exchange. If a non-default client +// is provided, then the client configured in the options will always be returned. If a default client +// is provided and the options are configured for X509 credentials, a new client will be created. +func (o *Options) client() (*http.Client, error) { + // If a client was provided and no override certificate config location was provided, use the provided client. + if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") { + return o.Client, nil + } + + // If a new client should be created, validate and use the certificate source to create a new mTLS client. + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return createX509Client(cert.CertificateConfigLocation) +} + +// resolveTokenURL sets the default STS token endpoint with the configured +// universe domain. +func (o *Options) resolveTokenURL() { + if o.TokenURL != "" { + return + } else if o.UniverseDomain != "" { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1) + } else { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options. 
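Before the constructor below, a rough sketch of how these options are typically wired together from inside this package: a caller supplies either a CredentialSource or, as here, a programmatic SubjectTokenProvider. This is illustrative only; the audience, scope, and token values are placeholders, and real callers go through the exported credentials packages rather than this internal path.

package externalaccount

import (
	"context"
	"net/http"

	"cloud.google.com/go/auth"
)

// staticOIDCProvider is a toy SubjectTokenProvider; real implementations
// should mint, refresh, and cache their own subject tokens.
type staticOIDCProvider struct{ token string }

func (p staticOIDCProvider) SubjectToken(ctx context.Context, _ *RequestOptions) (string, error) {
	return p.token, nil
}

func newExampleTokenProvider(client *http.Client) (auth.TokenProvider, error) {
	return NewTokenProvider(&Options{
		Audience:             "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
		SubjectTokenType:     "urn:ietf:params:oauth:token-type:jwt",
		Scopes:               []string{"https://www.googleapis.com/auth/cloud-platform"},
		SubjectTokenProvider: staticOIDCProvider{token: "placeholder-subject-token"},
		Client:               client,
	})
}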
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + opts.resolveTokenURL() + logger := internallog.New(opts.Logger) + stp, err := newSubjectTokenProvider(opts) + if err != nil { + return nil, err + } + + client, err := opts.client() + if err != nil { + return nil, err + } + + tp := &tokenProvider{ + client: client, + opts: opts, + stp: stp, + logger: logger, + } + + if opts.ServiceAccountImpersonationURL == "" { + return auth.NewCachedTokenProvider(tp, nil), nil + } + + scopes := make([]string, len(opts.Scopes)) + copy(scopes, opts.Scopes) + // needed for impersonation + tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp, err := impersonate.NewTokenProvider(&impersonate.Options{ + Client: client, + URL: opts.ServiceAccountImpersonationURL, + Scopes: scopes, + Tp: auth.NewCachedTokenProvider(tp, nil), + TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + Logger: logger, + }) + if err != nil { + return nil, err + } + return auth.NewCachedTokenProvider(imp, nil), nil +} + +type subjectTokenProvider interface { + subjectToken(ctx context.Context) (string, error) + providerType() string +} + +// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. +type tokenProvider struct { + client *http.Client + logger *slog.Logger + opts *Options + stp subjectTokenProvider +} + +func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { + subjectToken, err := tp.stp.subjectToken(ctx) + if err != nil { + return nil, err + } + + stsRequest := &stsexchange.TokenRequest{ + GrantType: stsexchange.GrantType, + Audience: tp.opts.Audience, + Scope: tp.opts.Scopes, + RequestedTokenType: stsexchange.TokenType, + SubjectToken: subjectToken, + SubjectTokenType: tp.opts.SubjectTokenType, + } + header := make(http.Header) + header.Set("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: auth.StyleInHeader, + ClientID: tp.opts.ClientID, + ClientSecret: tp.opts.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" { + options = map[string]interface{}{ + "userProject": tp.opts.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{ + Client: tp.client, + Endpoint: tp.opts.TokenURL, + Request: stsRequest, + Authentication: clientAuth, + Headers: header, + ExtraOpts: options, + Logger: tp.logger, + }) + if err != nil { + return nil, err + } + + tok := &auth.Token{ + Value: stsResp.AccessToken, + Type: stsResp.TokenType, + } + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. 
+ if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("credentials: got invalid expiry from security token service") + } + tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + return tok, nil +} + +// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a +// subjectTokenProvider +func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + logger := internallog.New(o.Logger) + reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} + if o.AwsSecurityCredentialsProvider != nil { + return &awsSubjectProvider{ + securityCredentialsProvider: o.AwsSecurityCredentialsProvider, + TargetResource: o.Audience, + reqOpts: reqOpts, + logger: logger, + }, nil + } else if o.SubjectTokenProvider != nil { + return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil + } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion) + } + + awsProvider := &awsSubjectProvider{ + EnvironmentID: o.CredentialSource.EnvironmentID, + RegionURL: o.CredentialSource.RegionURL, + RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL, + CredVerificationURL: o.CredentialSource.URL, + TargetResource: o.Audience, + Client: o.Client, + logger: logger, + } + if o.CredentialSource.IMDSv2SessionTokenURL != "" { + awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL + } + + return awsProvider, nil + } + } else if o.CredentialSource.File != "" { + return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil + } else if o.CredentialSource.URL != "" { + return &urlSubjectProvider{ + URL: o.CredentialSource.URL, + Headers: o.CredentialSource.Headers, + Format: o.CredentialSource.Format, + Client: o.Client, + Logger: logger, + }, nil + } else if o.CredentialSource.Executable != nil { + ec := o.CredentialSource.Executable + if ec.Command == "" { + return nil, errors.New("credentials: missing `command` field — executable command must be provided") + } + + execProvider := &executableSubjectProvider{} + execProvider.Command = ec.Command + if ec.TimeoutMillis == 0 { + execProvider.Timeout = executableDefaultTimeout + } else { + execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond + if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum { + return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds()) + } + } + execProvider.OutputFile = ec.OutputFile + execProvider.client = o.Client + execProvider.opts = o + execProvider.env = runtimeEnvironment{} + return execProvider, nil + } else if o.CredentialSource.Certificate != nil { + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and 
use_default_certificate_config=true") + } + return &x509Provider{}, nil + } + return nil, errors.New("credentials: unable to parse credential source") +} + +func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + p.providerType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go new file mode 100644 index 0000000000..8186939fe1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go @@ -0,0 +1,78 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileProviderType = "file" +) + +type fileSubjectProvider struct { + File string + Format *credsfile.Format +} + +func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) { + tokenFile, err := os.Open(sp.File) + if err != nil { + return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err) + } + defer tokenFile.Close() + tokenBytes, err := internal.ReadAll(tokenFile) + if err != nil { + return "", fmt.Errorf("credentials: failed to read credential file: %w", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + + if sp.Format == nil { + return string(tokenBytes), nil + } + switch sp.Format.Type { + case fileTypeJSON: + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(tokenBytes), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *fileSubjectProvider) providerType() string { + return fileProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go new file mode 100644 index 0000000000..8e4b4379b4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go @@ -0,0 +1,74 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return versionUnknown +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go new file mode 100644 index 0000000000..be3c87351f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go @@ -0,0 +1,30 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
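goVersion in info.go above normalizes the runtime.Version string before it is embedded in the x-goog-api-client value built by getGoogHeaderValue earlier in this hunk. A quick in-package test sketch of the expected normalization (illustrative only, not part of the vendored code):

package externalaccount

import "testing"

func TestGoVersionFormat(t *testing.T) {
	orig := version
	defer func() { version = orig }()

	cases := map[string]string{
		"go1.21.3":                 "1.21.3",     // "go" prefix dropped, already semver-like
		"go1.22rc1":                "1.22.0-rc1", // prerelease gets a padded patch and a dash
		"devel +abc1234 Tue Jan 1": "abc1234",    // devel builds report the commit
		"something else":           "UNKNOWN",    // anything unrecognized
	}
	for in, want := range cases {
		version = func() string { return in }
		if got := goVersion(); got != want {
			t.Errorf("goVersion(%q) = %q, want %q", in, got, want)
		}
	}
}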
+ +package externalaccount + +import "context" + +type programmaticProvider struct { + opts *RequestOptions + stp SubjectTokenProvider +} + +func (pp *programmaticProvider) providerType() string { + return programmaticProviderType +} + +func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) { + return pp.stp.SubjectToken(ctx, pp.opts) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go new file mode 100644 index 0000000000..754ecf4fef --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -0,0 +1,93 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + fileTypeText = "text" + fileTypeJSON = "json" + urlProviderType = "url" + programmaticProviderType = "programmatic" + x509ProviderType = "x509" +) + +type urlSubjectProvider struct { + URL string + Headers map[string]string + Format *credsfile.Format + Client *http.Client + Logger *slog.Logger +} + +func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil) + if err != nil { + return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err) + } + + for key, val := range sp.Headers { + req.Header.Add(key, val) + } + sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil)) + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) + } + sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return "", fmt.Errorf("credentials: status code %d: %s", c, body) + } + + if sp.Format == nil { + return string(body), nil + } + switch sp.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(body, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(body), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp 
*urlSubjectProvider) providerType() string { + return urlProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go new file mode 100644 index 0000000000..115df5881f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -0,0 +1,63 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" +) + +// x509Provider implements the subjectTokenProvider type for +// x509 workload identity credentials. Because x509 credentials +// rely on an mTLS connection to represent the 3rd party identity +// rather than a subject token, this provider will always return +// an empty string when a subject token is requested by the external account +// token provider. +type x509Provider struct { +} + +func (xp *x509Provider) providerType() string { + return x509ProviderType +} + +func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { + return "", nil +} + +// createX509Client creates a new client that is configured with mTLS, using the +// certificate configuration specified in the credential source. +func createX509Client(certificateConfigLocation string) (*http.Client, error) { + certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation) + if err != nil { + return nil, err + } + trans := http.DefaultTransport.(*http.Transport).Clone() + + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: certProvider, + } + + // Create a client with default settings plus the X509 workload cert and key. + client := &http.Client{ + Transport: trans, + Timeout: 30 * time.Second, + } + + return client, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go new file mode 100644 index 0000000000..ae39206e5f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -0,0 +1,115 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
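For orientation, the url-sourced branch just shown is selected by a credential_source stanza like the one below. The key names follow the documented external account (workload identity federation) configuration format rather than anything in this hunk, and all values are placeholders; a format of type "json" makes the provider pluck subject_token_field_name out of the response body, while "text" (or no format at all) returns the body as-is, mirroring the switch above.

package main

import "fmt"

// Placeholder external_account configuration that would be routed to urlSubjectProvider.
const urlSourcedCreds = `{
  "type": "external_account",
  "audience": "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
  "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
  "credential_source": {
    "url": "https://token.example.internal/subject-token",
    "headers": {"X-Demo-Header": "1"},
    "format": {"type": "json", "subject_token_field_name": "access_token"}
  }
}`

func main() {
	fmt.Println(urlSourcedCreds)
}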
+ +package externalaccountuser + +import ( + "context" + "errors" + "log/slog" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/stsexchange" + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +// Options stores the configuration for fetching tokens with external authorized +// user credentials. +type Options struct { + // Audience is the Secure Token Service (STS) audience which contains the + // resource name for the workforce pool and the provider identifier in that + // pool. + Audience string + // RefreshToken is the OAuth 2.0 refresh token. + RefreshToken string + // TokenURL is the STS token exchange endpoint for refresh. + TokenURL string + // TokenInfoURL is the STS endpoint URL for token introspection. Optional. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described + // below. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs + // to be called with the generated a cloud access token. When provided, STS + // will be called with additional basic authentication using client_id as + // username and client_secret as password. + ClientSecret string + // Scopes contains the desired scopes for the returned access token. + Scopes []string + + // Client for token request. + Client *http.Client + // Logger for logging. + Logger *slog.Logger +} + +func (c *Options) validate() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options. +func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if !opts.validate() { + return nil, errors.New("credentials: invalid external_account_authorized_user configuration") + } + + tp := &tokenProvider{ + o: opts, + } + return auth.NewCachedTokenProvider(tp, nil), nil +} + +type tokenProvider struct { + o *Options +} + +func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { + opts := tp.o + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: auth.StyleInHeader, + ClientID: opts.ClientID, + ClientSecret: opts.ClientSecret, + } + headers := make(http.Header) + headers.Set("Content-Type", "application/x-www-form-urlencoded") + stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{ + Client: opts.Client, + Endpoint: opts.TokenURL, + RefreshToken: opts.RefreshToken, + Authentication: clientAuth, + Headers: headers, + Logger: internallog.New(tp.o.Logger), + }) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("credentials: invalid expiry from security token service") + } + + // guarded by the wrapping with CachedTokenProvider + if stsResponse.RefreshToken != "" { + opts.RefreshToken = stsResponse.RefreshToken + } + return &auth.Token{ + Value: stsResponse.AccessToken, + Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + Type: internal.TokenTypeBearer, + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go new file mode 100644 index 0000000000..c2d320fdf4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -0,0 +1,191 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gdch + +import ( + "context" + "crypto" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + "net/url" + "os" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + // GrantType is the grant type for the token request. + GrantType = "urn:ietf:params:oauth:token-type:token-exchange" + requestTokenType = "urn:ietf:params:oauth:token-type:access_token" + subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount" +) + +var ( + gdchSupportFormatVersions map[string]bool = map[string]bool{ + "1": true, + } +) + +// Options for [NewTokenProvider]. +type Options struct { + STSAudience string + Client *http.Client + Logger *slog.Logger +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a +// GDCH cred file. +func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) { + if !gdchSupportFormatVersions[f.FormatVersion] { + return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion) + } + if o.STSAudience == "" { + return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") + } + signer, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, err + } + certPool, err := loadCertPool(f.CertPath) + if err != nil { + return nil, err + } + + tp := gdchProvider{ + serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), + tokenURL: f.TokenURL, + aud: o.STSAudience, + signer: signer, + pkID: f.PrivateKeyID, + certPool: certPool, + client: o.Client, + logger: internallog.New(o.Logger), + } + return tp, nil +} + +func loadCertPool(path string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + pem, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read certificate: %w", err) + } + pool.AppendCertsFromPEM(pem) + return pool, nil +} + +type gdchProvider struct { + serviceIdentity string + tokenURL string + aud string + signer crypto.Signer + pkID string + certPool *x509.CertPool + + client *http.Client + logger *slog.Logger +} + +func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { + addCertToTransport(g.client, g.certPool) + iat := time.Now() + exp := iat.Add(time.Hour) + claims := jwt.Claims{ + Iss: g.serviceIdentity, + Sub: g.serviceIdentity, + Aud: g.tokenURL, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(g.pkID), + } + payload, err := jwt.EncodeJWS(&h, &claims, g.signer) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", GrantType) + v.Set("audience", g.aud) + v.Set("requested_token_type", requestTokenType) + v.Set("subject_token", payload) + v.Set("subject_token_type", subjectTokenType) + + req, err := http.NewRequestWithContext(ctx, 
"POST", g.tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) + resp, body, err := internal.DoRequest(g.client, req) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, &auth.Error{ + Response: resp, + Body: body, + } + } + + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + token := &auth.Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token.Metadata = raw + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + return token, nil +} + +// addCertToTransport makes a best effort attempt at adding in the cert info to +// the client. It tries to keep all configured transport settings if the +// underlying transport is an http.Transport. Or else it overwrites the +// transport with defaults adding in the certs. +func addCertToTransport(hc *http.Client, certPool *x509.CertPool) { + trans, ok := hc.Transport.(*http.Transport) + if !ok { + trans = http.DefaultTransport.(*http.Transport).Clone() + } + trans.TLSClientConfig = &tls.Config{ + RootCAs: certPool, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go new file mode 100644 index 0000000000..b3a99261fa --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -0,0 +1,156 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + defaultTokenLifetime = "3600s" + authHeaderKey = "Authorization" +) + +// generateAccesstokenReq is used for service account impersonation +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +// NewTokenProvider uses a source credential, stored in Ts, to request an access token to the provided URL. +// Scopes can be defined when the access token is requested. +func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + return opts, nil +} + +// Options for [NewTokenProvider]. +type Options struct { + // Tp is the source credential used to generate a token on the + // impersonated service account. Required. + Tp auth.TokenProvider + + // URL is the endpoint to call to generate a token + // on behalf of the service account. Required. + URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. Defaults to 1 hour if unset. Optional. + TokenLifetimeSeconds int + // Client configures the underlying client used to make network requests + // when fetching tokens. Required. + Client *http.Client + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger +} + +func (o *Options) validate() error { + if o.Tp == nil { + return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials") + } + if o.URL == "" { + return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials") + } + return nil +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. 
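Before the Token implementation below, a small in-package sketch (not part of the vendored code) of the request body it marshals for the IAM Credentials generateAccessToken call, using the struct defined above; the delegate address is a placeholder.

package impersonate

import (
	"encoding/json"
	"fmt"
)

// Illustrative only: the body Token builds for a one-hour token with a single
// scope and one placeholder delegate in the chain.
func printGenerateAccessTokenBody() {
	b, _ := json.Marshal(generateAccessTokenReq{
		Delegates: []string{"intermediate-sa@example-project.iam.gserviceaccount.com"},
		Lifetime:  "3600s",
		Scope:     []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	fmt.Println(string(b))
	// {"delegates":["intermediate-sa@example-project.iam.gserviceaccount.com"],"lifetime":"3600s","scope":["https://www.googleapis.com/auth/cloud-platform"]}
}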
+func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + logger := internallog.New(o.Logger) + lifetime := defaultTokenLifetime + if o.TokenLifetimeSeconds != 0 { + lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetime, + Scope: o.Scopes, + Delegates: o.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("credentials: unable to marshal request: %w", err) + } + req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + if err := setAuthHeader(ctx, o.Tp, req); err != nil { + return nil, err + } + logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b)) + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) + } + logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("credentials: unable to parse response: %w", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err) + } + return &auth.Token{ + Value: accessTokenResp.AccessToken, + Expiry: expiry, + Type: internal.TokenTypeBearer, + }, nil +} + +func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error { + t, err := tp.Token(ctx) + if err != nil { + return err + } + typ := t.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + r.Header.Set(authHeaderKey, typ+" "+t.Value) + return nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go new file mode 100644 index 0000000000..e1d2b15034 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -0,0 +1,167 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stsexchange + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "net/url" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + // GrantType for a sts exchange. + GrantType = "urn:ietf:params:oauth:grant-type:token-exchange" + // TokenType for a sts exchange. 
+ TokenType = "urn:ietf:params:oauth:token-type:access_token" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" +) + +// Options stores the configuration for making an sts exchange request. +type Options struct { + Client *http.Client + Logger *slog.Logger + Endpoint string + Request *TokenRequest + Authentication ClientAuthentication + Headers http.Header + // ExtraOpts are optional fields marshalled into the `options` field of the + // request body. + ExtraOpts map[string]interface{} + RefreshToken string +} + +// RefreshAccessToken performs the token exchange using a refresh token flow. +func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", opts.RefreshToken) + return doRequest(ctx, opts, data) +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("audience", opts.Request.Audience) + data.Set("grant_type", GrantType) + data.Set("requested_token_type", TokenType) + data.Set("subject_token_type", opts.Request.SubjectTokenType) + data.Set("subject_token", opts.Request.SubjectToken) + data.Set("scope", strings.Join(opts.Request.Scope, " ")) + if opts.ExtraOpts != nil { + opts, err := json.Marshal(opts.ExtraOpts) + if err != nil { + return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err) + } + data.Set("options", string(opts)) + } + return doRequest(ctx, opts, data) +} + +func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { + opts.Authentication.InjectAuthentication(data, opts.Headers) + encodedData := data.Encode() + logger := internallog.New(opts.Logger) + + req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err) + + } + for key, list := range opts.Headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + + logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData))) + resp, body, err := internal.DoRequest(opts.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) + } + logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + var stsResp TokenResponse + if err := json.Unmarshal(body, &stsResp); err != nil { + return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err) + } + + return &stsResp, nil +} + +// TokenRequest contains fields necessary to make an oauth2 token +// exchange. +type TokenRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// TokenResponse is used to decode the remote server response during +// an oauth2 token exchange. 
+type TokenResponse struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} + +// ClientAuthentication represents an OAuth client ID and secret and the +// mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { + AuthStyle auth.Style + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service +// exchange request. It modifies either the passed url.Values or http.Header +// depending on the desired authentication format. +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + switch c.AuthStyle { + case auth.StyleInHeader: + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go new file mode 100644 index 0000000000..8d335ccecc --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -0,0 +1,89 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "crypto" + "errors" + "fmt" + "log/slog" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +var ( + // for testing + now func() time.Time = time.Now +) + +// configureSelfSignedJWT uses the private key in the service account to create +// a JWT without making a network call. 
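One more aside on sts_exchange.go before moving on: ClientAuthentication.InjectAuthentication above either sets a Basic Authorization header or falls back to client_id/client_secret form fields, depending on the configured auth style. A minimal in-package sketch (illustrative only, not part of the vendored code):

package stsexchange

import (
	"fmt"
	"net/http"
	"net/url"

	"cloud.google.com/go/auth"
)

// demoInjectAuthentication shows the header-style path; any other style would
// instead add client_id and client_secret to the form values.
func demoInjectAuthentication() {
	values := url.Values{}
	headers := make(http.Header)
	c := ClientAuthentication{AuthStyle: auth.StyleInHeader, ClientID: "id", ClientSecret: "secret"}
	c.InjectAuthentication(values, headers)
	fmt.Println(headers.Get("Authorization")) // Basic aWQ6c2VjcmV0
	fmt.Println(values.Encode())              // empty: nothing added to the form body
}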
+func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } + signer, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("credentials: could not parse key: %w", err) + } + return &selfSignedTokenProvider{ + email: f.ClientEmail, + audience: opts.Audience, + scopes: opts.scopes(), + signer: signer, + pkID: f.PrivateKeyID, + logger: opts.logger(), + }, nil +} + +type selfSignedTokenProvider struct { + email string + audience string + scopes []string + signer crypto.Signer + pkID string + logger *slog.Logger +} + +func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { + iat := now() + exp := iat.Add(time.Hour) + scope := strings.Join(tp.scopes, " ") + c := &jwt.Claims{ + Iss: tp.email, + Sub: tp.email, + Aud: tp.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := &jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(tp.pkID), + } + tok, err := jwt.EncodeJWS(h, c, tp.signer) + if err != nil { + return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) + } + tp.logger.Debug("created self-signed JWT", "token", tok) + return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go new file mode 100644 index 0000000000..e613608057 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go @@ -0,0 +1,62 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package grpctransport + +import ( + "context" + "net" + "syscall" + + "google.golang.org/grpc" +) + +const ( + // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By + // default is 20 seconds. + tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. + tcpUserTimeoutOp = 0x12 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. 
+ timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go new file mode 100644 index 0000000000..d781c3e49a --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -0,0 +1,126 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpctransport + +import ( + "context" + "net" + "os" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal/compute" + "google.golang.org/grpc" + grpcgoogle "google.golang.org/grpc/credentials/google" +) + +func isDirectPathEnabled(endpoint string, opts *Options) bool { + if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath { + return false + } + if !checkDirectPathEndPoint(endpoint) { + return false + } + if b, _ := strconv.ParseBool(os.Getenv(disableDirectPathEnvVar)); b { + return false + } + return true +} + +func checkDirectPathEndPoint(endpoint string) bool { + // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). + if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + if endpoint == "" { + return false + } + + return true +} + +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { + if tp == nil { + return false + } + tok, err := tp.Token(context.Background()) + if err != nil { + return false + } + if tok == nil { + return false + } + if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { + return false + } + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } + if tok.MetadataString("auth.google.serviceAccount") != "default" { + return false + } + return true +} + +func isDirectPathXdsUsed(o *Options) bool { + // Method 1: Enable DirectPath xDS by env; + if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b { + return true + } + // Method 2: Enable DirectPath xDS by option; + if o.InternalOptions != nil && o.InternalOptions.EnableDirectPathXds { + return true + } + return false +} + +// configureDirectPath returns some dial options and an endpoint to use if the +// configuration allows the use of direct path. 
If it does not the provided +// grpcOpts and endpoint are returned. +func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} + if timeoutDialerOption != nil { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + // Check if google-c2p resolver is enabled for DirectPath + if isDirectPathXdsUsed(opts) { + // google-c2p resolver target must not have a port number + if addr, _, err := net.SplitHostPort(endpoint); err == nil { + endpoint = "google-c2p:///" + addr + } else { + endpoint = "google-c2p:///" + endpoint + } + } else { + if !strings.HasPrefix(endpoint, "dns:///") { + endpoint = "dns:///" + endpoint + } + grpcOpts = append(grpcOpts, + // For now all DirectPath go clients will be using the following lb config, but in future + // when different services need different configs, then we should change this to a + // per-service config. + grpc.WithDisableServiceConfig(), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) + } + // TODO: add support for system parameters (quota project, request reason) via chained interceptor. + } + return grpcOpts, endpoint +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go new file mode 100644 index 0000000000..95f259037f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -0,0 +1,438 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. +package grpctransport + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log/slog" + "net/http" + "os" + "sync" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" + "github.com/googleapis/gax-go/v2/internallog" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "google.golang.org/grpc" + grpccreds "google.golang.org/grpc/credentials" + grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/stats" +) + +const ( + // Check env to disable DirectPath traffic. + disableDirectPathEnvVar = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" + + // Check env to decide if using google-c2p resolver for DirectPath traffic. 
+ enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" + + quotaProjectHeaderKey = "X-goog-user-project" +) + +var ( + // Set at init time by dial_socketopt.go. If nil, socketopt is not supported. + timeoutDialerOption grpc.DialOption +) + +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: When this module depends on a version of otelgrpc containing the fix, +// replace this singleton with inline usage for simplicity. +// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// Options used to configure a [GRPCClientConnPool] from [Dial]. +type Options struct { + // DisableTelemetry disables default telemetry (OpenTelemetry). An example + // reason to do so would be to bind custom telemetry that overrides the + // defaults. + DisableTelemetry bool + // DisableAuthentication specifies that no authentication should be used. It + // is suitable only for testing and for accessing public resources, like + // public Google Cloud Storage buckets. + DisableAuthentication bool + // Endpoint overrides the default endpoint to be used for a service. + Endpoint string + // Metadata is extra gRPC metadata that will be appended to every outgoing + // request. + Metadata map[string]string + // GRPCDialOpts are dial options that will be passed to `grpc.Dial` when + // establishing a`grpc.Conn`` + GRPCDialOpts []grpc.DialOption + // PoolSize is specifies how many connections to balance between when making + // requests. If unset or less than 1, the value defaults to 1. + PoolSize int + // Credentials used to add Authorization metadata to all requests. If set + // DetectOpts are ignored. + Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider + // DetectOpts configures settings for detect Application Default + // Credentials. + DetectOpts *credentials.DetectOptions + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This is the universe domain + // configured for the client, which will be compared to the universe domain + // that is separately configured for the credentials. + UniverseDomain string + // APIKey specifies an API key to be used as the basis for authentication. + // If set DetectOpts are ignored. + APIKey string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. 
Optional. + Logger *slog.Logger + + // InternalOptions are NOT meant to be set directly by consumers of this + // package, they should only be set by generated client code. + InternalOptions *InternalOptions +} + +// client returns the client a user set for the detect options or nil if one was +// not set. +func (o *Options) client() *http.Client { + if o.DetectOpts != nil && o.DetectOpts.Client != nil { + return o.DetectOpts.Client + } + return nil +} + +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + +func (o *Options) validate() error { + if o == nil { + return errors.New("grpctransport: opts required to be non-nil") + } + if o.InternalOptions != nil && o.InternalOptions.SkipValidation { + return nil + } + hasCreds := o.APIKey != "" || + o.Credentials != nil || + (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || + (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") + if o.DisableAuthentication && hasCreds { + return errors.New("grpctransport: DisableAuthentication is incompatible with options that set or detect credentials") + } + return nil +} + +func (o *Options) resolveDetectOptions() *credentials.DetectOptions { + io := o.InternalOptions + // soft-clone these so we are not updating a ref the user holds and may reuse + do := transport.CloneDetectOptions(o.DetectOpts) + + // If scoped JWTs are enabled user provided an aud, allow self-signed JWT. + if (io != nil && io.EnableJWTWithScope) || do.Audience != "" { + do.UseSelfSignedJWT = true + } + // Only default scopes if user did not also set an audience. + if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 { + do.Scopes = make([]string, len(io.DefaultScopes)) + copy(do.Scopes, io.DefaultScopes) + } + if len(do.Scopes) == 0 && do.Audience == "" && io != nil { + do.Audience = o.InternalOptions.DefaultAudience + } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = credentials.GoogleMTLSTokenURL + } + if do.Logger == nil { + do.Logger = o.logger() + } + return do +} + +// InternalOptions are only meant to be set by generated client code. These are +// not meant to be set directly by consumers of this package. Configuration in +// this type is considered EXPERIMENTAL and may be removed at any time in the +// future without warning. +type InternalOptions struct { + // EnableNonDefaultSAForDirectPath overrides the default requirement for + // using the default service account for DirectPath. + EnableNonDefaultSAForDirectPath bool + // EnableDirectPath overrides the default attempt to use DirectPath. + EnableDirectPath bool + // EnableDirectPathXds overrides the default DirectPath type. It is only + // valid when DirectPath is enabled. + EnableDirectPathXds bool + // EnableJWTWithScope specifies if scope can be used with self-signed JWT. + EnableJWTWithScope bool + // DefaultAudience specifies a default audience to be used as the audience + // field ("aud") for the JWT token authentication. + DefaultAudience string + // DefaultEndpointTemplate combined with UniverseDomain specifies + // the default endpoint. + DefaultEndpointTemplate string + // DefaultMTLSEndpoint specifies the default mTLS endpoint. + DefaultMTLSEndpoint string + // DefaultScopes specifies the default OAuth2 scopes to be used for a + // service. + DefaultScopes []string + // SkipValidation bypasses validation on Options. 
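Editor's note: the scope/audience defaulting in resolveDetectOptions above is easy to lose in the branches. The helper below is an illustrative summary of that precedence (it is not part of the package): explicit scopes or audience are never overridden, otherwise the generated client's DefaultScopes apply, and only when those are absent does DefaultAudience kick in.

package main

import "fmt"

// resolveScopesAndAudience mirrors the defaulting order used above.
func resolveScopesAndAudience(userScopes []string, userAud string, defaultScopes []string, defaultAud string) ([]string, string) {
	if len(userScopes) > 0 || userAud != "" {
		return userScopes, userAud // explicit settings win
	}
	if len(defaultScopes) > 0 {
		return append([]string(nil), defaultScopes...), ""
	}
	return nil, defaultAud
}

func main() {
	scopes, aud := resolveScopesAndAudience(nil, "",
		[]string{"https://www.googleapis.com/auth/cloud-platform"}, "https://monitoring.googleapis.com/")
	fmt.Println(scopes, aud) // falls through to the client's DefaultScopes
}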
It should only be used + // internally for clients that needs more control over their transport. + SkipValidation bool +} + +// Dial returns a GRPCClientConnPool that can be used to communicate with a +// Google cloud service, configured with the provided [Options]. It +// automatically appends Authorization metadata to all outgoing requests. +func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.PoolSize <= 1 { + conn, err := dial(ctx, secure, opts) + if err != nil { + return nil, err + } + return &singleConnPool{conn}, nil + } + pool := &roundRobinConnPool{} + for i := 0; i < opts.PoolSize; i++ { + conn, err := dial(ctx, secure, opts) + if err != nil { + // ignore close error, if any + defer pool.Close() + return nil, err + } + pool.conns = append(pool.conns, conn) + } + return pool, nil +} + +// return a GRPCClientConnPool if pool == 1 or else a pool of of them if >1 +func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) { + tOpts := &transport.Options{ + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), + } + if io := opts.InternalOptions; io != nil { + tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate + tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint + tOpts.EnableDirectPath = io.EnableDirectPath + tOpts.EnableDirectPathXds = io.EnableDirectPathXds + } + transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts) + if err != nil { + return nil, err + } + + if !secure { + transportCreds = grpcinsecure.NewCredentials() + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Ensure the token exchange HTTP transport uses the same ClientCertProvider as the GRPC API transport. + opts.ClientCertProvider, err = transport.GetClientCertificateProvider(tOpts) + if err != nil { + return nil, err + } + + if opts.APIKey != "" { + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcKeyProvider{ + apiKey: opts.APIKey, + metadata: opts.Metadata, + secure: secure, + }), + ) + } else if !opts.DisableAuthentication { + metadata := opts.Metadata + + var creds *auth.Credentials + if opts.Credentials != nil { + creds = opts.Credentials + } else { + var err error + creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + if err != nil { + return nil, err + } + } + + qp, err := creds.QuotaProjectID(ctx) + if err != nil { + return nil, err + } + if qp != "" { + if metadata == nil { + metadata = make(map[string]string, 1) + } + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } + } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ + creds: creds, + metadata: metadata, + clientUniverseDomain: opts.UniverseDomain, + }), + ) + + // Attempt Direct Path + grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds) + } + + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) + grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) + + return grpc.Dial(endpoint, grpcOpts...) 
+} + +// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. +type grpcKeyProvider struct { + apiKey string + metadata map[string]string + secure bool +} + +func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + metadata := make(map[string]string, len(g.metadata)+1) + metadata["X-goog-api-key"] = g.apiKey + for k, v := range g.metadata { + metadata[k] = v + } + return metadata, nil +} + +func (g *grpcKeyProvider) RequireTransportSecurity() bool { + return g.secure +} + +// grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. +type grpcCredentialsProvider struct { + creds *auth.Credentials + + secure bool + + // Additional metadata attached as headers. + metadata map[string]string + clientUniverseDomain string +} + +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. +func (c *grpcCredentialsProvider) getClientUniverseDomain() string { + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD + } + return internal.DefaultUniverseDomain +} + +func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := c.creds.Token(ctx) + if err != nil { + return nil, err + } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } + if c.secure { + ri, _ := grpccreds.RequestInfoFromContext(ctx) + if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer credentials PerRPCCredentials: %v", err) + } + } + metadata := make(map[string]string, len(c.metadata)+1) + setAuthMetadata(token, metadata) + for k, v := range c.metadata { + metadata[k] = v + } + return metadata, nil +} + +// setAuthMetadata uses the provided token to set the Authorization metadata. +// If the token.Type is empty, the type is assumed to be Bearer. 
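Editor's note: putting the gRPC pieces above together, a rough sketch of how a client might dial through this package. The import paths are the vendored module's; the endpoint, scope, and the commented-out Monitoring stub are illustrative only.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/grpctransport"
)

func main() {
	ctx := context.Background()
	pool, err := grpctransport.Dial(ctx, true /* secure */, &grpctransport.Options{
		Endpoint: "monitoring.googleapis.com:443", // illustrative endpoint
		PoolSize: 4,                               // round-robin across four ClientConns
		DetectOpts: &credentials.DetectOptions{
			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// The pool satisfies grpc.ClientConnInterface, so it can back a generated
	// stub directly, e.g.:
	//   client := monitoringpb.NewMetricServiceClient(pool)
	log.Printf("dialed %d connection(s)", pool.Len())
}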
+func setAuthMetadata(token *auth.Token, m map[string]string) { + typ := token.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + m["authorization"] = typ + " " + token.Value +} + +func (c *grpcCredentialsProvider) RequireTransportSecurity() bool { + return c.secure +} + +func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { + if opts.DisableTelemetry { + return dialOpts + } + return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler())) +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/pool.go b/vendor/cloud.google.com/go/auth/grpctransport/pool.go new file mode 100644 index 0000000000..642679f9b7 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/pool.go @@ -0,0 +1,119 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpctransport + +import ( + "context" + "fmt" + "sync/atomic" + + "google.golang.org/grpc" +) + +// GRPCClientConnPool is an interface that satisfies +// [google.golang.org/grpc.ClientConnInterface] and has some utility functions +// that are needed for connection lifecycle when using in a client library. It +// may be a pool or a single connection. This interface is not intended to, and +// can't be, implemented by others. +type GRPCClientConnPool interface { + // Connection returns a [google.golang.org/grpc.ClientConn] from the pool. + // + // ClientConn aren't returned to the pool and should not be closed directly. + Connection() *grpc.ClientConn + + // Len returns the number of connections in the pool. It will always return + // the same value. + Len() int + + // Close closes every ClientConn in the pool. The error returned by Close + // may be a single error or multiple errors. + Close() error + + grpc.ClientConnInterface + + // private ensure others outside this package can't implement this type + private() +} + +// singleConnPool is a special case for a single connection. +type singleConnPool struct { + *grpc.ClientConn +} + +func (p *singleConnPool) Connection() *grpc.ClientConn { return p.ClientConn } +func (p *singleConnPool) Len() int { return 1 } +func (p *singleConnPool) private() {} + +type roundRobinConnPool struct { + conns []*grpc.ClientConn + + idx uint32 // access via sync/atomic +} + +func (p *roundRobinConnPool) Len() int { + return len(p.conns) +} + +func (p *roundRobinConnPool) Connection() *grpc.ClientConn { + i := atomic.AddUint32(&p.idx, 1) + return p.conns[i%uint32(len(p.conns))] +} + +func (p *roundRobinConnPool) Close() error { + var errs multiError + for _, conn := range p.conns { + if err := conn.Close(); err != nil { + errs = append(errs, err) + } + } + if len(errs) == 0 { + return nil + } + return errs +} + +func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { + return p.Connection().Invoke(ctx, method, args, reply, opts...) 
+} + +func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return p.Connection().NewStream(ctx, desc, method, opts...) +} + +func (p *roundRobinConnPool) private() {} + +// multiError represents errors from multiple conns in the group. +type multiError []error + +func (m multiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go new file mode 100644 index 0000000000..5758e85b5d --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -0,0 +1,247 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. +package httptransport + +import ( + "crypto/tls" + "errors" + "fmt" + "log/slog" + "net/http" + + "cloud.google.com/go/auth" + detect "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" + "github.com/googleapis/gax-go/v2/internallog" +) + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// Options used to configure a [net/http.Client] from [NewClient]. +type Options struct { + // DisableTelemetry disables default telemetry (OpenTelemetry). An example + // reason to do so would be to bind custom telemetry that overrides the + // defaults. + DisableTelemetry bool + // DisableAuthentication specifies that no authentication should be used. It + // is suitable only for testing and for accessing public resources, like + // public Google Cloud Storage buckets. + DisableAuthentication bool + // Headers are extra HTTP headers that will be appended to every outgoing + // request. + Headers http.Header + // BaseRoundTripper overrides the base transport used for serving requests. + // If specified ClientCertProvider is ignored. + BaseRoundTripper http.RoundTripper + // Endpoint overrides the default endpoint to be used for a service. + Endpoint string + // APIKey specifies an API key to be used as the basis for authentication. + // If set DetectOpts are ignored. + APIKey string + // Credentials used to add Authorization header to all requests. If set + // DetectOpts are ignored. + Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. 
It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider + // DetectOpts configures settings for detect Application Default + // Credentials. + DetectOpts *detect.DetectOptions + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This is the universe domain + // configured for the client, which will be compared to the universe domain + // that is separately configured for the credentials. + UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger + + // InternalOptions are NOT meant to be set directly by consumers of this + // package, they should only be set by generated client code. + InternalOptions *InternalOptions +} + +func (o *Options) validate() error { + if o == nil { + return errors.New("httptransport: opts required to be non-nil") + } + if o.InternalOptions != nil && o.InternalOptions.SkipValidation { + return nil + } + hasCreds := o.APIKey != "" || + o.Credentials != nil || + (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || + (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") + if o.DisableAuthentication && hasCreds { + return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials") + } + return nil +} + +// client returns the client a user set for the detect options or nil if one was +// not set. +func (o *Options) client() *http.Client { + if o.DetectOpts != nil && o.DetectOpts.Client != nil { + return o.DetectOpts.Client + } + return nil +} + +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + +func (o *Options) resolveDetectOptions() *detect.DetectOptions { + io := o.InternalOptions + // soft-clone these so we are not updating a ref the user holds and may reuse + do := transport.CloneDetectOptions(o.DetectOpts) + + // If scoped JWTs are enabled user provided an aud, allow self-signed JWT. + if (io != nil && io.EnableJWTWithScope) || do.Audience != "" { + do.UseSelfSignedJWT = true + } + // Only default scopes if user did not also set an audience. + if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 { + do.Scopes = make([]string, len(io.DefaultScopes)) + copy(do.Scopes, io.DefaultScopes) + } + if len(do.Scopes) == 0 && do.Audience == "" && io != nil { + do.Audience = o.InternalOptions.DefaultAudience + } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = detect.GoogleMTLSTokenURL + } + if do.Logger == nil { + do.Logger = o.logger() + } + return do +} + +// InternalOptions are only meant to be set by generated client code. These are +// not meant to be set directly by consumers of this package. Configuration in +// this type is considered EXPERIMENTAL and may be removed at any time in the +// future without warning. +type InternalOptions struct { + // EnableJWTWithScope specifies if scope can be used with self-signed JWT. + EnableJWTWithScope bool + // DefaultAudience specifies a default audience to be used as the audience + // field ("aud") for the JWT token authentication. 
+ DefaultAudience string + // DefaultEndpointTemplate combined with UniverseDomain specifies the + // default endpoint. + DefaultEndpointTemplate string + // DefaultMTLSEndpoint specifies the default mTLS endpoint. + DefaultMTLSEndpoint string + // DefaultScopes specifies the default OAuth2 scopes to be used for a + // service. + DefaultScopes []string + // SkipValidation bypasses validation on Options. It should only be used + // internally for clients that need more control over their transport. + SkipValidation bool + // SkipUniverseDomainValidation skips the verification that the universe + // domain configured for the client matches the universe domain configured + // for the credentials. It should only be used internally for clients that + // need more control over their transport. The default is false. + SkipUniverseDomainValidation bool +} + +// AddAuthorizationMiddleware adds a middleware to the provided client's +// transport that sets the Authorization header with the value produced by the +// provided [cloud.google.com/go/auth.Credentials]. An error is returned only +// if client or creds is nil. +// +// This function does not support setting a universe domain value on the client. +func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { + if client == nil || creds == nil { + return fmt.Errorf("httptransport: client and tp must not be nil") + } + base := client.Transport + if base == nil { + if dt, ok := http.DefaultTransport.(*http.Transport); ok { + base = dt.Clone() + } else { + // Directly reuse the DefaultTransport if the application has + // replaced it with an implementation of RoundTripper other than + // http.Transport. + base = http.DefaultTransport + } + } + client.Transport = &authTransport{ + creds: creds, + base: base, + } + return nil +} + +// NewClient returns a [net/http.Client] that can be used to communicate with a +// Google cloud service, configured with the provided [Options]. It +// automatically appends Authorization headers to all outgoing requests. +func NewClient(opts *Options) (*http.Client, error) { + if err := opts.validate(); err != nil { + return nil, err + } + + tOpts := &transport.Options{ + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), + } + if io := opts.InternalOptions; io != nil { + tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate + tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint + } + clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts) + if err != nil { + return nil, err + } + baseRoundTripper := opts.BaseRoundTripper + if baseRoundTripper == nil { + baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext) + } + // Ensure the token exchange transport uses the same ClientCertProvider as the API transport. + opts.ClientCertProvider = clientCertProvider + trans, err := newTransport(baseRoundTripper, opts) + if err != nil { + return nil, err + } + return &http.Client{ + Transport: trans, + }, nil +} + +// SetAuthHeader uses the provided token to set the Authorization header on a +// request. If the token.Type is empty, the type is assumed to be Bearer. 
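Editor's note: the HTTP entry points above compose in two ways — NewClient builds the whole transport chain, while AddAuthorizationMiddleware only bolts the Authorization header onto an existing client. A hedged sketch under the same assumptions as before (vendored import paths; the scope and URL are placeholders).

package main

import (
	"log"
	"net/http"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/httptransport"
)

func main() {
	// Option 1: let NewClient detect credentials and build the transport chain.
	client, err := httptransport.NewClient(&httptransport.Options{
		DetectOpts: &credentials.DetectOptions{
			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://monitoring.googleapis.com/v3/projects/example-project/metricDescriptors") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	log.Println("status:", resp.Status)

	// Option 2: add only the Authorization middleware to a plain client.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	plain := &http.Client{}
	if err := httptransport.AddAuthorizationMiddleware(plain, creds); err != nil {
		log.Fatal(err)
	}
}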
+func SetAuthHeader(token *auth.Token, req *http.Request) { + typ := token.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + req.Header.Set("Authorization", typ+" "+token.Value) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go new file mode 100644 index 0000000000..ee215b6dc6 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -0,0 +1,234 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httptransport + +import ( + "context" + "crypto/tls" + "net" + "net/http" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" + "cloud.google.com/go/auth/internal/transport/cert" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "golang.org/x/net/http2" +) + +const ( + quotaProjectHeaderKey = "X-goog-user-project" +) + +func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { + var headers = opts.Headers + ht := &headerTransport{ + base: base, + headers: headers, + } + var trans http.RoundTripper = ht + trans = addOpenTelemetryTransport(trans, opts) + switch { + case opts.DisableAuthentication: + // Do nothing. + case opts.APIKey != "": + qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey)) + if qp != "" { + if headers == nil { + headers = make(map[string][]string, 1) + } + headers.Set(quotaProjectHeaderKey, qp) + } + trans = &apiKeyTransport{ + Transport: trans, + Key: opts.APIKey, + } + default: + var creds *auth.Credentials + if opts.Credentials != nil { + creds = opts.Credentials + } else { + var err error + creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + if err != nil { + return nil, err + } + } + qp, err := creds.QuotaProjectID(context.Background()) + if err != nil { + return nil, err + } + if qp != "" { + if headers == nil { + headers = make(map[string][]string, 1) + } + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } + } + var skipUD bool + if iOpts := opts.InternalOptions; iOpts != nil { + skipUD = iOpts.SkipUniverseDomainValidation + } + creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) + trans = &authTransport{ + base: trans, + creds: creds, + clientUniverseDomain: opts.UniverseDomain, + skipUniverseDomainValidation: skipUD, + } + } + return trans, nil +} + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport. +// Otherwise, use a default transport, taking most defaults from +// http.DefaultTransport. +// If TLSCertificate is available, set TLSClientConfig as well. 
+func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() + trans.MaxIdleConnsPerHost = 100 + + if clientCertSource != nil { + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, + } + } + if dialTLSContext != nil { + // If DialTLSContext is set, TLSClientConfig wil be ignored + trans.DialTLSContext = dialTLSContext + } + + // Configures the ReadIdleTimeout HTTP/2 option for the + // transport. This allows broken idle connections to be pruned more quickly, + // preventing the client from attempting to re-use connections that will no + // longer work. + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } + + return trans +} + +type apiKeyTransport struct { + // Key is the API Key to set on requests. + Key string + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) { + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return t.Transport.RoundTrip(&newReq) +} + +type headerTransport struct { + headers http.Header + base http.RoundTripper +} + +func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + + for k, v := range t.headers { + newReq.Header[k] = v + } + + return rt.RoundTrip(&newReq) +} + +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { + if opts.DisableTelemetry { + return trans + } + return otelhttp.NewTransport(trans) +} + +type authTransport struct { + creds *auth.Credentials + base http.RoundTripper + clientUniverseDomain string + skipUniverseDomainValidation bool +} + +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. +func (t *authTransport) getClientUniverseDomain() string { + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD + } + return internal.DefaultUniverseDomain +} + +// RoundTrip authorizes and authenticates the request with an +// access token from Transport's Source. Per the RoundTripper contract we must +// not modify the initial request, so we clone it, and we must close the body +// on any errors that happens during our token logic. 
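Editor's note: the transports above all follow the same RoundTripper discipline — never mutate the caller's request; send a clone with its own header map instead. A tiny standalone illustration of that pattern (the header value and URL are placeholders):

package main

import (
	"fmt"
	"net/http"
)

// staticHeaderTransport mirrors the clone-then-set pattern used by
// headerTransport and authTransport above: the incoming *http.Request is left
// untouched and a clone with its own Header map is sent.
type staticHeaderTransport struct {
	base    http.RoundTripper
	headers http.Header
}

func (t *staticHeaderTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	clone := req.Clone(req.Context())
	for k, vv := range t.headers {
		clone.Header[k] = vv
	}
	return t.base.RoundTrip(clone)
}

func main() {
	client := &http.Client{Transport: &staticHeaderTransport{
		base:    http.DefaultTransport,
		headers: http.Header{"X-Goog-User-Project": []string{"example-project"}},
	}}
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("request error:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}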
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + token, err := t.creds.Token(req.Context()) + if err != nil { + return nil, err + } + if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } + req2 := req.Clone(req.Context()) + SetAuthHeader(token, req2) + reqBodyClosed = true + return t.base.RoundTrip(req2) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 0000000000..05c7e8bdd4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,65 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. 
+func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 0000000000..af490bf4f4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 0000000000..d92178df86 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 0000000000..16be9df306 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go new file mode 100644 index 0000000000..9cd4bed61b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go @@ -0,0 +1,107 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credsfile is meant to hide implementation details from the pubic +// surface of the detect package. It should not import any other packages in +// this module. It is located under the main internal package so other +// sub-packages can use these parsed types as well. +package credsfile + +import ( + "os" + "os/user" + "path/filepath" + "runtime" +) + +const ( + // GoogleAppCredsEnvVar is the environment variable for setting the + // application default credentials. + GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" + userCredsFilename = "application_default_credentials.json" +) + +// CredentialType represents different credential filetypes Google credentials +// can be. +type CredentialType int + +const ( + // UnknownCredType is an unidentified file type. + UnknownCredType CredentialType = iota + // UserCredentialsKey represents a user creds file type. + UserCredentialsKey + // ServiceAccountKey represents a service account file type. + ServiceAccountKey + // ImpersonatedServiceAccountKey represents a impersonated service account + // file type. + ImpersonatedServiceAccountKey + // ExternalAccountKey represents a external account file type. + ExternalAccountKey + // GDCHServiceAccountKey represents a GDCH file type. + GDCHServiceAccountKey + // ExternalAccountAuthorizedUserKey represents a external account authorized + // user file type. + ExternalAccountAuthorizedUserKey +) + +// parseCredentialType returns the associated filetype based on the parsed +// typeString provided. 
+func parseCredentialType(typeString string) CredentialType { + switch typeString { + case "service_account": + return ServiceAccountKey + case "authorized_user": + return UserCredentialsKey + case "impersonated_service_account": + return ImpersonatedServiceAccountKey + case "external_account": + return ExternalAccountKey + case "external_account_authorized_user": + return ExternalAccountAuthorizedUserKey + case "gdch_service_account": + return GDCHServiceAccountKey + default: + return UnknownCredType + } +} + +// GetFileNameFromEnv returns the override if provided or detects a filename +// from the environment. +func GetFileNameFromEnv(override string) string { + if override != "" { + return override + } + return os.Getenv(GoogleAppCredsEnvVar) +} + +// GetWellKnownFileName tries to locate the filepath for the user credential +// file based on the environment. +func GetWellKnownFileName() string { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename) +} + +// guessUnixHomeDir default to checking for HOME, but not all unix systems have +// this set, do have a fallback. +func guessUnixHomeDir() string { + if v := os.Getenv("HOME"); v != "" { + return v + } + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go new file mode 100644 index 0000000000..3be6e5bbb4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -0,0 +1,157 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credsfile + +import ( + "encoding/json" +) + +// Config3LO is the internals of a client creds file. +type Config3LO struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` +} + +// ClientCredentialsFile representation. +type ClientCredentialsFile struct { + Web *Config3LO `json:"web"` + Installed *Config3LO `json:"installed"` + UniverseDomain string `json:"universe_domain"` +} + +// ServiceAccountFile representation. +type ServiceAccountFile struct { + Type string `json:"type"` + ProjectID string `json:"project_id"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + ClientEmail string `json:"client_email"` + ClientID string `json:"client_id"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + UniverseDomain string `json:"universe_domain"` +} + +// UserCredentialsFile representation. 
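Editor's note: credsfile is an internal package and cannot be imported by outside code. The standalone sketch below only mirrors the lookup order of GetFileNameFromEnv/GetWellKnownFileName and the "type" field that drives parseCredentialType's switch; the path handling is deliberately simplified (HOME only, no user.Current fallback).

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// adcFile mirrors the ADC lookup order above: explicit override, then
// GOOGLE_APPLICATION_CREDENTIALS, then gcloud's well-known location.
func adcFile(override string) string {
	if override != "" {
		return override
	}
	if v := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); v != "" {
		return v
	}
	dir := filepath.Join(os.Getenv("HOME"), ".config", "gcloud")
	if runtime.GOOS == "windows" {
		dir = filepath.Join(os.Getenv("APPDATA"), "gcloud")
	}
	return filepath.Join(dir, "application_default_credentials.json")
}

func main() {
	b, err := os.ReadFile(adcFile(""))
	if err != nil {
		fmt.Println("no ADC file:", err)
		return
	}
	var f struct {
		Type string `json:"type"` // e.g. "service_account" or "authorized_user"
	}
	if err := json.Unmarshal(b, &f); err != nil {
		fmt.Println("unreadable ADC file:", err)
		return
	}
	fmt.Println("credential type:", f.Type)
}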
+type UserCredentialsFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + QuotaProjectID string `json:"quota_project_id"` + RefreshToken string `json:"refresh_token"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountFile representation. +type ExternalAccountFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + TokenURL string `json:"token_url"` + CredentialSource *CredentialSource `json:"credential_source,omitempty"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"` + QuotaProjectID string `json:"quota_project_id"` + WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountAuthorizedUserFile representation. +type ExternalAccountAuthorizedUserFile struct { + Type string `json:"type"` + Audience string `json:"audience"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + TokenURL string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + RevokeURL string `json:"revoke_url"` + QuotaProjectID string `json:"quota_project_id"` + UniverseDomain string `json:"universe_domain"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +// +// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question. +// The EnvironmentID should start with AWS if being used for an AWS credential. +type CredentialSource struct { + File string `json:"file"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable,omitempty"` + Certificate *CertificateConfig `json:"certificate"` + EnvironmentID string `json:"environment_id"` // TODO: Make type for this + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + Format *Format `json:"format,omitempty"` +} + +// Format describes the format of a [CredentialSource]. +type Format struct { + // Type is either "text" or "json". When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. + SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// ExecutableConfig represents the command to run for an executable +// [CredentialSource]. +type ExecutableConfig struct { + Command string `json:"command"` + TimeoutMillis int `json:"timeout_millis"` + OutputFile string `json:"output_file"` +} + +// CertificateConfig represents the options used to set up X509 based workload +// [CredentialSource] +type CertificateConfig struct { + UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` + CertificateConfigLocation string `json:"certificate_config_location"` +} + +// ServiceAccountImpersonationInfo has impersonation configuration. 
+type ServiceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + +// ImpersonatedServiceAccountFile representation. +type ImpersonatedServiceAccountFile struct { + Type string `json:"type"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + Delegates []string `json:"delegates"` + CredSource json.RawMessage `json:"source_credentials"` + UniverseDomain string `json:"universe_domain"` +} + +// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file. +type GDCHServiceAccountFile struct { + Type string `json:"type"` + FormatVersion string `json:"format_version"` + Project string `json:"project"` + Name string `json:"name"` + CertPath string `json:"ca_cert_path"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + UniverseDomain string `json:"universe_domain"` +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go new file mode 100644 index 0000000000..a02b9f5df7 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go @@ -0,0 +1,98 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credsfile + +import ( + "encoding/json" +) + +// ParseServiceAccount parses bytes into a [ServiceAccountFile]. +func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) { + var f *ServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseClientCredentials parses bytes into a +// [credsfile.ClientCredentialsFile]. +func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) { + var f *ClientCredentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseUserCredentials parses bytes into a [UserCredentialsFile]. +func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) { + var f *UserCredentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseExternalAccount parses bytes into a [ExternalAccountFile]. +func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) { + var f *ExternalAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseExternalAccountAuthorizedUser parses bytes into a +// [ExternalAccountAuthorizedUserFile]. +func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) { + var f *ExternalAccountAuthorizedUserFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseImpersonatedServiceAccount parses bytes into a +// [ImpersonatedServiceAccountFile]. 
+func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) { + var f *ImpersonatedServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile]. +func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) { + var f *GDCHServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +type fileTypeChecker struct { + Type string `json:"type"` +} + +// ParseFileType determines the [CredentialType] based on bytes provided. +func ParseFileType(b []byte) (CredentialType, error) { + var f fileTypeChecker + if err := json.Unmarshal(b, &f); err != nil { + return 0, err + } + return parseCredentialType(f.Type), nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go new file mode 100644 index 0000000000..6f4ef43bba --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -0,0 +1,219 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "os" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const ( + // TokenTypeBearer is the auth header prefix for bearer tokens. + TokenTypeBearer = "Bearer" + + // QuotaProjectEnvVar is the environment variable for setting the quota + // project. + QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 + + // DefaultUniverseDomain is the default value for universe domain. + // Universe domain is the default service domain for a given Cloud universe. + DefaultUniverseDomain = "googleapis.com" +) + +type clonableTransport interface { + Clone() *http.Transport +} + +// DefaultClient returns an [http.Client] with some defaults set. If +// the current [http.DefaultTransport] is a [clonableTransport], as +// is the case for an [*http.Transport], the clone will be used. +// Otherwise the [http.DefaultTransport] is used directly. +func DefaultClient() *http.Client { + if transport, ok := http.DefaultTransport.(clonableTransport); ok { + return &http.Client{ + Transport: transport.Clone(), + Timeout: 30 * time.Second, + } + } + + return &http.Client{ + Transport: http.DefaultTransport, + Timeout: 30 * time.Second, + } +} + +// ParseKey converts the binary contents of a private key file +// to an crypto.Signer. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. 
+func ParseKey(key []byte) (crypto.Signer, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + var parsedKey crypto.PrivateKey + var err error + parsedKey, err = x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err) + } + } + parsed, ok := parsedKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key is not a signer") + } + return parsed, nil +} + +// GetQuotaProject retrieves quota project with precedence being: override, +// environment variable, creds json file. +func GetQuotaProject(b []byte, override string) string { + if override != "" { + return override + } + if env := os.Getenv(QuotaProjectEnvVar); env != "" { + return env + } + if b == nil { + return "" + } + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(b, &v); err != nil { + return "" + } + return v.QuotaProject +} + +// GetProjectID retrieves project with precedence being: override, +// environment variable, creds json file. +func GetProjectID(b []byte, override string) string { + if override != "" { + return override + } + if env := os.Getenv(projectEnvVar); env != "" { + return env + } + if b == nil { + return "" + } + var v struct { + ProjectID string `json:"project_id"` // standard service account key + Project string `json:"project"` // gdch key + } + if err := json.Unmarshal(b, &v); err != nil { + return "" + } + if v.ProjectID != "" { + return v.ProjectID + } + return v.Project +} + +// DoRequest executes the provided req with the client. It reads the response +// body, closes it, and returns it. +func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) { + resp, err := client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize)) + if err != nil { + return nil, nil, err + } + return resp, body, nil +} + +// ReadAll consumes the whole reader and safely reads the content of its body +// with some overflow protection. +func ReadAll(r io.Reader) ([]byte, error) { + return io.ReadAll(io.LimitReader(r, maxBodySize)) +} + +// StaticCredentialsProperty is a helper for creating static credentials +// properties. +func StaticCredentialsProperty(s string) StaticProperty { + return StaticProperty(s) +} + +// StaticProperty always returns that value of the underlying string. +type StaticProperty string + +// GetProperty loads the properly value provided the given context. +func (p StaticProperty) GetProperty(context.Context) (string, error) { + return string(p), nil +} + +// ComputeUniverseDomainProvider fetches the credentials universe domain from +// the google cloud metadata service. +type ComputeUniverseDomainProvider struct { + MetadataClient *metadata.Client + universeDomainOnce sync.Once + universeDomain string + universeDomainErr error +} + +// GetProperty fetches the credentials universe domain from the google cloud +// metadata service. +func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { + c.universeDomainOnce.Do(func() { + c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient) + }) + if c.universeDomainErr != nil { + return "", c.universeDomainErr + } + return c.universeDomain, nil +} + +// httpGetMetadataUniverseDomain is a package var for unit test substitution. 
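+// Tests can replace it to avoid hitting the real metadata server.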
+var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + return client.GetWithContext(ctx, "universe/universe-domain") +} + +func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) { + universeDomain, err := httpGetMetadataUniverseDomain(ctx, client) + if err == nil { + return universeDomain, nil + } + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return DefaultUniverseDomain, nil + } + return "", err +} diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go new file mode 100644 index 0000000000..9bd55f510c --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -0,0 +1,171 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jwt + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +const ( + // HeaderAlgRSA256 is the RS256 [Header.Algorithm]. + HeaderAlgRSA256 = "RS256" + // HeaderAlgES256 is the ES256 [Header.Algorithm]. + HeaderAlgES256 = "ES256" + // HeaderType is the standard [Header.Type]. + HeaderType = "JWT" +) + +// Header represents a JWT header. +type Header struct { + Algorithm string `json:"alg"` + Type string `json:"typ"` + KeyID string `json:"kid"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Claims represents the claims set of a JWT. +type Claims struct { + // Iss is the issuer JWT claim. + Iss string `json:"iss"` + // Scope is the scope JWT claim. + Scope string `json:"scope,omitempty"` + // Exp is the expiry JWT claim. If unset, default is in one hour from now. + Exp int64 `json:"exp"` + // Iat is the subject issued at claim. If unset, default is now. + Iat int64 `json:"iat"` + // Aud is the audience JWT claim. Optional. + Aud string `json:"aud"` + // Sub is the subject JWT claim. Optional. + Sub string `json:"sub,omitempty"` + // AdditionalClaims contains any additional non-standard JWT claims. Optional. + AdditionalClaims map[string]interface{} `json:"-"` +} + +func (c *Claims) encode() (string, error) { + // Compensate for skew + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.AdditionalClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. 
+ prv, err := json.Marshal(c.AdditionalClaims) + if err != nil { + return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// EncodeJWS encodes the data using the provided key as a JSON web signature. +func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + claims, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, claims) + h := sha256.New() + h.Write([]byte(ss)) + sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// DecodeJWS decodes a claim set from a JWS payload. +func DecodeJWS(payload string) (*Claims, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + return nil, errors.New("invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &Claims{} + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil { + return nil, err + } + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil { + return nil, err + } + return c, err +} + +// VerifyJWS tests whether the provided JWT token's signature was produced by +// the private key associated with the provided public key. +func VerifyJWS(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jwt: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go new file mode 100644 index 0000000000..2f922f7dfe --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -0,0 +1,368 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "log" + "log/slog" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport/cert" + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE" + googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT" + googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + + mtlsMDSRoot = "/run/google-mds-mtls/root.crt" + mtlsMDSKey = "/run/google-mds-mtls/client.key" +) + +// Options is a struct that is duplicated information from the individual +// transport packages in order to avoid cyclic deps. It correlates 1:1 with +// fields on httptransport.Options and grpctransport.Options. +type Options struct { + Endpoint string + DefaultEndpointTemplate string + DefaultMTLSEndpoint string + ClientCertProvider cert.Provider + Client *http.Client + UniverseDomain string + EnableDirectPath bool + EnableDirectPathXds bool + Logger *slog.Logger +} + +// getUniverseDomain returns the default service domain for a given Cloud +// universe. +func (o *Options) getUniverseDomain() string { + if o.UniverseDomain == "" { + return internal.DefaultUniverseDomain + } + return o.UniverseDomain +} + +// isUniverseDomainGDU returns true if the universe domain is the default Google +// universe. +func (o *Options) isUniverseDomainGDU() bool { + return o.getUniverseDomain() == internal.DefaultUniverseDomain +} + +// defaultEndpoint returns the DefaultEndpointTemplate merged with the +// universe domain if the DefaultEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultEndpoint() string { + if o.DefaultEndpointTemplate == "" { + return "" + } + return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + +// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the +// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultMTLSEndpoint() string { + if o.DefaultMTLSEndpoint == "" { + return "" + } + return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + +// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the +// default endpoint. +func (o *Options) mergedEndpoint() (string, error) { + defaultEndpoint := o.defaultEndpoint() + u, err := url.Parse(fixScheme(defaultEndpoint)) + if err != nil { + return "", err + } + return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportCredsAndEndpoint returns an instance of +// [google.golang.org/grpc/credentials.TransportCredentials], and the +// corresponding endpoint to use for GRPC client. 
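+//
+// The function prefers the mTLS-over-MDS S2A channel when the metadata
+// server advertises one, falls back to the plaintext S2A address, and
+// otherwise returns plain TLS credentials built from the client certificate
+// source (if any). A caller in the gRPC transport layer might use it roughly
+// like this (illustrative sketch, not upstream documentation):
+//
+//	creds, endpoint, err := GetGRPCTransportCredsAndEndpoint(opts)
+//	if err != nil {
+//		return nil, err
+//	}
+//	conn, err := grpc.NewClient(endpoint, grpc.WithTransportCredentials(creds))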
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfig returns a client certificate source and a function for +// dialing MTLS with S2A. +func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, nil, err + } + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. 
+ if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, nil +} + +func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) { + rootPEM, err := os.ReadFile(mtlsMDSRootFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, errors.New("failed to load MTLS MDS root certificate") + } + // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain + // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the + // tls.X509KeyPair function as both the certificate chain and private key arguments. + cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile) + if err != nil { + return nil, err + } + tlsConfig := tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS13, + } + return credentials.NewTLS(&tlsConfig), nil +} + +func getTransportConfig(opts *Options) (*transportConfig, error) { + clientCertSource, err := GetClientCertificateProvider(opts) + if err != nil { + return nil, err + } + endpoint, err := getEndpoint(opts, clientCertSource) + if err != nil { + return nil, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + } + + if !shouldUseS2A(clientCertSource, opts) { + return &defaultTransportConfig, nil + } + + s2aAddress := GetS2AAddress(opts.Logger) + mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger) + if s2aAddress == "" && mtlsS2AAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + mtlsS2AAddress: mtlsS2AAddress, + s2aMTLSEndpoint: opts.defaultMTLSEndpoint(), + }, nil +} + +// GetClientCertificateProvider returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +func GetClientCertificateProvider(opts *Options) (cert.Provider, error) { + if !isClientCertificateEnabled(opts) { + return nil, nil + } else if opts.ClientCertProvider != nil { + return opts.ClientCertProvider, nil + } + return cert.DefaultProvider() + +} + +// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var +func isClientCertificateEnabled(opts *Options) bool { + if value, ok := os.LookupEnv(googleAPIUseCertSource); ok { + // error as false is OK + b, _ := strconv.ParseBool(value) + return b + } + return opts.isUniverseDomainGDU() +} + +type transportConfig struct { + // The client certificate source. + clientCertSource cert.Provider + // The corresponding endpoint to use based on client certificate source. 
+ endpoint string + // The plaintext S2A address if it can be used, otherwise an empty string. + s2aAddress string + // The MTLS S2A address if it can be used, otherwise an empty string. + mtlsS2AAddress string + // The MTLS endpoint to use with S2A. + s2aMTLSEndpoint string +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default +// endpoint or the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mTLS vs. regular) by setting +// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return +// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS +// endpoint. +func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { + if opts.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + return opts.defaultMTLSEndpoint(), nil + } + return opts.defaultEndpoint(), nil + } + if strings.Contains(opts.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return opts.Endpoint, nil + } + if opts.defaultEndpoint() == "" { + // If DefaultEndpointTemplate is not configured, + // use the user provided endpoint verbatim. This allows a naked + // "host[:port]" URL to be used with GRPC Direct Path. + return opts.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return opts.mergedEndpoint() +} + +func getMTLSMode() string { + mode := os.Getenv(googleAPIUseMTLS) + if mode == "" { + mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go new file mode 100644 index 0000000000..5cedc50f1e --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go @@ -0,0 +1,65 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certificate provider created by [DefaultProvider]. +// +// A singleton model is used to allow the provider to be reused +// by the transport layer. As mentioned in [DefaultProvider] (provider nil, nil) +// may be returned to indicate a default provider could not be found, which +// will skip extra tls config in the transport layer . 
+type defaultCertData struct { + once sync.Once + provider Provider + err error +} + +var ( + defaultCert defaultCertData +) + +// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultProvider returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. +// +// If neither source is available (due to missing configurations), a nil Source and a nil Error are +// returned to indicate that a default certificate source is unavailable. +func DefaultProvider() (Provider, error) { + defaultCert.once.Do(func() { + defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = nil, nil + } + } + } + }) + return defaultCert.provider, defaultCert.err +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go new file mode 100644 index 0000000000..6c954ae193 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -0,0 +1,54 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxyProvider creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certifcate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { + key, err := client.Cred(configFilePath) + if err != nil { + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. 
+ return nil, errSourceUnavailable + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go new file mode 100644 index 0000000000..738cb21618 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -0,0 +1,124 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectProvider creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := os.ReadFile(configFilePath) + if err != nil { + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. 
if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go new file mode 100644 index 0000000000..347aaced72 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -0,0 +1,114 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "io" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +type certConfigs struct { + Workload *workloadSource `json:"workload"` +} + +type workloadSource struct { + CertPath string `json:"cert_path"` + KeyPath string `json:"key_path"` +} + +type certificateConfig struct { + CertConfigs certConfigs `json:"cert_configs"` +} + +// NewWorkloadX509CertProvider creates a certificate source +// that reads a certificate and private key file from the local file system. +// This is intended to be used for workload identity federation. +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate and key file paths. 
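+// The file is JSON of roughly this shape:
+//
+//	{"cert_configs": {"workload": {"cert_path": "...", "key_path": "..."}}}
+//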
+// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } + } + + certFile, keyFile, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return nil, err + } + + source := &workloadSource{ + CertPath: certFile, + KeyPath: keyFile, + } + return source.getClientCertificate, nil +} + +// getClientCertificate attempts to load the certificate and key from the files specified in the +// certificate config. +func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath) + if err != nil { + return nil, err + } + return &cert, nil +} + +// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private +// key file paths. +func getCertAndKeyFiles(configFilePath string) (string, string, error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + return "", "", errSourceUnavailable + } + + byteValue, err := io.ReadAll(jsonFile) + if err != nil { + return "", "", err + } + + var config certificateConfig + if err := json.Unmarshal(byteValue, &config); err != nil { + return "", "", err + } + + if config.CertConfigs.Workload == nil { + return "", "", errSourceUnavailable + } + + certFile := config.CertConfigs.Workload.CertPath + keyFile := config.CertConfigs.Workload.KeyPath + + if certFile == "" { + return "", "", errors.New("certificate configuration is missing the certificate file location") + } + + if keyFile == "" { + return "", "", errors.New("certificate configuration is missing the key file location") + } + + return certFile, keyFile, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go new file mode 100644 index 0000000000..a633099563 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -0,0 +1,138 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "encoding/json" + "fmt" + "log" + "log/slog" + "os" + "strconv" + "sync" + + "cloud.google.com/go/auth/internal/transport/cert" + "cloud.google.com/go/compute/metadata" +) + +const ( + configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" +) + +var ( + mtlsConfiguration *mtlsConfig + + mtlsOnce sync.Once +) + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +// Returns empty string if not set or invalid. 
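+// The MDS auto-mTLS configuration is fetched at most once per process and
+// then cached, so repeated calls do not hit the metadata server again.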
+func GetS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.PlaintextAddress +} + +// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. +// Returns empty string if not set or invalid. +func GetMTLSS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.MTLSAddress +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` +} + +func (c *mtlsConfig) valid() bool { + return c != nil && c.S2A != nil +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. +type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +func getMetadataMTLSAutoConfig(logger *slog.Logger) { + var err error + mtlsOnce.Do(func() { + mtlsConfiguration, err = queryConfig(logger) + if err != nil { + log.Printf("Getting MTLS config failed: %v", err) + } + }) +} + +var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: logger, + }) + return metadataClient.GetWithContext(context.Background(), configEndpointSuffix) +} + +func queryConfig(logger *slog.Logger) (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig(logger) + if err != nil { + return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err) + } + if config.S2A == nil { + return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config) + } + return &config, nil +} + +func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" { + return false + } + // If custom HTTP client is provided, skip S2A. + if opts.Client != nil { + return false + } + // If directPath is enabled, skip S2A. + return !opts.EnableDirectPath && !opts.EnableDirectPathXds +} + +func isGoogleS2AEnabled() bool { + b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv)) + if err != nil { + return false + } + return b +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go new file mode 100644 index 0000000000..992ac40df0 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -0,0 +1,106 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provided internal helpers for the two transport packages +// (grpctransport and httptransport). +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "time" + + "cloud.google.com/go/auth/credentials" +) + +// CloneDetectOptions clones a user set detect option into some new memory that +// we can internally manipulate before sending onto the detect package. +func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions { + if oldDo == nil { + // it is valid for users not to set this, but we will need to to default + // some options for them in this case so return some initialized memory + // to work with. + return &credentials.DetectOptions{} + } + newDo := &credentials.DetectOptions{ + // Simple types + Audience: oldDo.Audience, + Subject: oldDo.Subject, + EarlyTokenRefresh: oldDo.EarlyTokenRefresh, + TokenURL: oldDo.TokenURL, + STSAudience: oldDo.STSAudience, + CredentialsFile: oldDo.CredentialsFile, + UseSelfSignedJWT: oldDo.UseSelfSignedJWT, + UniverseDomain: oldDo.UniverseDomain, + + // These fields are are pointer types that we just want to use exactly + // as the user set, copy the ref + Client: oldDo.Client, + Logger: oldDo.Logger, + AuthHandlerOptions: oldDo.AuthHandlerOptions, + } + + // Smartly size this memory and copy below. + if len(oldDo.CredentialsJSON) > 0 { + newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON)) + copy(newDo.CredentialsJSON, oldDo.CredentialsJSON) + } + if len(oldDo.Scopes) > 0 { + newDo.Scopes = make([]string, len(oldDo.Scopes)) + copy(newDo.Scopes, oldDo.Scopes) + } + + return newDo +} + +// ValidateUniverseDomain verifies that the universe domain configured for the +// client matches the universe domain configured for the credentials. +func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error { + if clientUniverseDomain != credentialsUniverseDomain { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). If you haven't configured "+ + "the universe domain explicitly, \"googleapis.com\" is the default", + clientUniverseDomain, + credentialsUniverseDomain) + } + return nil +} + +// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { + trans := BaseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. 
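+// It mirrors the proxy and timeout settings of [http.DefaultTransport] but
+// raises MaxIdleConnsPerHost to 100.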
+func BaseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md new file mode 100644 index 0000000000..a1ef292379 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -0,0 +1,68 @@ +# Changelog + +## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) + + +### Bug Fixes + +* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161) + +## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) + + +### Bug Fixes + +* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52)) + +## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16) + + +### Features + +* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## 0.1.0 (2023-10-19) + + 
+### Features + +* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f)) +* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go new file mode 100644 index 0000000000..9cc33e5ee6 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -0,0 +1,200 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package oauth2adapt helps converts types used in [cloud.google.com/go/auth] +// and [golang.org/x/oauth2]. +package oauth2adapt + +import ( + "context" + "encoding/json" + "errors" + + "cloud.google.com/go/auth" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +const ( + oauth2TokenSourceKey = "oauth2.google.tokenSource" + oauth2ServiceAccountKey = "oauth2.google.serviceAccount" + authTokenSourceKey = "auth.google.tokenSource" + authServiceAccountKey = "auth.google.serviceAccount" +) + +// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] +// into a [cloud.google.com/go/auth.TokenProvider]. +func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { + return &tokenProviderAdapter{ts: ts} +} + +type tokenProviderAdapter struct { + ts oauth2.TokenSource +} + +// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It +// is a light wrapper around the underlying TokenSource. +func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { + tok, err := tp.ts.Token() + if err != nil { + var err2 *oauth2.RetrieveError + if ok := errors.As(err, &err2); ok { + return nil, AuthErrorFromRetrieveError(err2) + } + return nil, err + } + // Preserve compute token metadata, for both types of tokens. 
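+	// Values are stored under both the legacy oauth2.google.* keys and the
+	// newer auth.google.* keys so that either style of consumer can read them.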
+ metadata := map[string]interface{}{} + if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok { + metadata[authTokenSourceKey] = val + metadata[oauth2TokenSourceKey] = val + } + if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok { + metadata[authServiceAccountKey] = val + metadata[oauth2ServiceAccountKey] = val + } + return &auth.Token{ + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + Metadata: metadata, + }, nil +} + +// TokenSourceFromTokenProvider converts any +// [cloud.google.com/go/auth.TokenProvider] into a +// [golang.org/x/oauth2.TokenSource]. +func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource { + return &tokenSourceAdapter{tp: tp} +} + +type tokenSourceAdapter struct { + tp auth.TokenProvider +} + +// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It +// is a light wrapper around the underlying TokenProvider. +func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { + tok, err := ts.tp.Token(context.Background()) + if err != nil { + var err2 *auth.Error + if ok := errors.As(err, &err2); ok { + return nil, AddRetrieveErrorToAuthError(err2) + } + return nil, err + } + tok2 := &oauth2.Token{ + AccessToken: tok.Value, + TokenType: tok.Type, + Expiry: tok.Expiry, + } + // Preserve token metadata. + m := tok.Metadata + if m != nil { + // Copy map to avoid concurrent map writes error (#11161). + metadata := make(map[string]interface{}, len(m)+2) + for k, v := range m { + metadata[k] = v + } + // Append compute token metadata in converted form. + if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { + metadata[oauth2TokenSourceKey] = val + } + if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" { + metadata[oauth2ServiceAccountKey] = val + } + tok2 = tok2.WithExtra(metadata) + } + return tok2, nil +} + +// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] +// to a [cloud.google.com/go/auth.Credentials]. +func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials { + if creds == nil { + return nil + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: TokenProviderFromTokenSource(creds.TokenSource), + JSON: creds.JSON, + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.ProjectID, nil + }), + UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.GetUniverseDomain() + }), + }) +} + +// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials] +// to a [golang.org/x/oauth2/google.Credentials]. +func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials { + if creds == nil { + return nil + } + // Throw away errors as old credentials are not request aware. Also, no + // network requests are currently happening for this use case. 
+ projectID, _ := creds.ProjectID(context.Background()) + + return &google.Credentials{ + TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider), + ProjectID: projectID, + JSON: creds.JSON(), + UniverseDomainProvider: func() (string, error) { + return creds.UniverseDomain(context.Background()) + }, + } +} + +type oauth2Error struct { + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +// AddRetrieveErrorToAuthError returns the same error provided and adds a +// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the +// [cloud.google.com/go/auth.Error]. +func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error { + if err == nil { + return nil + } + e := &oauth2.RetrieveError{ + Response: err.Response, + Body: err.Body, + } + err.Err = e + if len(err.Body) > 0 { + var oErr oauth2Error + // ignore the error as it only fills in extra details + json.Unmarshal(err.Body, &oErr) + e.ErrorCode = oErr.ErrorCode + e.ErrorDescription = oErr.ErrorDescription + e.ErrorURI = oErr.ErrorURI + } + return err +} + +// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that +// wraps the provided [golang.org/x/oauth2.RetrieveError]. +func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error { + if err == nil { + return nil + } + return &auth.Error{ + Response: err.Response, + Body: err.Body, + Err: err, + } +} diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go new file mode 100644 index 0000000000..07804dc162 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -0,0 +1,382 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for +// OAuth consent at the specified auth code URL and returns an auth code and +// state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// Options3LO are the options for doing a 3-legged OAuth2 flow. +type Options3LO struct { + // ClientID is the application's ID. + ClientID string + // ClientSecret is the application's secret. Not required if AuthHandlerOpts + // is set. + ClientSecret string + // AuthURL is the URL for authenticating. + AuthURL string + // TokenURL is the URL for retrieving a token. + TokenURL string + // AuthStyle is used to describe how to client info in the token request. + AuthStyle Style + // RefreshToken is the token used to refresh the credential. Not required + // if AuthHandlerOpts is set. + RefreshToken string + // RedirectURL is the URL to redirect users to. Optional. 
+ RedirectURL string + // Scopes specifies requested permissions for the Token. Optional. + Scopes []string + + // URLParams are the set of values to apply to the token exchange. Optional. + URLParams url.Values + // Client is the client to be used to make the underlying token requests. + // Optional. + Client *http.Client + // EarlyTokenExpiry is the time before the token expires that it should be + // refreshed. If not set the default value is 3 minutes and 45 seconds. + // Optional. + EarlyTokenExpiry time.Duration + + // AuthHandlerOpts provides a set of options for doing a + // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. + AuthHandlerOpts *AuthorizationHandlerOptions + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger +} + +func (o *Options3LO) validate() error { + if o == nil { + return errors.New("auth: options must be provided") + } + if o.ClientID == "" { + return errors.New("auth: client ID must be provided") + } + if o.AuthHandlerOpts == nil && o.ClientSecret == "" { + return errors.New("auth: client secret must be provided") + } + if o.AuthURL == "" { + return errors.New("auth: auth URL must be provided") + } + if o.TokenURL == "" { + return errors.New("auth: token URL must be provided") + } + if o.AuthStyle == StyleUnknown { + return errors.New("auth: auth style must be provided") + } + if o.AuthHandlerOpts == nil && o.RefreshToken == "" { + return errors.New("auth: refresh token must be provided") + } + return nil +} + +func (o *Options3LO) logger() *slog.Logger { + return internallog.New(o.Logger) +} + +// PKCEOptions holds parameters to support PKCE. +type PKCEOptions struct { + // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. + Challenge string // The un-padded, base64-url-encoded string of the encrypted code verifier. + // ChallengeMethod is the encryption method (ex. S256). + ChallengeMethod string + // Verifier is the original, non-encrypted secret. + Verifier string // The original, non-encrypted secret. +} + +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + // error fields + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +func (o *Options3LO) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.DefaultClient() +} + +// authCodeURL returns a URL that points to a OAuth2 consent page. 
+func (o *Options3LO) authCodeURL(state string, values url.Values) string { + var buf bytes.Buffer + buf.WriteString(o.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {o.ClientID}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if len(o.Scopes) > 0 { + v.Set("scope", strings.Join(o.Scopes, " ")) + } + if state != "" { + v.Set("state", state) + } + if o.AuthHandlerOpts != nil { + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Challenge != "" { + v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge) + } + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" { + v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod) + } + } + for k := range values { + v.Set(k, v.Get(k)) + } + if strings.Contains(o.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2 +// configuration. The TokenProvider is caches and auto-refreshes tokens by +// default. +func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.AuthHandlerOpts != nil { + return new3LOTokenProviderWithAuthHandler(opts), nil + } + return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }), nil +} + +// AuthorizationHandlerOptions provides a set of options to specify for doing a +// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. +type AuthorizationHandlerOptions struct { + // AuthorizationHandler specifies the handler used to for the authorization + // part of the flow. + Handler AuthorizationHandler + // State is used verify that the "state" is identical in the request and + // response before exchanging the auth code for OAuth2 token. + State string + // PKCEOpts allows setting configurations for PKCE. Optional. + PKCEOpts *PKCEOptions +} + +func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider { + return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }) +} + +// exchange handles the final exchange portion of the 3lo flow. Returns a Token, +// refreshToken, and error. +func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) { + // Build request + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if o.AuthHandlerOpts != nil && + o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Verifier != "" { + v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier) + } + for k := range o.URLParams { + v.Set(k, o.URLParams.Get(k)) + } + return fetchToken(ctx, o, v) +} + +// This struct is not safe for concurrent access alone, but the way it is used +// in this package by wrapping it with a cachedTokenProvider makes it so. 
+type tokenProvider3LO struct { + opts *Options3LO + client *http.Client + refreshToken string +} + +func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) { + if tp.refreshToken == "" { + return nil, errors.New("auth: token expired and refresh token is not set") + } + v := url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tp.refreshToken}, + } + for k := range tp.opts.URLParams { + v.Set(k, tp.opts.URLParams.Get(k)) + } + + tk, rt, err := fetchToken(ctx, tp.opts, v) + if err != nil { + return nil, err + } + if tp.refreshToken != rt && rt != "" { + tp.refreshToken = rt + } + return tk, err +} + +type tokenProviderWithHandler struct { + opts *Options3LO + state string +} + +func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) { + url := tp.opts.authCodeURL(tp.state, nil) + code, state, err := tp.opts.AuthHandlerOpts.Handler(url) + if err != nil { + return nil, err + } + if state != tp.state { + return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow") + } + tok, _, err := tp.opts.exchange(ctx, code) + return tok, err +} + +// fetchToken returns a Token, refresh token, and/or an error. +func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) { + var refreshToken string + if o.AuthStyle == StyleInParams { + if o.ClientID != "" { + v.Set("client_id", o.ClientID) + } + if o.ClientSecret != "" { + v.Set("client_secret", o.ClientSecret) + } + } + req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, refreshToken, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if o.AuthStyle == StyleInHeader { + req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) + } + logger := o.logger() + + logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) + // Make request + resp, body, err := internal.DoRequest(o.client(), req) + if err != nil { + return nil, refreshToken, err + } + logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body)) + failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 + tokError := &Error{ + Response: resp, + Body: body, + } + + var token *Token + // errors ignored because of default switch on content + content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err) + } + tokError.code = vals.Get("error") + tokError.description = vals.Get("error_description") + tokError.uri = vals.Get("error_uri") + token = &Token{ + Value: vals.Get("access_token"), + Type: vals.Get("token_type"), + Metadata: make(map[string]interface{}, len(vals)), + } + for k, v := range vals { + token.Metadata[k] = v + } + refreshToken = vals.Get("refresh_token") + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err) + } + tokError.code = tj.ErrorCode + 
tokError.description = tj.ErrorDescription + tokError.uri = tj.ErrorURI + token = &Token{ + Value: tj.AccessToken, + Type: tj.TokenType, + Expiry: tj.expiry(), + Metadata: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Metadata) // optional field, skip err check + refreshToken = tj.RefreshToken + } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || tokError.code != "" { + return nil, refreshToken, tokError + } + if token.Value == "" { + return nil, refreshToken, errors.New("auth: server response missing access_token") + } + return token, refreshToken, nil +} diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 967e060747..bcfb5d8165 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,45 @@ # Changes +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13) + + +### Features + +* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) + + +### Features + +* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01) + + +### Features + +* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3)) + + +### Documentation + +* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f)) + ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go new file mode 100644 index 0000000000..8ec673b882 --- /dev/null 
+++ b/vendor/cloud.google.com/go/compute/metadata/log.go @@ -0,0 +1,149 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" +) + +// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog +// to avoid the dependency. The compute/metadata module is used by too many +// non-client library modules that can't justify the dependency. + +// The handler returned if logging is not enabled. +type noOpHandler struct{} + +func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return h +} + +func (h noOpHandler) WithGroup(_ string) slog.Handler { + return h +} + +// httpRequest returns a lazily evaluated [slog.LogValuer] for a +// [http.Request] and the associated body. +func httpRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// httpResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func httpResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) 
+} + +func processPayload(payload []byte) (slog.Attr, bool) { + peekChar := payload[0] + if peekChar == '{' { + // JSON object + var m map[string]any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else if peekChar == '[' { + // JSON array + var m []any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else { + // Everything else + buf := &bytes.Buffer{} + if err := json.Compact(buf, payload); err != nil { + // Write raw payload incase of error + buf.Write(payload) + } + return slog.String("payload", buf.String()), true + } + return slog.Attr{}, false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index f67e3c7eea..4c18a383a4 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -24,11 +24,11 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" "os" - "runtime" "strings" "sync" "time" @@ -61,7 +61,10 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var defaultClient = &Client{hc: newDefaultHTTPClient()} +var defaultClient = &Client{ + hc: newDefaultHTTPClient(), + logger: slog.New(noOpHandler{}), +} func newDefaultHTTPClient() *http.Client { return &http.Client{ @@ -88,16 +91,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -func (c *cachedValue) get(cl *Client) (v string, err error) { +func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(context.Background(), c.k) + v, err = cl.getTrimmed(ctx, c.k) } else { - v, err = cl.GetWithContext(context.Background(), c.k) + v, err = cl.GetWithContext(ctx, c.k) } if err == nil { c.v = v @@ -110,7 +113,9 @@ var ( onGCE bool ) -// OnGCE reports whether this process is running on Google Compute Engine. +// OnGCE reports whether this process is running on Google Compute Platforms. +// NOTE: True returned from `OnGCE` does not guarantee that the metadata server +// is accessible from this process and have all the metadata defined. func OnGCE() bool { onGCEOnce.Do(initOnGCE) return onGCE @@ -188,21 +193,9 @@ func testOnGCE() bool { return <-resc } -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - // Subscribe calls Client.SubscribeWithContext on the default client. +// +// Deprecated: Please use the context aware variant [SubscribeWithContext]. func Subscribe(suffix string, fn func(v string, ok bool) error) error { return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } @@ -225,55 +218,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) { } // ProjectID returns the current instance's project ID string. 
-func ProjectID() (string, error) { return defaultClient.ProjectID() } +// +// Deprecated: Please use the context aware variant [ProjectIDWithContext]. +func ProjectID() (string, error) { + return defaultClient.ProjectIDWithContext(context.Background()) +} + +// ProjectIDWithContext returns the current instance's project ID string. +func ProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.ProjectIDWithContext(ctx) +} // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } +// +// Deprecated: Please use the context aware variant [NumericProjectIDWithContext]. +func NumericProjectID() (string, error) { + return defaultClient.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func NumericProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.NumericProjectIDWithContext(ctx) +} // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } +// +// Deprecated: Please use the context aware variant [InternalIPWithContext]. +func InternalIP() (string, error) { + return defaultClient.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func InternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.InternalIPWithContext(ctx) +} // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// +// Deprecated: Please use the context aware variant [ExternalIPWithContext]. +func ExternalIP() (string, error) { + return defaultClient.ExternalIPWithContext(context.Background()) +} -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func ExternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.ExternalIPWithContext(ctx) +} + +// Email calls Client.EmailWithContext on the default client. +// +// Deprecated: Please use the context aware variant [EmailWithContext]. +func Email(serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext calls Client.EmailWithContext on the default client. +func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(ctx, serviceAccount) +} // Hostname returns the instance's hostname. This will be of the form // ".c..internal". -func Hostname() (string, error) { return defaultClient.Hostname() } +// +// Deprecated: Please use the context aware variant [HostnameWithContext]. +func Hostname() (string, error) { + return defaultClient.HostnameWithContext(context.Background()) +} + +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". +func HostnameWithContext(ctx context.Context) (string, error) { + return defaultClient.HostnameWithContext(ctx) +} // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. 
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } +// +// Deprecated: Please use the context aware variant [InstanceTagsWithContext]. +func InstanceTags() ([]string, error) { + return defaultClient.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTagsWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceTagsWithContext(ctx) +} // InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } +// +// Deprecated: Please use the context aware variant [InstanceIDWithContext]. +func InstanceID() (string, error) { + return defaultClient.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func InstanceIDWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceIDWithContext(ctx) +} // InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } +// +// Deprecated: Please use the context aware variant [InstanceNameWithContext]. +func InstanceName() (string, error) { + return defaultClient.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func InstanceNameWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceNameWithContext(ctx) +} // Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } +// +// Deprecated: Please use the context aware variant [ZoneWithContext]. +func Zone() (string, error) { + return defaultClient.ZoneWithContext(context.Background()) +} -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func ZoneWithContext(ctx context.Context) (string, error) { + return defaultClient.ZoneWithContext(ctx) +} -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } +// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributesWithContext. +func InstanceAttributes() ([]string, error) { + return defaultClient.InstanceAttributesWithContext(context.Background()) +} -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +// InstanceAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceAttributesWithContext(ctx) +} + +// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributesWithContext]. +func ProjectAttributes() ([]string, error) { + return defaultClient.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. 
+func ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.ProjectAttributesWithContext(ctx) +} + +// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext]. func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) +} + +// Scopes calls Client.ScopesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) } -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -286,18 +412,42 @@ func strsContains(ss []string, s string) bool { // A Client provides metadata. type Client struct { - hc *http.Client + hc *http.Client + logger *slog.Logger +} + +// Options for configuring a [Client]. +type Options struct { + // Client is the HTTP client used to make requests. Optional. + Client *http.Client + // Logger is used to log information about HTTP request and responses. + // If not provided, nothing will be logged. Optional. + Logger *slog.Logger } // NewClient returns a Client that can be used to fetch metadata. // Returns the client that uses the specified http.Client for HTTP requests. // If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { - if c == nil { + return NewWithOptions(&Options{ + Client: c, + }) +} + +// NewWithOptions returns a Client that is configured with the provided Options. 
+func NewWithOptions(opts *Options) *Client { + if opts == nil { return defaultClient } - - return &Client{hc: c} + client := opts.Client + if client == nil { + client = newDefaultHTTPClient() + } + logger := opts.Logger + if logger == nil { + logger = slog.New(noOpHandler{}) + } + return &Client{hc: client, logger: logger} } // getETag returns a value from the metadata service as well as the associated ETag. @@ -327,14 +477,26 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string req.Header.Set("User-Agent", userAgent) var res *http.Response var reqErr error + var body []byte retryer := newRetryer() for { + c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil)) res, reqErr = c.hc.Do(req) var code int if res != nil { code = res.StatusCode + body, err = io.ReadAll(res.Body) + if err != nil { + res.Body.Close() + return "", "", err + } + c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body)) + res.Body.Close() } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } @@ -345,18 +507,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string if reqErr != nil { return "", "", reqErr } - defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := io.ReadAll(res.Body) - if err != nil { - return "", "", err - } if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} + return "", "", &Error{Code: res.StatusCode, Message: string(body)} } - return string(all), res.Header.Get("Etag"), nil + return string(body), res.Header.Get("Etag"), nil } // Get returns a value from the metadata service. @@ -381,6 +538,10 @@ func (c *Client) Get(suffix string) (string, error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// NOTE: Without an extra deadline in the context this call can take in the +// worst case, with internal backoff retries, up to 15 seconds (e.g. when server +// is responding slowly). Pass context with additional timeouts when needed. func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { val, _, err := c.getETag(ctx, suffix) return val, err @@ -392,8 +553,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e return } -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.GetWithContext(context.Background(), suffix) +func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) { + j, err := c.GetWithContext(ctx, suffix) if err != nil { return nil, err } @@ -405,45 +566,104 @@ func (c *Client) lines(suffix string) ([]string, error) { } // ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext]. +func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) } + +// ProjectIDWithContext returns the current instance's project ID string. +func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) } // NumericProjectID returns the current instance's numeric project ID. 
-func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } +// +// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext]. +func (c *Client) NumericProjectID() (string, error) { + return c.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) { + return projNum.get(ctx, c) +} // InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext]. +func (c *Client) InstanceID() (string, error) { + return c.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) { + return instID.get(ctx, c) +} // InternalIP returns the instance's primary internal IP address. +// +// Deprecated: Please use the context aware variant [Client.InternalIPWithContext]. func (c *Client) InternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip") + return c.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/ip") } // Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. +// +// Deprecated: Please use the context aware variant [Client.EmailWithContext]. func (c *Client) Email(serviceAccount string) (string, error) { + return c.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext returns the email address associated with the service account. +// The serviceAccount parameter default value (empty string or "default" value) +// will use the instance's main account. +func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email") + return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. +// +// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext]. func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip") + return c.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". +// +// Deprecated: Please use the context aware variant [Client.HostnameWithContext]. 
func (c *Client) Hostname() (string, error) { - return c.getTrimmed(context.Background(), "instance/hostname") + return c.HostnameWithContext(context.Background()) } -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) HostnameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags. +// +// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext]. func (c *Client) InstanceTags() ([]string, error) { + return c.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) { var s []string - j, err := c.GetWithContext(context.Background(), "instance/tags") + j, err := c.GetWithContext(ctx, "instance/tags") if err != nil { return nil, err } @@ -454,13 +674,27 @@ func (c *Client) InstanceTags() ([]string, error) { } // InstanceName returns the current VM's instance ID string. +// +// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext]. func (c *Client) InstanceName() (string, error) { - return c.getTrimmed(context.Background(), "instance/name") + return c.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". +// +// Deprecated: Please use the context aware variant [Client.ZoneWithContext]. func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed(context.Background(), "instance/zone") + return c.ZoneWithContext(context.Background()) +} + +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func (c *Client) ZoneWithContext(ctx context.Context) (string, error) { + zone, err := c.getTrimmed(ctx, "instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -471,12 +705,34 @@ func (c *Client) Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext]. +func (c *Client) InstanceAttributes() ([]string, error) { + return c.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "instance/attributes/") +} // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. 
-func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext]. +func (c *Client) ProjectAttributes() ([]string, error) { + return c.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "project/attributes/") +} // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -486,8 +742,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext]. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "instance/attributes/"+attr) + return c.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -498,18 +768,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext]. func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "project/attributes/"+attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. 
+func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f8917..2e53f01230 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go new file mode 100644 index 0000000000..e0704fa647 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -0,0 +1,26 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !linux + +package metadata + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + // We don't currently have checks for other GOOS + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go new file mode 100644 index 0000000000..74689acbbb --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -0,0 +1,28 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux + +package metadata + +import ( + "os" + "strings" +) + +func systemInfoSuggestsGCE() bool { + b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) + return name == "Google" || name == "Google Compute Engine" +} diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go new file mode 100644 index 0000000000..c0ce627872 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package metadata + +import ( + "strings" + + "golang.org/x/sys/windows/registry" +) + +func systemInfoSuggestsGCE() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("SystemProductName") + if err != nil { + return false + } + s = strings.TrimSpace(s) + return strings.HasPrefix(s, "Google") +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go index be257cb277..ae1dd6b9a2 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go @@ -54,6 +54,7 @@ func defaultAlertPolicyGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -272,7 +273,9 @@ func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn { func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go index e18457af4a..da216081d5 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go @@ -56,6 +56,7 @@ func defaultGroupGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -293,7 +294,9 @@ func (c *groupGRPCClient) Connection() *grpc.ClientConn { func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go index f38f723983..d43d261d18 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go @@ -60,6 +60,7 @@ func defaultMetricGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -201,31 +202,32 @@ func (c *MetricClient) Connection() *grpc.ClientConn { return c.internalClient.Connection() } -// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Workspace. +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...) } -// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Workspace. +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { return c.internalClient.GetMonitoredResourceDescriptor(ctx, req, opts...) } -// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Workspace. +// ListMetricDescriptors lists metric descriptors that match a filter. func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { return c.internalClient.ListMetricDescriptors(ctx, req, opts...) 
 }
 
-// GetMetricDescriptor gets a single metric descriptor. This method does not require a Workspace.
+// GetMetricDescriptor gets a single metric descriptor.
 func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
 	return c.internalClient.GetMetricDescriptor(ctx, req, opts...)
 }
 
 // CreateMetricDescriptor creates a new metric descriptor.
-// The creation is executed asynchronously and callers may check the returned
-// operation to track its progress.
+// The creation is executed asynchronously.
 // User-created metric descriptors define
 // custom metrics (at https://cloud.google.com/monitoring/custom-metrics).
+// The metric descriptor is updated if it already exists,
+// except that metric labels are never removed.
 func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
 	return c.internalClient.CreateMetricDescriptor(ctx, req, opts...)
 }
@@ -237,7 +239,7 @@ func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitori
 	return c.internalClient.DeleteMetricDescriptor(ctx, req, opts...)
 }
 
-// ListTimeSeries lists time series that match a filter. This method does not require a Workspace.
+// ListTimeSeries lists time series that match a filter.
 func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
 	return c.internalClient.ListTimeSeries(ctx, req, opts...)
 }
@@ -246,6 +248,9 @@ func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.Lis
 // The response is empty if all time series in the request were written.
 // If any time series could not be written, a corresponding failure message is
 // included in the error response.
+// This method does not support
+// resource locations constraint of an organization
+// policy (at https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
 func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
 	return c.internalClient.CreateTimeSeries(ctx, req, opts...)
 }
@@ -327,7 +332,9 @@ func (c *metricGRPCClient) Connection() *grpc.ClientConn {
 func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) {
 	kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
 	kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+	c.xGoogHeaders = []string{
+		"x-goog-api-client", gax.XGoogHeader(kv...),
+	}
 }
 
 // Close closes the connection to the API service. The user should invoke this when
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
index 3dec704677..e7b3595fa6 100644
--- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/alert.proto package monitoringpb @@ -449,8 +449,8 @@ func (x *AlertPolicy) GetSeverity() AlertPolicy_Severity { return AlertPolicy_SEVERITY_UNSPECIFIED } -// A content string and a MIME type that describes the content string's -// format. +// Documentation that is included in the notifications and incidents +// pertaining to this policy. type AlertPolicy_Documentation struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -480,6 +480,9 @@ type AlertPolicy_Documentation struct { // If this field is missing or empty, a default subject line will be // generated. Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + // Optional. Links to content such as playbooks, repositories, and other + // resources. This field can contain up to 3 entries. + Links []*AlertPolicy_Documentation_Link `protobuf:"bytes,4,rep,name=links,proto3" json:"links,omitempty"` } func (x *AlertPolicy_Documentation) Reset() { @@ -535,6 +538,13 @@ func (x *AlertPolicy_Documentation) GetSubject() string { return "" } +func (x *AlertPolicy_Documentation) GetLinks() []*AlertPolicy_Documentation_Link { + if x != nil { + return x.Links + } + return nil +} + // A condition is a true/false test that determines when an alerting policy // should open an incident. If a condition evaluates to true, it signifies // that something is wrong. @@ -786,6 +796,69 @@ func (x *AlertPolicy_AlertStrategy) GetNotificationChannelStrategy() []*AlertPol return nil } +// Links to content such as playbooks, repositories, and other resources. +type AlertPolicy_Documentation_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A short display name for the link. The display name must not be empty + // or exceed 63 characters. Example: "playbook". + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The url of a webpage. + // A url can be templatized by using variables + // in the path or the query parameters. The total length of a URL should + // not exceed 2083 characters before and after variable expansion. + // Example: "https://my_domain.com/playbook?name=${resource.name}" + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *AlertPolicy_Documentation_Link) Reset() { + *x = AlertPolicy_Documentation_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation_Link) ProtoMessage() {} + +func (x *AlertPolicy_Documentation_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation_Link.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_Documentation_Link) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *AlertPolicy_Documentation_Link) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *AlertPolicy_Documentation_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + // Specifies how many time series must fail a predicate to trigger a // condition. If not specified, then a `{count: 1}` trigger is used. type AlertPolicy_Condition_Trigger struct { @@ -805,7 +878,7 @@ type AlertPolicy_Condition_Trigger struct { func (x *AlertPolicy_Condition_Trigger) Reset() { *x = AlertPolicy_Condition_Trigger{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -818,7 +891,7 @@ func (x *AlertPolicy_Condition_Trigger) String() string { func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -961,14 +1034,15 @@ type AlertPolicy_Condition_MetricThreshold struct { // are specified. Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` // A condition control that determines how metric-threshold conditions - // are evaluated when data stops arriving. + // are evaluated when data stops arriving. To use this control, the value + // of the `duration` field must be greater than or equal to 60 seconds. 
EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,11,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"` } func (x *AlertPolicy_Condition_MetricThreshold) Reset() { *x = AlertPolicy_Condition_MetricThreshold{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -981,7 +1055,7 @@ func (x *AlertPolicy_Condition_MetricThreshold) String() string { func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1119,7 +1193,7 @@ type AlertPolicy_Condition_MetricAbsence struct { func (x *AlertPolicy_Condition_MetricAbsence) Reset() { *x = AlertPolicy_Condition_MetricAbsence{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1132,7 +1206,7 @@ func (x *AlertPolicy_Condition_MetricAbsence) String() string { func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1206,7 +1280,7 @@ type AlertPolicy_Condition_LogMatch struct { func (x *AlertPolicy_Condition_LogMatch) Reset() { *x = AlertPolicy_Condition_LogMatch{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1219,7 +1293,7 @@ func (x *AlertPolicy_Condition_LogMatch) String() string { func (*AlertPolicy_Condition_LogMatch) ProtoMessage() {} func (x *AlertPolicy_Condition_LogMatch) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1285,7 +1359,7 @@ type AlertPolicy_Condition_MonitoringQueryLanguageCondition struct { func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) Reset() { *x = AlertPolicy_Condition_MonitoringQueryLanguageCondition{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1298,7 +1372,7 @@ func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) String() string func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoMessage() {} func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoReflect() protoreflect.Message { 
- mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1436,7 +1510,7 @@ type AlertPolicy_Condition_PrometheusQueryLanguageCondition struct { func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) Reset() { *x = AlertPolicy_Condition_PrometheusQueryLanguageCondition{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1449,7 +1523,7 @@ func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) String() string func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoMessage() {} func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1526,7 +1600,7 @@ type AlertPolicy_Condition_MetricThreshold_ForecastOptions struct { func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) Reset() { *x = AlertPolicy_Condition_MetricThreshold_ForecastOptions{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1539,7 +1613,7 @@ func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) String() string func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoMessage() {} func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1576,7 +1650,7 @@ type AlertPolicy_AlertStrategy_NotificationRateLimit struct { func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) Reset() { *x = AlertPolicy_AlertStrategy_NotificationRateLimit{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[14] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1589,7 +1663,7 @@ func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) String() string { func (*AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoMessage() {} func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[14] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1635,7 +1709,7 @@ type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct { func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) Reset() { *x = AlertPolicy_AlertStrategy_NotificationChannelStrategy{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + mi := 
&file_google_monitoring_v3_alert_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1648,7 +1722,7 @@ func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) String() string func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoMessage() {} func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1698,7 +1772,7 @@ var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf4, 0x29, 0x0a, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x2b, 0x0a, 0x0b, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, @@ -1753,301 +1827,310 @@ var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x65, - 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6d, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x69, - 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x92, 0x1a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, - 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, - 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 
0x73, - 0x68, 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, - 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, - 0x6e, 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, - 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x9d, - 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x9d, - 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x1a, 0x45, - 0x0a, 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, - 0x12, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, - 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, - 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10, 0x66, 0x6f, - 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, - 0x67, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, - 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, - 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, - 0x74, 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, - 0x74, 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, - 0x1a, 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, - 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, - 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, - 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, - 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 
0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, - 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x1a, 0xf3, + 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, + 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3b, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x1a, 0x92, 0x1a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 
0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, 0x6e, + 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, - 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, - 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42, 0x0a, 0x14, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 
0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x1a, 0x45, 0x0a, + 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, 0x12, + 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, 0x64, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10, 0x66, 0x6f, 0x72, + 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, - 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 
0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0xc4, 0x03, 0x0a, - 0x20, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x08, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 
0x65, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, + 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76, 0x61, 0x6c, - 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x12, 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, - 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x61, - 0x6c, 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, - 0x23, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, - 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, - 0x41, 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, - 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, - 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, - 
0x12, 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x4f, - 0x50, 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, - 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, - 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, - 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, - 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, - 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, - 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcc, 0x04, 0x0a, 0x0d, 0x41, - 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x7d, 0x0a, 0x17, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, - 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x1a, + 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, + 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 
0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x61, - 0x75, 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, - 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x1a, 0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, - 0x65, 0x67, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x46, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1, 0x01, 0x0a, 0x08, + 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 
0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, - 0x44, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, - 0x4e, 0x44, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, - 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, - 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, - 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, - 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x7d, 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, - 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, + 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0xc4, 0x03, 0x0a, 0x20, + 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 
0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, 0x23, + 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x45, + 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, + 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, + 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x4f, 0x50, + 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 
0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcc, 0x04, 0x0a, 0x0d, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x7d, 0x0a, 0x17, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x43, + 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x1a, 0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x12, 0x46, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, + 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, + 0x44, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, 0x65, + 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, + 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, - 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 
0x2e, 0x76, 0x33, - 0x42, 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, - 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, - 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2063,8 +2146,8 @@ func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 16) -var file_google_monitoring_v3_alert_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_alert_proto_goTypes = []any{ (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType (AlertPolicy_Severity)(0), // 1: google.monitoring.v3.AlertPolicy.Severity (AlertPolicy_Condition_EvaluationMissingData)(0), // 2: google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData @@ -2073,68 +2156,70 @@ var file_google_monitoring_v3_alert_proto_goTypes 
= []interface{}{ (*AlertPolicy_Condition)(nil), // 5: google.monitoring.v3.AlertPolicy.Condition (*AlertPolicy_AlertStrategy)(nil), // 6: google.monitoring.v3.AlertPolicy.AlertStrategy nil, // 7: google.monitoring.v3.AlertPolicy.UserLabelsEntry - (*AlertPolicy_Condition_Trigger)(nil), // 8: google.monitoring.v3.AlertPolicy.Condition.Trigger - (*AlertPolicy_Condition_MetricThreshold)(nil), // 9: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold - (*AlertPolicy_Condition_MetricAbsence)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence - (*AlertPolicy_Condition_LogMatch)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.LogMatch - (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition - (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition - (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions - nil, // 15: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry - nil, // 16: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry - (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit - (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy - (*wrapperspb.BoolValue)(nil), // 19: google.protobuf.BoolValue - (*status.Status)(nil), // 20: google.rpc.Status - (*MutationRecord)(nil), // 21: google.monitoring.v3.MutationRecord - (*durationpb.Duration)(nil), // 22: google.protobuf.Duration - (*Aggregation)(nil), // 23: google.monitoring.v3.Aggregation - (ComparisonType)(0), // 24: google.monitoring.v3.ComparisonType + (*AlertPolicy_Documentation_Link)(nil), // 8: google.monitoring.v3.AlertPolicy.Documentation.Link + (*AlertPolicy_Condition_Trigger)(nil), // 9: google.monitoring.v3.AlertPolicy.Condition.Trigger + (*AlertPolicy_Condition_MetricThreshold)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + (*AlertPolicy_Condition_MetricAbsence)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + (*AlertPolicy_Condition_LogMatch)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.LogMatch + (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 15: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + nil, // 16: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + nil, // 17: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 19: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*status.Status)(nil), // 21: google.rpc.Status + (*MutationRecord)(nil), // 22: 
google.monitoring.v3.MutationRecord + (*durationpb.Duration)(nil), // 23: google.protobuf.Duration + (*Aggregation)(nil), // 24: google.monitoring.v3.Aggregation + (ComparisonType)(0), // 25: google.monitoring.v3.ComparisonType } var file_google_monitoring_v3_alert_proto_depIdxs = []int32{ 4, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation 7, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry 5, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType - 19, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue - 20, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status - 21, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord - 21, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord + 20, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue + 21, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status + 22, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord + 22, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord 6, // 8: google.monitoring.v3.AlertPolicy.alert_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy 1, // 9: google.monitoring.v3.AlertPolicy.severity:type_name -> google.monitoring.v3.AlertPolicy.Severity - 9, // 10: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold - 10, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence - 11, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch - 12, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition - 13, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition - 17, // 15: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit - 22, // 16: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration - 18, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy - 23, // 18: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation - 23, // 19: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation - 14, // 20: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions - 24, // 21: 
google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType - 22, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration - 8, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 2, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData - 23, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation - 22, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration - 8, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 15, // 28: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry - 22, // 29: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration - 8, // 30: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 2, // 31: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData - 22, // 32: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration - 22, // 33: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration - 16, // 34: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry - 22, // 35: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration - 22, // 36: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration - 22, // 37: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> google.protobuf.Duration - 38, // [38:38] is the sub-list for method output_type - 38, // [38:38] is the sub-list for method input_type - 38, // [38:38] is the sub-list for extension type_name - 38, // [38:38] is the sub-list for extension extendee - 0, // [0:38] is the sub-list for field type_name + 8, // 10: google.monitoring.v3.AlertPolicy.Documentation.links:type_name -> google.monitoring.v3.AlertPolicy.Documentation.Link + 10, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + 11, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + 12, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch + 13, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + 14, // 15: 
google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + 18, // 16: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + 23, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration + 19, // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + 24, // 19: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation + 24, // 20: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation + 15, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + 25, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType + 23, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration + 9, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 24, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation + 23, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration + 9, // 28: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 16, // 29: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + 23, // 30: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 9, // 31: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 32: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 23, // 33: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 23, // 34: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration + 17, // 35: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + 23, // 36: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration + 23, // 37: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration + 23, // 38: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> 
google.protobuf.Duration + 39, // [39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_google_monitoring_v3_alert_proto_init() } @@ -2145,7 +2230,7 @@ func file_google_monitoring_v3_alert_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_mutation_record_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy); i { case 0: return &v.state @@ -2157,7 +2242,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Documentation); i { case 0: return &v.state @@ -2169,7 +2254,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition); i { case 0: return &v.state @@ -2181,7 +2266,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_AlertStrategy); i { case 0: return &v.state @@ -2193,7 +2278,19 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_Trigger); i { case 0: return &v.state @@ -2205,7 +2302,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MetricThreshold); i { case 0: return &v.state @@ -2217,7 +2314,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MetricAbsence); i { case 0: return &v.state @@ -2229,7 +2326,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := 
v.(*AlertPolicy_Condition_LogMatch); i { case 0: return &v.state @@ -2241,7 +2338,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MonitoringQueryLanguageCondition); i { case 0: return &v.state @@ -2253,7 +2350,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_PrometheusQueryLanguageCondition); i { case 0: return &v.state @@ -2265,7 +2362,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MetricThreshold_ForecastOptions); i { case 0: return &v.state @@ -2277,7 +2374,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_AlertStrategy_NotificationRateLimit); i { case 0: return &v.state @@ -2289,7 +2386,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_AlertStrategy_NotificationChannelStrategy); i { case 0: return &v.state @@ -2302,14 +2399,14 @@ func file_google_monitoring_v3_alert_proto_init() { } } } - file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []any{ (*AlertPolicy_Condition_ConditionThreshold)(nil), (*AlertPolicy_Condition_ConditionAbsent)(nil), (*AlertPolicy_Condition_ConditionMatchedLog)(nil), (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage)(nil), (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage)(nil), } - file_google_monitoring_v3_alert_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_google_monitoring_v3_alert_proto_msgTypes[6].OneofWrappers = []any{ (*AlertPolicy_Condition_Trigger_Count)(nil), (*AlertPolicy_Condition_Trigger_Percent)(nil), } @@ -2319,7 +2416,7 @@ func file_google_monitoring_v3_alert_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc, NumEnums: 3, - NumMessages: 16, + NumMessages: 17, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go index 07891b2bbb..f0e149d16b 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may 
not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/alert_service.proto package monitoringpb @@ -647,7 +647,7 @@ func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_google_monitoring_v3_alert_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_alert_service_proto_goTypes = []any{ (*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest @@ -687,7 +687,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { } file_google_monitoring_v3_alert_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*CreateAlertPolicyRequest); i { case 0: return &v.state @@ -699,7 +699,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetAlertPolicyRequest); i { case 0: return &v.state @@ -711,7 +711,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListAlertPoliciesRequest); i { case 0: return &v.state @@ -723,7 +723,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListAlertPoliciesResponse); i { case 0: return &v.state @@ -735,7 +735,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateAlertPolicyRequest); i { case 0: return &v.state @@ -747,7 +747,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteAlertPolicyRequest); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go index 60dcbc873e..c9aa5a0247 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -1,4 
+1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/common.proto package monitoringpb @@ -108,7 +108,7 @@ func (ComparisonType) EnumDescriptor() ([]byte, []int) { return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} } -// The tier of service for a Workspace. Please see the +// The tier of service for a Metrics Scope. Please see the // [service tiers // documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more // details. @@ -120,16 +120,16 @@ const ( // An invalid sentinel value, used to indicate that a tier has not // been provided explicitly. ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0 - // The Stackdriver Basic tier, a free tier of service that provides basic + // The Cloud Monitoring Basic tier, a free tier of service that provides basic // features, a moderate allotment of logs, and access to built-in metrics. // A number of features are not available in this tier. For more details, // see [the service tiers // documentation](https://cloud.google.com/monitoring/workspaces/tiers). ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1 - // The Stackdriver Premium tier, a higher, more expensive tier of service - // that provides access to all Stackdriver features, lets you use Stackdriver - // with AWS accounts, and has a larger allotments for logs and metrics. For - // more details, see [the service tiers + // The Cloud Monitoring Premium tier, a higher, more expensive tier of service + // that provides access to all Cloud Monitoring features, lets you use Cloud + // Monitoring with AWS accounts, and has a larger allotments for logs and + // metrics. For more details, see [the service tiers // documentation](https://cloud.google.com/monitoring/workspaces/tiers). ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2 ) @@ -671,8 +671,20 @@ func (*TypedValue_StringValue) isTypedValue_Value() {} func (*TypedValue_DistributionValue) isTypedValue_Value() {} -// A closed time interval. It extends from the start time to the end time, and includes both: `[startTime, endTime]`. Valid time intervals depend on the [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) of the metric value. The end time must not be earlier than the start time. When writing data points, the start time must not be more than 25 hours in the past and the end time must not be more than five minutes in the future. +// Describes a time interval: // +// - Reads: A half-open time interval. It includes the end time but +// excludes the start time: `(startTime, endTime]`. The start time +// must be specified, must be earlier than the end time, and should be +// no older than the data retention period for the metric. +// - Writes: A closed time interval. It extends from the start time to the end +// time, +// and includes both: `[startTime, endTime]`. Valid time intervals +// depend on the +// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) +// of the metric value. The end time must not be earlier than the start +// time, and the end time must not be more than 25 hours in the past or more +// than five minutes in the future. 
// - For `GAUGE` metrics, the `startTime` value is technically optional; if // no value is specified, the start time defaults to the value of the // end time, and the interval represents a single point in time. If both @@ -680,25 +692,23 @@ func (*TypedValue_DistributionValue) isTypedValue_Value() {} // interval is valid only for `GAUGE` metrics, which are point-in-time // measurements. The end time of a new interval must be at least a // millisecond after the end time of the previous interval. -// // - For `DELTA` metrics, the start time and end time must specify a // non-zero interval, with subsequent points specifying contiguous and // non-overlapping intervals. For `DELTA` metrics, the start time of // the next interval must be at least a millisecond after the end time // of the previous interval. -// // - For `CUMULATIVE` metrics, the start time and end time must specify a // non-zero interval, with subsequent points specifying the same // start time and increasing end times, until an event resets the // cumulative value to zero and sets a new start time for the following // points. The new start time must be at least a millisecond after the // end time of the previous interval. -// -// - The start time of a new interval must be at least a millisecond after the +// - The start time of a new interval must be at least a millisecond after +// the // end time of the previous interval because intervals are closed. If the -// start time of a new interval is the same as the end time of the previous -// interval, then data written at the new start time could overwrite data -// written at the previous end time. +// start time of a new interval is the same as the end time of the +// previous interval, then data written at the new start time could +// overwrite data written at the previous end time. 
type TimeInterval struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1027,20 +1037,20 @@ var file_google_monitoring_v3_common_proto_rawDesc = []byte{ 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55, - 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, + 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, - 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x4d, 0x4f, 0x4e, 0xaa, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1057,7 +1067,7 @@ func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte { var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_google_monitoring_v3_common_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_common_proto_goTypes = []any{ (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner @@ -1089,7 +1099,7 @@ func file_google_monitoring_v3_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*TypedValue); i { case 0: return &v.state @@ -1101,7 +1111,7 @@ func 
file_google_monitoring_v3_common_proto_init() { return nil } } - file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*TimeInterval); i { case 0: return &v.state @@ -1113,7 +1123,7 @@ func file_google_monitoring_v3_common_proto_init() { return nil } } - file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Aggregation); i { case 0: return &v.state @@ -1126,7 +1136,7 @@ func file_google_monitoring_v3_common_proto_init() { } } } - file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []any{ (*TypedValue_BoolValue)(nil), (*TypedValue_Int64Value)(nil), (*TypedValue_DoubleValue)(nil), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go index 7283ebd9f3..7b1dc962da 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/dropped_labels.proto package monitoringpb @@ -144,7 +144,7 @@ func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_monitoring_v3_dropped_labels_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{ (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry } @@ -163,7 +163,7 @@ func file_google_monitoring_v3_dropped_labels_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DroppedLabels); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go index 02dbf8c0d8..dff27f9d8c 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/group.proto package monitoringpb @@ -214,7 +214,7 @@ func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_group_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_group_proto_goTypes = []any{ (*Group)(nil), // 0: google.monitoring.v3.Group } var file_google_monitoring_v3_group_proto_depIdxs = []int32{ @@ -231,7 +231,7 @@ func file_google_monitoring_v3_group_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Group); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go index 9015941f57..46747d9064 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/group_service.proto package monitoringpb @@ -48,8 +48,9 @@ type ListGroupsRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) - // whose groups are to be listed. The format is: + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // groups are to be listed. The format is: // // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` @@ -312,13 +313,14 @@ type CreateGroupRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) in - // which to create the group. The format is: + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the group. The format is: // // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // Required. A group definition. It is an error to define the `name` field because - // the system assigns the name. + // Required. A group definition. It is an error to define the `name` field + // because the system assigns the name. Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` // If true, validate this request but do not create the group. ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` @@ -383,8 +385,9 @@ type UpdateGroupRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The new definition of the group. 
All fields of the existing group, - // excepting `name`, are replaced with the corresponding fields of this group. + // Required. The new definition of the group. All fields of the existing + // group, excepting `name`, are replaced with the corresponding fields of this + // group. Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` // If true, validate this request but do not update the existing group. ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` @@ -872,7 +875,7 @@ func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_google_monitoring_v3_group_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_group_service_proto_goTypes = []any{ (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest (*ListGroupsResponse)(nil), // 1: google.monitoring.v3.ListGroupsResponse (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest @@ -919,7 +922,7 @@ func file_google_monitoring_v3_group_service_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_group_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListGroupsRequest); i { case 0: return &v.state @@ -931,7 +934,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListGroupsResponse); i { case 0: return &v.state @@ -943,7 +946,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetGroupRequest); i { case 0: return &v.state @@ -955,7 +958,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateGroupRequest); i { case 0: return &v.state @@ -967,7 +970,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateGroupRequest); i { case 0: return &v.state @@ -979,7 +982,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteGroupRequest); i { case 0: return &v.state @@ -991,7 +994,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - 
file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListGroupMembersRequest); i { case 0: return &v.state @@ -1003,7 +1006,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ListGroupMembersResponse); i { case 0: return &v.state @@ -1016,7 +1019,7 @@ func file_google_monitoring_v3_group_service_proto_init() { } } } - file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []any{ (*ListGroupsRequest_ChildrenOfGroup)(nil), (*ListGroupsRequest_AncestorsOfGroup)(nil), (*ListGroupsRequest_DescendantsOfGroup)(nil), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go index 25d010abd3..b22c22d07e 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/metric.proto package monitoringpb @@ -992,7 +992,7 @@ func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_google_monitoring_v3_metric_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_metric_proto_goTypes = []any{ (*Point)(nil), // 0: google.monitoring.v3.Point (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor @@ -1047,7 +1047,7 @@ func file_google_monitoring_v3_metric_proto_init() { } file_google_monitoring_v3_common_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Point); i { case 0: return &v.state @@ -1059,7 +1059,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*TimeSeries); i { case 0: return &v.state @@ -1071,7 +1071,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesDescriptor); i { case 0: return &v.state @@ -1083,7 +1083,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - 
file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesData); i { case 0: return &v.state @@ -1095,7 +1095,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*LabelValue); i { case 0: return &v.state @@ -1107,7 +1107,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*QueryError); i { case 0: return &v.state @@ -1119,7 +1119,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*TextLocator); i { case 0: return &v.state @@ -1131,7 +1131,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesDescriptor_ValueDescriptor); i { case 0: return &v.state @@ -1143,7 +1143,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesData_PointData); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*TextLocator_Position); i { case 0: return &v.state @@ -1168,7 +1168,7 @@ func file_google_monitoring_v3_metric_proto_init() { } } } - file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []any{ (*LabelValue_BoolValue)(nil), (*LabelValue_Int64Value)(nil), (*LabelValue_StringValue)(nil), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go index 126ccb8d3b..52e1c1e0b9 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/metric_service.proto package monitoringpb @@ -44,7 +44,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Controls which fields are returned by `ListTimeSeries`. +// Controls which fields are returned by `ListTimeSeries*`. type ListTimeSeriesRequest_TimeSeriesView int32 const ( @@ -101,8 +101,9 @@ type ListMonitoredResourceDescriptorsRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on - // which to execute the request. The format is: + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: // // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` @@ -302,8 +303,9 @@ type ListMetricDescriptorsRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on - // which to execute the request. The format is: + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: // // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` @@ -316,7 +318,9 @@ type ListMetricDescriptorsRequest struct { // // metric.type = starts_with("custom.googleapis.com/") Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // A positive number that is the maximum number of results to return. + // A positive number that is the maximum number of results to return. The + // default and maximum value is 10,000. If a page_size <= 0 or > 10,000 is + // submitted, will instead return a maximum of 10,000 results. PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // If this field is not empty then it must contain the `nextPageToken` value // returned by a previous call to this method. Using this field causes the @@ -451,7 +455,8 @@ type GetMetricDescriptorRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The metric descriptor on which to execute the request. The format is: + // Required. The metric descriptor on which to execute the request. The format + // is: // // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] // @@ -505,14 +510,15 @@ type CreateMetricDescriptorRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on - // which to execute the request. The format is: + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: // 4 // // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Required. The new [custom metric](https://cloud.google.com/monitoring/custom-metrics) - // descriptor. + // Required. The new [custom + // metric](https://cloud.google.com/monitoring/custom-metrics) descriptor. 
MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` } @@ -568,7 +574,8 @@ type DeleteMetricDescriptorRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The metric descriptor on which to execute the request. The format is: + // Required. The metric descriptor on which to execute the request. The format + // is: // // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] // @@ -622,24 +629,26 @@ type ListTimeSeriesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name), + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name), // organization or folder on which to execute the request. The format is: // // projects/[PROJECT_ID_OR_NUMBER] // organizations/[ORGANIZATION_ID] // folders/[FOLDER_ID] Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` - // Required. A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // that specifies which time series should be returned. The filter must - // specify a single metric type, and can additionally specify metric labels - // and other information. For example: + // Required. A [monitoring + // filter](https://cloud.google.com/monitoring/api/v3/filters) that specifies + // which time series should be returned. The filter must specify a single + // metric type, and can additionally specify metric labels and other + // information. For example: // // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND // metric.labels.instance_name = "my-instance-name" Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // Required. The time interval for which results should be returned. Only time series - // that contain data points in the specified interval are included - // in the response. + // Required. The time interval for which results should be returned. Only time + // series that contain data points in the specified interval are included in + // the response. Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` // Specifies the alignment of data points in individual time series as // well as how to combine the retrieved time series across specified labels. @@ -852,8 +861,9 @@ type CreateTimeSeriesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on - // which to execute the request. The format is: + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: // // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` @@ -1050,8 +1060,9 @@ type QueryTimeSeriesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on - // which to execute the request. The format is: + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. 
The format is: // // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1754,7 +1765,7 @@ func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte { var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_google_monitoring_v3_metric_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_metric_service_proto_goTypes = []any{ (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse @@ -1836,7 +1847,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_metric_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListMonitoredResourceDescriptorsRequest); i { case 0: return &v.state @@ -1848,7 +1859,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListMonitoredResourceDescriptorsResponse); i { case 0: return &v.state @@ -1860,7 +1871,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetMonitoredResourceDescriptorRequest); i { case 0: return &v.state @@ -1872,7 +1883,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListMetricDescriptorsRequest); i { case 0: return &v.state @@ -1884,7 +1895,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ListMetricDescriptorsResponse); i { case 0: return &v.state @@ -1896,7 +1907,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*GetMetricDescriptorRequest); i { case 0: return &v.state @@ -1908,7 +1919,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + 
file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*CreateMetricDescriptorRequest); i { case 0: return &v.state @@ -1920,7 +1931,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DeleteMetricDescriptorRequest); i { case 0: return &v.state @@ -1932,7 +1943,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ListTimeSeriesRequest); i { case 0: return &v.state @@ -1944,7 +1955,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ListTimeSeriesResponse); i { case 0: return &v.state @@ -1956,7 +1967,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesRequest); i { case 0: return &v.state @@ -1968,7 +1979,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesError); i { case 0: return &v.state @@ -1980,7 +1991,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesSummary); i { case 0: return &v.state @@ -1992,7 +2003,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*QueryTimeSeriesRequest); i { case 0: return &v.state @@ -2004,7 +2015,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*QueryTimeSeriesResponse); i { case 0: return &v.state @@ -2016,7 +2027,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*QueryErrorList); i { 
case 0: return &v.state @@ -2028,7 +2039,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesSummary_Error); i { case 0: return &v.state @@ -2074,30 +2085,34 @@ const _ = grpc.SupportPackageIsVersion6 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type MetricServiceClient interface { - // Lists monitored resource descriptors that match a filter. This method does not require a Workspace. + // Lists monitored resource descriptors that match a filter. ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) - // Gets a single monitored resource descriptor. This method does not require a Workspace. + // Gets a single monitored resource descriptor. GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) - // Lists metric descriptors that match a filter. This method does not require a Workspace. + // Lists metric descriptors that match a filter. ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) - // Gets a single metric descriptor. This method does not require a Workspace. + // Gets a single metric descriptor. GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) // Creates a new metric descriptor. - // The creation is executed asynchronously and callers may check the returned - // operation to track its progress. + // The creation is executed asynchronously. // User-created metric descriptors define // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) // Deletes a metric descriptor. Only user-created // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be // deleted. DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Lists time series that match a filter. This method does not require a Workspace. + // Lists time series that match a filter. ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) // Creates or adds data to one or more time series. // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). 
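Editor's aside (illustrative only, not part of the vendored diff): the regenerated comments above describe the CreateMetricDescriptor and CreateTimeSeries semantics this update touches. Below is a minimal sketch of writing a single point through the wrapping cloud.google.com/go/monitoring/apiv3/v2 client; the project ID and metric type are placeholders, and the import paths assume the same generated packages referenced in this diff.

// Illustrative sketch only; not part of the vendored diff.
// "my-project" and the custom metric type are placeholders.
package main

import (
	"context"
	"log"
	"time"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	"cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	metricpb "google.golang.org/genproto/googleapis/api/metric"
	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ctx := context.Background()
	client, err := monitoring.NewMetricClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	req := &monitoringpb.CreateTimeSeriesRequest{
		Name: "projects/my-project",
		TimeSeries: []*monitoringpb.TimeSeries{{
			Metric: &metricpb.Metric{Type: "custom.googleapis.com/my_metric"},
			Resource: &monitoredrespb.MonitoredResource{
				Type:   "global",
				Labels: map[string]string{"project_id": "my-project"},
			},
			Points: []*monitoringpb.Point{{
				Interval: &monitoringpb.TimeInterval{EndTime: timestamppb.New(time.Now())},
				Value:    &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{DoubleValue: 1.0}},
			}},
		}},
	}
	// Per the regenerated comment: the response is empty on success, and
	// per-series failures are reported through the returned error.
	if err := client.CreateTimeSeries(ctx, req); err != nil {
		log.Fatal(err)
	}
}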
CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Creates or adds data to one or more service time series. A service time // series is a time series for a metric from a Google Cloud service. The @@ -2202,30 +2217,34 @@ func (c *metricServiceClient) CreateServiceTimeSeries(ctx context.Context, in *C // MetricServiceServer is the server API for MetricService service. type MetricServiceServer interface { - // Lists monitored resource descriptors that match a filter. This method does not require a Workspace. + // Lists monitored resource descriptors that match a filter. ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) - // Gets a single monitored resource descriptor. This method does not require a Workspace. + // Gets a single monitored resource descriptor. GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) - // Lists metric descriptors that match a filter. This method does not require a Workspace. + // Lists metric descriptors that match a filter. ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) - // Gets a single metric descriptor. This method does not require a Workspace. + // Gets a single metric descriptor. GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) // Creates a new metric descriptor. - // The creation is executed asynchronously and callers may check the returned - // operation to track its progress. + // The creation is executed asynchronously. // User-created metric descriptors define // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) // Deletes a metric descriptor. Only user-created // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be // deleted. DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) - // Lists time series that match a filter. This method does not require a Workspace. + // Lists time series that match a filter. ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) // Creates or adds data to one or more time series. // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) // Creates or adds data to one or more service time series. A service time // series is a time series for a metric from a Google Cloud service. 
The diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go index c88373124a..643b244e4d 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/mutation_record.proto package monitoringpb @@ -139,7 +139,7 @@ func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_mutation_record_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{ (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp } @@ -158,7 +158,7 @@ func file_google_monitoring_v3_mutation_record_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*MutationRecord); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go index ac5e389cb7..603b5bcdde 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/notification.proto package monitoringpb @@ -563,7 +563,7 @@ func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte { var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_monitoring_v3_notification_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_notification_proto_goTypes = []any{ (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel @@ -600,7 +600,7 @@ func file_google_monitoring_v3_notification_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_mutation_record_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*NotificationChannelDescriptor); i { case 0: return &v.state @@ -612,7 +612,7 @@ func file_google_monitoring_v3_notification_proto_init() { return nil } } - file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*NotificationChannel); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go index 11f568083d..ac7bafd1f1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/notification_service.proto package monitoringpb @@ -1242,7 +1242,7 @@ func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_google_monitoring_v3_notification_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_notification_service_proto_goTypes = []any{ (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest @@ -1304,7 +1304,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { } file_google_monitoring_v3_notification_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelDescriptorsRequest); i { case 0: return &v.state @@ -1316,7 +1316,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelDescriptorsResponse); i { case 0: return &v.state @@ -1328,7 +1328,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelDescriptorRequest); i { case 0: return &v.state @@ -1340,7 +1340,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateNotificationChannelRequest); i { case 0: return &v.state @@ -1352,7 +1352,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelsRequest); i { case 0: return &v.state @@ -1364,7 +1364,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelsResponse); i { case 0: return &v.state @@ -1376,7 +1376,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - 
file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelRequest); i { case 0: return &v.state @@ -1388,7 +1388,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*UpdateNotificationChannelRequest); i { case 0: return &v.state @@ -1400,7 +1400,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*DeleteNotificationChannelRequest); i { case 0: return &v.state @@ -1412,7 +1412,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SendNotificationChannelVerificationCodeRequest); i { case 0: return &v.state @@ -1424,7 +1424,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelVerificationCodeRequest); i { case 0: return &v.state @@ -1436,7 +1436,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelVerificationCodeResponse); i { case 0: return &v.state @@ -1448,7 +1448,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*VerifyNotificationChannelRequest); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go index 4e913d16ac..e9bfbd68f5 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/query_service.proto package monitoringpb @@ -47,11 +47,11 @@ var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xde, 0x02, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa1, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -90,7 +90,7 @@ var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var file_google_monitoring_v3_query_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_query_service_proto_goTypes = []any{ (*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest (*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse } @@ -141,7 +141,7 @@ const _ = grpc.SupportPackageIsVersion6 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type QueryServiceClient interface { - // Queries time series using Monitoring Query Language. This method does not require a Workspace. + // Queries time series using Monitoring Query Language. QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) } @@ -164,7 +164,7 @@ func (c *queryServiceClient) QueryTimeSeries(ctx context.Context, in *QueryTimeS // QueryServiceServer is the server API for QueryService service. type QueryServiceServer interface { - // Queries time series using Monitoring Query Language. This method does not require a Workspace. + // Queries time series using Monitoring Query Language. 
QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) } diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go index 240d5725cb..869a3738c0 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/service.proto package monitoringpb @@ -108,7 +108,7 @@ type Service struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Resource name for this Service. The format is: + // Identifier. Resource name for this Service. The format is: // // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -124,7 +124,16 @@ type Service struct { // *Service_ClusterIstio_ // *Service_MeshIstio_ // *Service_IstioCanonicalService_ + // *Service_CloudRun_ + // *Service_GkeNamespace_ + // *Service_GkeWorkload_ + // *Service_GkeService_ Identifier isService_Identifier `protobuf_oneof:"identifier"` + // Message that contains the service type and service labels of this service + // if it is a basic service. + // Documentation and examples + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + BasicService *Service_BasicService `protobuf:"bytes,19,opt,name=basic_service,json=basicService,proto3" json:"basic_service,omitempty"` // Configuration for how to query telemetry on a Service. Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"` // Labels which have been used to annotate the service. Label keys must start @@ -231,6 +240,41 @@ func (x *Service) GetIstioCanonicalService() *Service_IstioCanonicalService { return nil } +func (x *Service) GetCloudRun() *Service_CloudRun { + if x, ok := x.GetIdentifier().(*Service_CloudRun_); ok { + return x.CloudRun + } + return nil +} + +func (x *Service) GetGkeNamespace() *Service_GkeNamespace { + if x, ok := x.GetIdentifier().(*Service_GkeNamespace_); ok { + return x.GkeNamespace + } + return nil +} + +func (x *Service) GetGkeWorkload() *Service_GkeWorkload { + if x, ok := x.GetIdentifier().(*Service_GkeWorkload_); ok { + return x.GkeWorkload + } + return nil +} + +func (x *Service) GetGkeService() *Service_GkeService { + if x, ok := x.GetIdentifier().(*Service_GkeService_); ok { + return x.GkeService + } + return nil +} + +func (x *Service) GetBasicService() *Service_BasicService { + if x != nil { + return x.BasicService + } + return nil +} + func (x *Service) GetTelemetry() *Service_Telemetry { if x != nil { return x.Telemetry @@ -281,6 +325,26 @@ type Service_IstioCanonicalService_ struct { IstioCanonicalService *Service_IstioCanonicalService `protobuf:"bytes,11,opt,name=istio_canonical_service,json=istioCanonicalService,proto3,oneof"` } +type Service_CloudRun_ struct { + // Type used for Cloud Run services. 
+ CloudRun *Service_CloudRun `protobuf:"bytes,12,opt,name=cloud_run,json=cloudRun,proto3,oneof"` +} + +type Service_GkeNamespace_ struct { + // Type used for GKE Namespaces. + GkeNamespace *Service_GkeNamespace `protobuf:"bytes,15,opt,name=gke_namespace,json=gkeNamespace,proto3,oneof"` +} + +type Service_GkeWorkload_ struct { + // Type used for GKE Workloads. + GkeWorkload *Service_GkeWorkload `protobuf:"bytes,16,opt,name=gke_workload,json=gkeWorkload,proto3,oneof"` +} + +type Service_GkeService_ struct { + // Type used for GKE Services (the Kubernetes concept of a service). + GkeService *Service_GkeService `protobuf:"bytes,17,opt,name=gke_service,json=gkeService,proto3,oneof"` +} + func (*Service_Custom_) isService_Identifier() {} func (*Service_AppEngine_) isService_Identifier() {} @@ -293,6 +357,14 @@ func (*Service_MeshIstio_) isService_Identifier() {} func (*Service_IstioCanonicalService_) isService_Identifier() {} +func (*Service_CloudRun_) isService_Identifier() {} + +func (*Service_GkeNamespace_) isService_Identifier() {} + +func (*Service_GkeWorkload_) isService_Identifier() {} + +func (*Service_GkeService_) isService_Identifier() {} + // A Service-Level Objective (SLO) describes a level of desired good service. It // consists of a service-level indicator (SLI), a performance goal, and a period // over which the objective is to be evaluated against that goal. The SLO can @@ -304,7 +376,7 @@ type ServiceLevelObjective struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Resource name for this `ServiceLevelObjective`. The format is: + // Identifier. Resource name for this `ServiceLevelObjective`. The format is: // // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1117,8 +1189,9 @@ func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {} func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {} -// Custom view of service telemetry. Currently a place-holder pending final -// design. +// Use a custom service to designate a service that you want to monitor +// when none of the other service types (like App Engine, Cloud Run, or +// a GKE type) matches your intended service. type Service_Custom struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1164,8 +1237,8 @@ type Service_AppEngine struct { unknownFields protoimpl.UnknownFields // The ID of the App Engine module underlying this service. Corresponds to - // the `module_id` resource label in the `gae_app` monitored resource: - // https://cloud.google.com/monitoring/api/resources#tag_gae_app + // the `module_id` resource label in the [`gae_app` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_gae_app). ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"` } @@ -1215,8 +1288,8 @@ type Service_CloudEndpoints struct { unknownFields protoimpl.UnknownFields // The name of the Cloud Endpoints service underlying this service. - // Corresponds to the `service` resource label in the `api` monitored - // resource: https://cloud.google.com/monitoring/api/resources#tag_api + // Corresponds to the `service` resource label in the [`api` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_api). 
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` } @@ -1490,6 +1563,404 @@ func (x *Service_IstioCanonicalService) GetCanonicalService() string { return "" } +// Cloud Run service. Learn more at https://cloud.google.com/run. +type Service_CloudRun struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Run service. Corresponds to the `service_name` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // The location the service is run. Corresponds to the `location` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *Service_CloudRun) Reset() { + *x = Service_CloudRun{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudRun) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudRun) ProtoMessage() {} + +func (x *Service_CloudRun) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudRun.ProtoReflect.Descriptor instead. +func (*Service_CloudRun) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *Service_CloudRun) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *Service_CloudRun) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +// GKE Namespace. The field names correspond to the resource metadata labels +// on monitored resources that fall under a namespace (for example, +// `k8s_container` or `k8s_pod`). +type Service_GkeNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of this namespace. 
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` +} + +func (x *Service_GkeNamespace) Reset() { + *x = Service_GkeNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeNamespace) ProtoMessage() {} + +func (x *Service_GkeNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeNamespace.ProtoReflect.Descriptor instead. +func (*Service_GkeNamespace) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 7} +} + +func (x *Service_GkeNamespace) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeNamespace) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeNamespace) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeNamespace) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +// A GKE Workload (Deployment, StatefulSet, etc). The field names correspond +// to the metadata labels on monitored resources that fall under a workload +// (for example, `k8s_container` or `k8s_pod`). +type Service_GkeWorkload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The type of this workload (for example, "Deployment" or "DaemonSet") + TopLevelControllerType string `protobuf:"bytes,5,opt,name=top_level_controller_type,json=topLevelControllerType,proto3" json:"top_level_controller_type,omitempty"` + // The name of this workload. 
+ TopLevelControllerName string `protobuf:"bytes,6,opt,name=top_level_controller_name,json=topLevelControllerName,proto3" json:"top_level_controller_name,omitempty"` +} + +func (x *Service_GkeWorkload) Reset() { + *x = Service_GkeWorkload{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeWorkload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeWorkload) ProtoMessage() {} + +func (x *Service_GkeWorkload) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeWorkload.ProtoReflect.Descriptor instead. +func (*Service_GkeWorkload) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *Service_GkeWorkload) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeWorkload) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeWorkload) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeWorkload) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerType() string { + if x != nil { + return x.TopLevelControllerType + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerName() string { + if x != nil { + return x.TopLevelControllerName + } + return "" +} + +// GKE Service. The "service" here represents a +// [Kubernetes service +// object](https://kubernetes.io/docs/concepts/services-networking/service). +// The field names correspond to the resource labels on [`k8s_service` +// monitored +// resources](https://cloud.google.com/monitoring/api/resources#tag_k8s_service). +type Service_GkeService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The name of this service. 
+ ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_GkeService) Reset() { + *x = Service_GkeService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeService) ProtoMessage() {} + +func (x *Service_GkeService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeService.ProtoReflect.Descriptor instead. +func (*Service_GkeService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *Service_GkeService) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeService) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeService) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeService) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeService) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// A well-known service type, defined by its service type and service labels. +// Documentation and examples +// [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). +type Service_BasicService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of service that this basic service defines, e.g. + // APP_ENGINE service type. + // Documentation and valid values + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"` + // Labels that specify the resource that emits the monitoring data which + // is used for SLO reporting of this `Service`. + // Documentation and valid values for given service types + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). 
+ ServiceLabels map[string]string `protobuf:"bytes,2,rep,name=service_labels,json=serviceLabels,proto3" json:"service_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service_BasicService) Reset() { + *x = Service_BasicService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_BasicService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_BasicService) ProtoMessage() {} + +func (x *Service_BasicService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_BasicService.ProtoReflect.Descriptor instead. +func (*Service_BasicService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *Service_BasicService) GetServiceType() string { + if x != nil { + return x.ServiceType + } + return "" +} + +func (x *Service_BasicService) GetServiceLabels() map[string]string { + if x != nil { + return x.ServiceLabels + } + return nil +} + // Configuration for how to query telemetry on a Service. type Service_Telemetry struct { state protoimpl.MessageState @@ -1504,7 +1975,7 @@ type Service_Telemetry struct { func (x *Service_Telemetry) Reset() { *x = Service_Telemetry{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1517,7 +1988,7 @@ func (x *Service_Telemetry) String() string { func (*Service_Telemetry) ProtoMessage() {} func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1530,7 +2001,7 @@ func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { // Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead. 
func (*Service_Telemetry) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6} + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 11} } func (x *Service_Telemetry) GetResourceName() string { @@ -1550,7 +2021,7 @@ type BasicSli_AvailabilityCriteria struct { func (x *BasicSli_AvailabilityCriteria) Reset() { *x = BasicSli_AvailabilityCriteria{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1563,7 +2034,7 @@ func (x *BasicSli_AvailabilityCriteria) String() string { func (*BasicSli_AvailabilityCriteria) ProtoMessage() {} func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1593,7 +2064,7 @@ type BasicSli_LatencyCriteria struct { func (x *BasicSli_LatencyCriteria) Reset() { *x = BasicSli_LatencyCriteria{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1606,7 +2077,7 @@ func (x *BasicSli_LatencyCriteria) String() string { func (*BasicSli_LatencyCriteria) ProtoMessage() {} func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1651,7 +2122,7 @@ type WindowsBasedSli_PerformanceThreshold struct { func (x *WindowsBasedSli_PerformanceThreshold) Reset() { *x = WindowsBasedSli_PerformanceThreshold{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1664,7 +2135,7 @@ func (x *WindowsBasedSli_PerformanceThreshold) String() string { func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {} func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1748,7 +2219,7 @@ type WindowsBasedSli_MetricRange struct { func (x *WindowsBasedSli_MetricRange) Reset() { *x = WindowsBasedSli_MetricRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[21] + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1761,7 +2232,7 @@ func (x *WindowsBasedSli_MetricRange) String() string { func (*WindowsBasedSli_MetricRange) ProtoMessage() {} func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[21] + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] 
if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1797,302 +2268,386 @@ var file_google_monitoring_v3_service_proto_rawDesc = []byte{ 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x0c, 0x0a, 0x07, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x16, 0x0a, 0x07, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, + 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, - 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, - 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49, + 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f, + 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x43, 0x6c, 
0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x51, 0x0a, 0x0d, 0x67, 0x6b, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, - 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, - 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, - 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, - 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, - 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x6b, + 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67, + 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x4b, 0x0a, 0x0b, 0x67, 0x6b, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6b, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, + 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0e, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, + 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c, + 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, + 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, + 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, 0x08, + 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 
0x76, 0x69, 0x63, - 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, - 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x1a, 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, - 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, - 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x1a, 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, - 0x69, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, 0x0a, 0x1b, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, + 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x08, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x98, 0x01, 0x0a, 0x0c, 0x47, 0x6b, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x8d, 0x02, 0x0a, 0x0b, 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, + 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0xb9, 0x01, 0x0a, 0x0a, 0x47, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22, + 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x76, 0x0a, 0x09, 0x4d, 
0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, - 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, - 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, - 0x0a, 0x1b, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, - 0x0a, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, - 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x30, 0x0a, 0x09, 0x54, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, - 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, - 0x41, 0xa3, 0x01, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, - 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, - 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x22, 0xfd, 0x06, 0x0a, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, - 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, 0x42, - 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, - 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x65, - 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75, 0x73, - 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, - 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x0c, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x64, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 
0x65, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, - 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, 0x12, - 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, 0x02, - 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, 0x6f, - 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, - 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, - 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, - 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x3d, - 0x0a, 0x09, 0x62, 
0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, - 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, 0x0a, - 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, 0x6e, - 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x16, - 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0c, - 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, 0x01, 0x0a, + 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 
0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x22, 0x82, 0x07, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, + 0x6f, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, + 0x42, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, + 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, + 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 
0x04, 0x56, 0x69, 0x65, 0x77, + 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, + 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, + 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, + 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, - 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, - 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, 
0x65, - 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, 0x4c, - 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x37, - 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, 0x63, - 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6d, - 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, 0x64, - 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, 0x6f, - 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, 0x64, - 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, 0x0f, - 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x42, - 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, 0x0a, - 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, 0x64, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, 0x0a, - 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, 0x0a, - 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, - 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, - 0x69, 0x73, 0x74, 
0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, - 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x64, - 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x64, - 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, - 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, + 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x48, - 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, - 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, - 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, + 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, + 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, + 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, + 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, + 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, + 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, + 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, + 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, + 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, + 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, + 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, + 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, + 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 
0x67, + 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, + 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, + 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, 0x69, - 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, - 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x70, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, 0x0a, - 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, 0x72, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, 0x69, - 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, - 0x65, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, - 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, 0x63, - 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a, 0x18, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, - 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, - 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2108,8 +2663,8 @@ func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 22) -var file_google_monitoring_v3_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_monitoring_v3_service_proto_goTypes = []any{ (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View (*Service)(nil), // 1: google.monitoring.v3.Service (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective @@ -2126,15 +2681,21 @@ var file_google_monitoring_v3_service_proto_goTypes = []interface{}{ (*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio (*Service_IstioCanonicalService)(nil), // 15: google.monitoring.v3.Service.IstioCanonicalService - (*Service_Telemetry)(nil), // 16: google.monitoring.v3.Service.Telemetry - nil, // 17: google.monitoring.v3.Service.UserLabelsEntry - nil, // 18: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry - (*BasicSli_AvailabilityCriteria)(nil), // 19: google.monitoring.v3.BasicSli.AvailabilityCriteria - (*BasicSli_LatencyCriteria)(nil), // 20: google.monitoring.v3.BasicSli.LatencyCriteria - (*WindowsBasedSli_PerformanceThreshold)(nil), // 21: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold - (*WindowsBasedSli_MetricRange)(nil), // 22: google.monitoring.v3.WindowsBasedSli.MetricRange - (*durationpb.Duration)(nil), // 23: google.protobuf.Duration - (calendarperiod.CalendarPeriod)(0), // 24: google.type.CalendarPeriod + (*Service_CloudRun)(nil), // 16: google.monitoring.v3.Service.CloudRun + (*Service_GkeNamespace)(nil), // 17: 
google.monitoring.v3.Service.GkeNamespace + (*Service_GkeWorkload)(nil), // 18: google.monitoring.v3.Service.GkeWorkload + (*Service_GkeService)(nil), // 19: google.monitoring.v3.Service.GkeService + (*Service_BasicService)(nil), // 20: google.monitoring.v3.Service.BasicService + (*Service_Telemetry)(nil), // 21: google.monitoring.v3.Service.Telemetry + nil, // 22: google.monitoring.v3.Service.UserLabelsEntry + nil, // 23: google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + nil, // 24: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + (*BasicSli_AvailabilityCriteria)(nil), // 25: google.monitoring.v3.BasicSli.AvailabilityCriteria + (*BasicSli_LatencyCriteria)(nil), // 26: google.monitoring.v3.BasicSli.LatencyCriteria + (*WindowsBasedSli_PerformanceThreshold)(nil), // 27: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + (*WindowsBasedSli_MetricRange)(nil), // 28: google.monitoring.v3.WindowsBasedSli.MetricRange + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (calendarperiod.CalendarPeriod)(0), // 30: google.type.CalendarPeriod } var file_google_monitoring_v3_service_proto_depIdxs = []int32{ 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom @@ -2143,33 +2704,39 @@ var file_google_monitoring_v3_service_proto_depIdxs = []int32{ 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> google.monitoring.v3.Service.ClusterIstio 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio 15, // 5: google.monitoring.v3.Service.istio_canonical_service:type_name -> google.monitoring.v3.Service.IstioCanonicalService - 16, // 6: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry - 17, // 7: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry - 3, // 8: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator - 23, // 9: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration - 24, // 10: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod - 18, // 11: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry - 4, // 12: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli - 6, // 13: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli - 9, // 14: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli - 19, // 15: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria - 20, // 16: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria - 7, // 17: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio - 8, // 18: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut - 5, // 19: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range - 21, // 20: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold - 22, // 21: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> 
google.monitoring.v3.WindowsBasedSli.MetricRange - 22, // 22: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange - 23, // 23: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration - 23, // 24: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration - 6, // 25: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli - 4, // 26: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli - 5, // 27: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> google.monitoring.v3.Range - 28, // [28:28] is the sub-list for method output_type - 28, // [28:28] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 16, // 6: google.monitoring.v3.Service.cloud_run:type_name -> google.monitoring.v3.Service.CloudRun + 17, // 7: google.monitoring.v3.Service.gke_namespace:type_name -> google.monitoring.v3.Service.GkeNamespace + 18, // 8: google.monitoring.v3.Service.gke_workload:type_name -> google.monitoring.v3.Service.GkeWorkload + 19, // 9: google.monitoring.v3.Service.gke_service:type_name -> google.monitoring.v3.Service.GkeService + 20, // 10: google.monitoring.v3.Service.basic_service:type_name -> google.monitoring.v3.Service.BasicService + 21, // 11: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry + 22, // 12: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry + 3, // 13: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator + 29, // 14: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration + 30, // 15: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod + 24, // 16: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + 4, // 17: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli + 6, // 18: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli + 9, // 19: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli + 25, // 20: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria + 26, // 21: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria + 7, // 22: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio + 8, // 23: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut + 5, // 24: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range + 27, // 25: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + 28, // 26: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 28, // 27: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> 
google.monitoring.v3.WindowsBasedSli.MetricRange + 29, // 28: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration + 23, // 29: google.monitoring.v3.Service.BasicService.service_labels:type_name -> google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + 29, // 30: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration + 6, // 31: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli + 4, // 32: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli + 5, // 33: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> google.monitoring.v3.Range + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name } func init() { file_google_monitoring_v3_service_proto_init() } @@ -2178,7 +2745,7 @@ func file_google_monitoring_v3_service_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Service); i { case 0: return &v.state @@ -2190,7 +2757,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ServiceLevelObjective); i { case 0: return &v.state @@ -2202,7 +2769,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServiceLevelIndicator); i { case 0: return &v.state @@ -2214,7 +2781,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*BasicSli); i { case 0: return &v.state @@ -2226,7 +2793,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Range); i { case 0: return &v.state @@ -2238,7 +2805,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*RequestBasedSli); i { case 0: return &v.state @@ -2250,7 +2817,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesRatio); i { case 0: 
return &v.state @@ -2262,7 +2829,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DistributionCut); i { case 0: return &v.state @@ -2274,7 +2841,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*WindowsBasedSli); i { case 0: return &v.state @@ -2286,7 +2853,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*Service_Custom); i { case 0: return &v.state @@ -2298,7 +2865,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*Service_AppEngine); i { case 0: return &v.state @@ -2310,7 +2877,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*Service_CloudEndpoints); i { case 0: return &v.state @@ -2322,7 +2889,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*Service_ClusterIstio); i { case 0: return &v.state @@ -2334,7 +2901,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*Service_MeshIstio); i { case 0: return &v.state @@ -2346,7 +2913,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*Service_IstioCanonicalService); i { case 0: return &v.state @@ -2358,7 +2925,67 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudRun); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeWorkload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*Service_BasicService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*Service_Telemetry); i { case 0: return &v.state @@ -2370,7 +2997,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*BasicSli_AvailabilityCriteria); i { case 0: return &v.state @@ -2382,7 +3009,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*BasicSli_LatencyCriteria); i { case 0: return &v.state @@ -2394,7 +3021,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*WindowsBasedSli_PerformanceThreshold); i { case 0: return &v.state @@ -2406,7 +3033,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*WindowsBasedSli_MetricRange); i { case 0: return &v.state @@ -2419,38 +3046,42 @@ func file_google_monitoring_v3_service_proto_init() { } } } - file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []any{ (*Service_Custom_)(nil), (*Service_AppEngine_)(nil), (*Service_CloudEndpoints_)(nil), (*Service_ClusterIstio_)(nil), (*Service_MeshIstio_)(nil), (*Service_IstioCanonicalService_)(nil), + (*Service_CloudRun_)(nil), + (*Service_GkeNamespace_)(nil), + (*Service_GkeWorkload_)(nil), + (*Service_GkeService_)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []any{ (*ServiceLevelObjective_RollingPeriod)(nil), (*ServiceLevelObjective_CalendarPeriod)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []any{ (*ServiceLevelIndicator_BasicSli)(nil), (*ServiceLevelIndicator_RequestBased)(nil), (*ServiceLevelIndicator_WindowsBased)(nil), } - 
file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []any{ (*BasicSli_Availability)(nil), (*BasicSli_Latency)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []any{ (*RequestBasedSli_GoodTotalRatio)(nil), (*RequestBasedSli_DistributionCut)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []any{ (*WindowsBasedSli_GoodBadMetricFilter)(nil), (*WindowsBasedSli_GoodTotalRatioThreshold)(nil), (*WindowsBasedSli_MetricMeanInRange)(nil), (*WindowsBasedSli_MetricSumInRange)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[20].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[26].OneofWrappers = []any{ (*WindowsBasedSli_PerformanceThreshold_Performance)(nil), (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil), } @@ -2460,7 +3091,7 @@ func file_google_monitoring_v3_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc, NumEnums: 1, - NumMessages: 22, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go index 0b6f287fe8..15e1f04d6a 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/service_service.proto package monitoringpb @@ -48,8 +48,9 @@ type CreateServiceRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Resource [name](https://cloud.google.com/monitoring/api/v3#project_name) of - // the parent workspace. The format is: + // Required. Resource + // [name](https://cloud.google.com/monitoring/api/v3#project_name) of the + // parent Metrics Scope. The format is: // // projects/[PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` @@ -170,32 +171,31 @@ type ListServicesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Resource name of the parent containing the listed services, either a - // [project](https://cloud.google.com/monitoring/api/v3#project_name) or a - // Monitoring Workspace. The formats are: + // Required. Resource name of the parent containing the listed services, + // either a [project](https://cloud.google.com/monitoring/api/v3#project_name) + // or a Monitoring Metrics Scope. The formats are: // // projects/[PROJECT_ID_OR_NUMBER] // workspaces/[HOST_PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // A filter specifying what `Service`s to return. 
The filter currently - // supports the following fields: + // A filter specifying what `Service`s to return. The filter supports + // filtering on a particular service-identifier type or one of its attributes. // - // - `identifier_case` - // - `app_engine.module_id` - // - `cloud_endpoints.service` (reserved for future use) - // - `mesh_istio.mesh_uid` - // - `mesh_istio.service_namespace` - // - `mesh_istio.service_name` - // - `cluster_istio.location` (deprecated) - // - `cluster_istio.cluster_name` (deprecated) - // - `cluster_istio.service_namespace` (deprecated) - // - `cluster_istio.service_name` (deprecated) + // To filter on a particular service-identifier type, the `identifier_case` + // refers to which option in the `identifier` field is populated. For example, + // the filter `identifier_case = "CUSTOM"` would match all services with a + // value for the `custom` field. Valid options include "CUSTOM", "APP_ENGINE", + // "MESH_ISTIO", and the other options listed at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service // - // `identifier_case` refers to which option in the identifier oneof is - // populated. For example, the filter `identifier_case = "CUSTOM"` would match - // all services with a value for the `custom` field. Valid options are - // "CUSTOM", "APP_ENGINE", "MESH_ISTIO", plus "CLUSTER_ISTIO" (deprecated) - // and "CLOUD_ENDPOINTS" (reserved for future use). + // To filter on an attribute of a service-identifier type, apply the filter + // name by using the snake case of the service-identifier type and the + // attribute of that service-identifier type, and join the two with a period. + // For example, to filter by the `meshUid` field of the `MeshIstio` + // service-identifier type, you must filter on `mesh_istio.mesh_uid = + // "123"` to match all services with mesh UID "123". Service-identifier types + // and their attributes are described at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // A non-negative number that is the maximum number of results to return. // When 0, use default page size. @@ -448,7 +448,7 @@ type CreateServiceLevelObjectiveRequest struct { Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. The ServiceLevelObjective id to use for this // ServiceLevelObjective. If omitted, an id will be generated instead. Must - // match the pattern `[a-z0-9\-]+` + // match the pattern `^[a-zA-Z0-9-_:.]+$` ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"` // Required. The `ServiceLevelObjective` to create. // The provided `name` will be respected if no `ServiceLevelObjective` exists @@ -515,7 +515,8 @@ type GetServiceLevelObjectiveRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Resource name of the `ServiceLevelObjective` to get. The format is: + // Required. Resource name of the `ServiceLevelObjective` to get. The format + // is: // // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -579,7 +580,7 @@ type ListServiceLevelObjectivesRequest struct { unknownFields protoimpl.UnknownFields // Required. 
Resource name of the parent containing the listed SLOs, either a - // project or a Monitoring Workspace. The formats are: + // project or a Monitoring Metrics Scope. The formats are: // // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- @@ -792,7 +793,8 @@ type DeleteServiceLevelObjectiveRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Resource name of the `ServiceLevelObjective` to delete. The format is: + // Required. Resource name of the `ServiceLevelObjective` to delete. The + // format is: // // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1140,7 +1142,7 @@ func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_google_monitoring_v3_service_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_service_service_proto_goTypes = []any{ (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest @@ -1204,7 +1206,7 @@ func file_google_monitoring_v3_service_service_proto_init() { } file_google_monitoring_v3_service_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*CreateServiceRequest); i { case 0: return &v.state @@ -1216,7 +1218,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetServiceRequest); i { case 0: return &v.state @@ -1228,7 +1230,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListServicesRequest); i { case 0: return &v.state @@ -1240,7 +1242,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListServicesResponse); i { case 0: return &v.state @@ -1252,7 +1254,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateServiceRequest); i { case 0: return &v.state @@ -1264,7 +1266,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + 
file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteServiceRequest); i { case 0: return &v.state @@ -1276,7 +1278,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*CreateServiceLevelObjectiveRequest); i { case 0: return &v.state @@ -1288,7 +1290,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*GetServiceLevelObjectiveRequest); i { case 0: return &v.state @@ -1300,7 +1302,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ListServiceLevelObjectivesRequest); i { case 0: return &v.state @@ -1312,7 +1314,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ListServiceLevelObjectivesResponse); i { case 0: return &v.state @@ -1324,7 +1326,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*UpdateServiceLevelObjectiveRequest); i { case 0: return &v.state @@ -1336,7 +1338,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*DeleteServiceLevelObjectiveRequest); i { case 0: return &v.state @@ -1385,7 +1387,7 @@ type ServiceMonitoringServiceClient interface { CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) // Get the named `Service`. GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) - // List `Service`s for this workspace. + // List `Service`s for this Metrics Scope. ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) // Update this `Service`. UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) @@ -1507,7 +1509,7 @@ type ServiceMonitoringServiceServer interface { CreateService(context.Context, *CreateServiceRequest) (*Service, error) // Get the named `Service`. GetService(context.Context, *GetServiceRequest) (*Service, error) - // List `Service`s for this workspace. + // List `Service`s for this Metrics Scope. 
ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) // Update this `Service`. UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go index f27c9bbe2c..ab49868045 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/snooze.proto package monitoringpb @@ -247,7 +247,7 @@ func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_monitoring_v3_snooze_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_snooze_proto_goTypes = []any{ (*Snooze)(nil), // 0: google.monitoring.v3.Snooze (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval @@ -269,7 +269,7 @@ func file_google_monitoring_v3_snooze_proto_init() { } file_google_monitoring_v3_common_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Snooze); i { case 0: return &v.state @@ -281,7 +281,7 @@ func file_google_monitoring_v3_snooze_proto_init() { return nil } } - file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Snooze_Criteria); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go index caa89ac32a..39388a9982 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/snooze_service.proto package monitoringpb @@ -545,7 +545,7 @@ func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_google_monitoring_v3_snooze_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{ (*CreateSnoozeRequest)(nil), // 0: google.monitoring.v3.CreateSnoozeRequest (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse @@ -581,7 +581,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { } file_google_monitoring_v3_snooze_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*CreateSnoozeRequest); i { case 0: return &v.state @@ -593,7 +593,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListSnoozesRequest); i { case 0: return &v.state @@ -605,7 +605,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListSnoozesResponse); i { case 0: return &v.state @@ -617,7 +617,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetSnoozeRequest); i { case 0: return &v.state @@ -629,7 +629,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateSnoozeRequest); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go index 4fbce3a163..5a55ecc665 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/span_context.proto package monitoringpb @@ -137,7 +137,7 @@ func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_span_context_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_span_context_proto_goTypes = []any{ (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext } var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{ @@ -154,7 +154,7 @@ func file_google_monitoring_v3_span_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*SpanContext); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go index 69124603ba..e0b9e4a385 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/uptime.proto package monitoringpb @@ -293,7 +293,7 @@ func (x UptimeCheckConfig_CheckerType) Number() protoreflect.EnumNumber { // Deprecated: Use UptimeCheckConfig_CheckerType.Descriptor instead. func (UptimeCheckConfig_CheckerType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} } // The HTTP request method options. @@ -346,7 +346,7 @@ func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNum // Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead. func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} } // Header options corresponding to the content type of a HTTP request body. @@ -402,7 +402,7 @@ func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumbe // Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead. func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} } // An HTTP status code class. @@ -471,7 +471,56 @@ func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Number() pro // Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.Descriptor instead. 
func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 1, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1, 0} +} + +// Type of authentication. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType int32 + +const ( + // Default value, will result in OIDC Authentication. + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 0 + // OIDC Authentication + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 1 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType. +var ( + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_name = map[int32]string{ + 0: "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED", + 1: "OIDC_TOKEN", + } + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_value = map[string]int32{ + "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED": 0, + "OIDC_TOKEN": 1, + } +) + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Enum() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + p := new(UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[7] +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2, 0} } // Options to perform content matching. 
@@ -542,11 +591,11 @@ func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string { } func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor() + return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor() } func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[7] + return &file_google_monitoring_v3_uptime_proto_enumTypes[8] } func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber { @@ -555,7 +604,7 @@ func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoref // Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead. func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} } // Options to perform JSONPath content matching. @@ -599,11 +648,11 @@ func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) } func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor() + return file_google_monitoring_v3_uptime_proto_enumTypes[9].Descriptor() } func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[8] + return &file_google_monitoring_v3_uptime_proto_enumTypes[9] } func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Number() protoreflect.EnumNumber { @@ -612,7 +661,7 @@ func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) // Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.Descriptor instead. func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4, 0, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0, 0} } // An internal checker allows Uptime checks to run on private/internal GCP @@ -722,6 +771,77 @@ func (x *InternalChecker) GetState() InternalChecker_State { return InternalChecker_UNSPECIFIED } +// Describes a Synthetic Monitor to be invoked by Uptime. +type SyntheticMonitorTarget struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a Synthetic Monitor's execution stack. 
+ // + // Types that are assignable to Target: + // + // *SyntheticMonitorTarget_CloudFunctionV2 + Target isSyntheticMonitorTarget_Target `protobuf_oneof:"target"` +} + +func (x *SyntheticMonitorTarget) Reset() { + *x = SyntheticMonitorTarget{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget) ProtoMessage() {} + +func (x *SyntheticMonitorTarget) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget.ProtoReflect.Descriptor instead. +func (*SyntheticMonitorTarget) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +func (m *SyntheticMonitorTarget) GetTarget() isSyntheticMonitorTarget_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *SyntheticMonitorTarget) GetCloudFunctionV2() *SyntheticMonitorTarget_CloudFunctionV2Target { + if x, ok := x.GetTarget().(*SyntheticMonitorTarget_CloudFunctionV2); ok { + return x.CloudFunctionV2 + } + return nil +} + +type isSyntheticMonitorTarget_Target interface { + isSyntheticMonitorTarget_Target() +} + +type SyntheticMonitorTarget_CloudFunctionV2 struct { + // Target a Synthetic Monitor GCFv2 instance. + CloudFunctionV2 *SyntheticMonitorTarget_CloudFunctionV2Target `protobuf:"bytes,1,opt,name=cloud_function_v2,json=cloudFunctionV2,proto3,oneof"` +} + +func (*SyntheticMonitorTarget_CloudFunctionV2) isSyntheticMonitorTarget_Target() {} + // This message configures which resources and services to monitor for // availability. type UptimeCheckConfig struct { @@ -729,7 +849,8 @@ type UptimeCheckConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // A unique resource name for this Uptime check configuration. The format is: + // Identifier. A unique resource name for this Uptime check configuration. The + // format is: // // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] // @@ -750,6 +871,7 @@ type UptimeCheckConfig struct { // // *UptimeCheckConfig_MonitoredResource // *UptimeCheckConfig_ResourceGroup_ + // *UptimeCheckConfig_SyntheticMonitor Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` // The type of Uptime check request. 
// @@ -807,7 +929,7 @@ type UptimeCheckConfig struct { func (x *UptimeCheckConfig) Reset() { *x = UptimeCheckConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -820,7 +942,7 @@ func (x *UptimeCheckConfig) String() string { func (*UptimeCheckConfig) ProtoMessage() {} func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -833,7 +955,7 @@ func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} } func (x *UptimeCheckConfig) GetName() string { @@ -871,6 +993,13 @@ func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup return nil } +func (x *UptimeCheckConfig) GetSyntheticMonitor() *SyntheticMonitorTarget { + if x, ok := x.GetResource().(*UptimeCheckConfig_SyntheticMonitor); ok { + return x.SyntheticMonitor + } + return nil +} + func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { if m != nil { return m.CheckRequestType @@ -976,10 +1105,17 @@ type UptimeCheckConfig_ResourceGroup_ struct { ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` } +type UptimeCheckConfig_SyntheticMonitor struct { + // Specifies a Synthetic Monitor to invoke. + SyntheticMonitor *SyntheticMonitorTarget `protobuf:"bytes,21,opt,name=synthetic_monitor,json=syntheticMonitor,proto3,oneof"` +} + func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} +func (*UptimeCheckConfig_SyntheticMonitor) isUptimeCheckConfig_Resource() {} + type isUptimeCheckConfig_CheckRequestType interface { isUptimeCheckConfig_CheckRequestType() } @@ -1022,7 +1158,7 @@ type UptimeCheckIp struct { func (x *UptimeCheckIp) Reset() { *x = UptimeCheckIp{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1035,7 +1171,7 @@ func (x *UptimeCheckIp) String() string { func (*UptimeCheckIp) ProtoMessage() {} func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1048,7 +1184,7 @@ func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead. 
func (*UptimeCheckIp) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{3} } func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion { @@ -1072,6 +1208,69 @@ func (x *UptimeCheckIp) GetIpAddress() string { return "" } +// A Synthetic Monitor deployed to a Cloud Functions V2 instance. +type SyntheticMonitorTarget_CloudFunctionV2Target struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Fully qualified GCFv2 resource name + // i.e. `projects/{project}/locations/{location}/functions/{function}` + // Required. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The `cloud_run_revision` Monitored Resource associated with + // the GCFv2. The Synthetic Monitor execution results (metrics, logs, and + // spans) are reported against this Monitored Resource. This field is output + // only. + CloudRunRevision *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=cloud_run_revision,json=cloudRunRevision,proto3" json:"cloud_run_revision,omitempty"` +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) Reset() { + *x = SyntheticMonitorTarget_CloudFunctionV2Target{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget_CloudFunctionV2Target) ProtoMessage() {} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget_CloudFunctionV2Target.ProtoReflect.Descriptor instead. +func (*SyntheticMonitorTarget_CloudFunctionV2Target) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetCloudRunRevision() *monitoredres.MonitoredResource { + if x != nil { + return x.CloudRunRevision + } + return nil +} + // The resource submessage for group checks. It can be used instead of a // monitored resource, when multiple resources are being monitored. 
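// --- Illustrative sketch, not part of the vendored diff above ---
// The hunks above add SyntheticMonitorTarget and wire it into the
// UptimeCheckConfig `resource` oneof. A minimal sketch of how those new
// types compose, using only identifiers introduced in this diff; the
// project, location, and function names are placeholders, and
// cloud_run_revision is output only so it is left unset.
package main

import (
	"fmt"

	"cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	cfg := &monitoringpb.UptimeCheckConfig{
		// Identifier; normally assigned by the service on creation (placeholder here).
		Name: "projects/my-project/uptimeCheckConfigs/example-id",
		// The new SyntheticMonitor variant of the `resource` oneof targets a
		// Synthetic Monitor running on a Cloud Functions V2 instance.
		Resource: &monitoringpb.UptimeCheckConfig_SyntheticMonitor{
			SyntheticMonitor: &monitoringpb.SyntheticMonitorTarget{
				Target: &monitoringpb.SyntheticMonitorTarget_CloudFunctionV2{
					CloudFunctionV2: &monitoringpb.SyntheticMonitorTarget_CloudFunctionV2Target{
						// Required: fully qualified GCFv2 resource name (placeholder values).
						Name: "projects/my-project/locations/us-central1/functions/my-synthetic",
					},
				},
			},
		},
	}

	// The generated oneof accessors return nil when a different variant is set.
	fmt.Println(cfg.GetSyntheticMonitor().GetCloudFunctionV2().GetName())
}
// --- end sketch ---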
type UptimeCheckConfig_ResourceGroup struct { @@ -1090,7 +1289,7 @@ type UptimeCheckConfig_ResourceGroup struct { func (x *UptimeCheckConfig_ResourceGroup) Reset() { *x = UptimeCheckConfig_ResourceGroup{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1103,7 +1302,7 @@ func (x *UptimeCheckConfig_ResourceGroup) String() string { func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1116,7 +1315,7 @@ func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} } func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string { @@ -1148,7 +1347,7 @@ type UptimeCheckConfig_PingConfig struct { func (x *UptimeCheckConfig_PingConfig) Reset() { *x = UptimeCheckConfig_PingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1161,7 +1360,7 @@ func (x *UptimeCheckConfig_PingConfig) String() string { func (*UptimeCheckConfig_PingConfig) ProtoMessage() {} func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1174,7 +1373,7 @@ func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_PingConfig.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_PingConfig) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 1} } func (x *UptimeCheckConfig_PingConfig) GetPingsCount() int32 { @@ -1208,6 +1407,7 @@ type UptimeCheckConfig_HttpCheck struct { Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // The authentication information. Optional when creating an HTTP check; // defaults to empty. + // Do not set both `auth_method` and `auth_info`. AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` // Boolean specifying whether to encrypt the header information. 
// Encryption should be specified for any headers related to authentication @@ -1262,12 +1462,20 @@ type UptimeCheckConfig_HttpCheck struct { AcceptedResponseStatusCodes []*UptimeCheckConfig_HttpCheck_ResponseStatusCode `protobuf:"bytes,11,rep,name=accepted_response_status_codes,json=acceptedResponseStatusCodes,proto3" json:"accepted_response_status_codes,omitempty"` // Contains information needed to add pings to an HTTP check. PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,12,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` + // This field is optional and should be set only by users interested in + // an authenticated uptime check. + // Do not set both `auth_method` and `auth_info`. + // + // Types that are assignable to AuthMethod: + // + // *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ + AuthMethod isUptimeCheckConfig_HttpCheck_AuthMethod `protobuf_oneof:"auth_method"` } func (x *UptimeCheckConfig_HttpCheck) Reset() { *x = UptimeCheckConfig_HttpCheck{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1280,7 +1488,7 @@ func (x *UptimeCheckConfig_HttpCheck) String() string { func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1293,7 +1501,7 @@ func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2} } func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod { @@ -1387,6 +1595,34 @@ func (x *UptimeCheckConfig_HttpCheck) GetPingConfig() *UptimeCheckConfig_PingCon return nil } +func (m *UptimeCheckConfig_HttpCheck) GetAuthMethod() isUptimeCheckConfig_HttpCheck_AuthMethod { + if m != nil { + return m.AuthMethod + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetServiceAgentAuthentication() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication { + if x, ok := x.GetAuthMethod().(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_); ok { + return x.ServiceAgentAuthentication + } + return nil +} + +type isUptimeCheckConfig_HttpCheck_AuthMethod interface { + isUptimeCheckConfig_HttpCheck_AuthMethod() +} + +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ struct { + // If specified, Uptime will generate and attach an OIDC JWT token for the + // Monitoring service agent service account as an `Authorization` header + // in the HTTP request when probing. + ServiceAgentAuthentication *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication `protobuf:"bytes,14,opt,name=service_agent_authentication,json=serviceAgentAuthentication,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_) isUptimeCheckConfig_HttpCheck_AuthMethod() { +} + // Information required for a TCP Uptime check request. 
type UptimeCheckConfig_TcpCheck struct { state protoimpl.MessageState @@ -1404,7 +1640,7 @@ type UptimeCheckConfig_TcpCheck struct { func (x *UptimeCheckConfig_TcpCheck) Reset() { *x = UptimeCheckConfig_TcpCheck{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1417,7 +1653,7 @@ func (x *UptimeCheckConfig_TcpCheck) String() string { func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1430,7 +1666,7 @@ func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 3} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 3} } func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 { @@ -1476,7 +1712,7 @@ type UptimeCheckConfig_ContentMatcher struct { func (x *UptimeCheckConfig_ContentMatcher) Reset() { *x = UptimeCheckConfig_ContentMatcher{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1489,7 +1725,7 @@ func (x *UptimeCheckConfig_ContentMatcher) String() string { func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1502,7 +1738,7 @@ func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead. 
func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4} } func (x *UptimeCheckConfig_ContentMatcher) GetContent() string { @@ -1563,7 +1799,7 @@ type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1576,7 +1812,7 @@ func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1589,7 +1825,7 @@ func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protore // Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} } func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { @@ -1625,7 +1861,7 @@ type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct { func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) Reset() { *x = UptimeCheckConfig_HttpCheck_ResponseStatusCode{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[10] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1638,7 +1874,7 @@ func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) String() string { func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoMessage() {} func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[10] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1651,7 +1887,7 @@ func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoref // Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode.ProtoReflect.Descriptor instead. 
func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} } func (m *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusCode() isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode { @@ -1695,6 +1931,59 @@ func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue) isUptimeCheck func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { } +// Contains information needed for generating an +// [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// The OIDC token will be generated for the Monitoring service agent service +// account. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of authentication. + Type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType `protobuf:"varint,1,opt,name=type,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType" json:"type,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) GetType() UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + if x != nil { + return x.Type + } + return UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED +} + // Information needed to perform a JSONPath content match. // Used for `ContentMatcherOption::MATCHES_JSON_PATH` and // `ContentMatcherOption::NOT_MATCHES_JSON_PATH`. 
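// --- Illustrative sketch, not part of the vendored diff above ---
// The hunks above add the `auth_method` oneof and its
// ServiceAgentAuthentication variant to HttpCheck. A minimal sketch of
// selecting OIDC service-agent authentication; per the field comments,
// `auth_method` and `auth_info` must not both be set, so AuthInfo stays nil.
// The path and port values are placeholders.
package main

import (
	"fmt"

	"cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	check := &monitoringpb.UptimeCheckConfig_HttpCheck{
		Path: "/healthz",
		Port: 443,
		// New in this revision: have Uptime attach an OIDC JWT for the
		// Monitoring service agent service account as an Authorization header.
		AuthMethod: &monitoringpb.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_{
			ServiceAgentAuthentication: &monitoringpb.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{
				// OIDC_TOKEN is the only non-default type defined by this revision.
				Type: monitoringpb.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN,
			},
		},
	}

	// Prints the selected authentication type via the generated oneof accessor.
	fmt.Println(check.GetServiceAgentAuthentication().GetType())
}
// --- end sketch ---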
@@ -1714,7 +2003,7 @@ type UptimeCheckConfig_ContentMatcher_JsonPathMatcher struct { func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Reset() { *x = UptimeCheckConfig_ContentMatcher_JsonPathMatcher{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1727,7 +2016,7 @@ func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) String() string { func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoMessage() {} func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1740,7 +2029,7 @@ func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protor // Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} } func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonPath() string { @@ -1789,174 +2078,227 @@ var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x8b, - 0x1f, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x52, 0x0a, 0x0a, 0x68, - 0x74, 0x74, 
0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc4, + 0x02, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x32, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, + 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x56, 0x32, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x1a, 0xad, 0x01, 0x0a, 0x15, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2e, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x28, 0x0a, 0x26, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x52, 0x75, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x94, 0x23, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 
0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, - 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5b, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x74, 0x68, + 0x65, 0x74, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, + 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x10, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, + 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, + 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, 0x68, 
0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, + 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 
0x6f, 0x75, + 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x1a, 0x2d, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x1a, 0xef, 0x0e, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, + 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, + 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, + 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x89, 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x52, 0x1b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, + 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, - 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x90, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, - 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x2d, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, - 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xc8, 0x0b, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 
0x6f, 0x64, 0x12, 0x17, 0x0a, - 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, - 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, - 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x75, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, - 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 
- 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, - 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, - 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x89, 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x1b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, - 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, - 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x75, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 
0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x10, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x32, 0x58, 0x58, 0x10, + 0xc8, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, + 0x53, 0x53, 0x5f, 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, 0x58, 0x10, 0x90, 0x03, + 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, + 0x5f, 0x35, 0x58, 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0xe8, 0x07, 0x42, 0x0d, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x82, 0x02, + 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, - 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, - 0x32, 0x58, 0x58, 0x10, 0xc8, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, - 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, - 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, - 0x58, 0x10, 0x90, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, - 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x35, 0x58, 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, - 0xe8, 0x07, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, - 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, - 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, - 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, - 0x11, 0x0a, 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, - 0x10, 0x02, 0x1a, 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x63, 0x0a, + 0x1e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x31, 0x0a, 0x2d, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, 0x47, 0x45, 0x4e, 0x54, + 0x5f, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x49, 0x44, 0x43, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, + 0x10, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, + 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, + 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, + 0x44, 0x10, 0x02, 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x1a, 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -2063,19 +2405,26 @@ var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, - 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, - 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, - 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0xaf, 0x02, 0xea, 0x41, 0x66, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2090,65 +2439,74 @@ func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte { return file_google_monitoring_v3_uptime_proto_rawDescData } -var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 9) -var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_google_monitoring_v3_uptime_proto_goTypes = []interface{}{ - (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion - (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType - (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State - (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType - (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod - (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType - (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass - (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 7: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption - (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption - (*InternalChecker)(nil), // 9: google.monitoring.v3.InternalChecker - (*UptimeCheckConfig)(nil), // 10: google.monitoring.v3.UptimeCheckConfig - (*UptimeCheckIp)(nil), // 11: google.monitoring.v3.UptimeCheckIp - (*UptimeCheckConfig_ResourceGroup)(nil), // 12: google.monitoring.v3.UptimeCheckConfig.ResourceGroup - (*UptimeCheckConfig_PingConfig)(nil), // 13: google.monitoring.v3.UptimeCheckConfig.PingConfig - (*UptimeCheckConfig_HttpCheck)(nil), // 14: google.monitoring.v3.UptimeCheckConfig.HttpCheck - (*UptimeCheckConfig_TcpCheck)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.TcpCheck - (*UptimeCheckConfig_ContentMatcher)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.ContentMatcher - nil, // 17: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry - (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication - (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode - nil, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry - (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher - (*monitoredres.MonitoredResource)(nil), // 22: google.api.MonitoredResource - (*durationpb.Duration)(nil), // 23: google.protobuf.Duration +var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_monitoring_v3_uptime_proto_goTypes = []any{ + (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion + (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType + (InternalChecker_State)(0), // 2: 
google.monitoring.v3.InternalChecker.State + (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType + (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)(0), // 7: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 9: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + (*InternalChecker)(nil), // 10: google.monitoring.v3.InternalChecker + (*SyntheticMonitorTarget)(nil), // 11: google.monitoring.v3.SyntheticMonitorTarget + (*UptimeCheckConfig)(nil), // 12: google.monitoring.v3.UptimeCheckConfig + (*UptimeCheckIp)(nil), // 13: google.monitoring.v3.UptimeCheckIp + (*SyntheticMonitorTarget_CloudFunctionV2Target)(nil), // 14: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + (*UptimeCheckConfig_ResourceGroup)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.ResourceGroup + (*UptimeCheckConfig_PingConfig)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.PingConfig + (*UptimeCheckConfig_HttpCheck)(nil), // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck + (*UptimeCheckConfig_TcpCheck)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.TcpCheck + (*UptimeCheckConfig_ContentMatcher)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.ContentMatcher + nil, // 20: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication)(nil), // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + nil, // 24: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + (*monitoredres.MonitoredResource)(nil), // 26: google.api.MonitoredResource + (*durationpb.Duration)(nil), // 27: google.protobuf.Duration } var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{ 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State - 22, // 1: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource - 12, // 2: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup - 14, // 3: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck - 15, // 4: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck - 23, // 5: google.monitoring.v3.UptimeCheckConfig.period:type_name -> 
google.protobuf.Duration - 23, // 6: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration - 16, // 7: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher - 3, // 8: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType - 0, // 9: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion - 9, // 10: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker - 17, // 11: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry - 0, // 12: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion - 1, // 13: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType - 4, // 14: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod - 18, // 15: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication - 20, // 16: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry - 5, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType - 19, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode - 13, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig - 13, // 20: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig - 7, // 21: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption - 21, // 22: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher - 6, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass - 8, // 24: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption - 25, // [25:25] is the sub-list for method output_type - 25, // [25:25] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 14, // 1: google.monitoring.v3.SyntheticMonitorTarget.cloud_function_v2:type_name -> google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + 26, // 2: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource + 15, // 3: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup + 11, // 4: google.monitoring.v3.UptimeCheckConfig.synthetic_monitor:type_name -> google.monitoring.v3.SyntheticMonitorTarget + 
17, // 5: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck + 18, // 6: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck + 27, // 7: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration + 27, // 8: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration + 19, // 9: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher + 3, // 10: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType + 0, // 11: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion + 10, // 12: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker + 20, // 13: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + 0, // 14: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion + 26, // 15: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target.cloud_run_revision:type_name -> google.api.MonitoredResource + 1, // 16: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType + 4, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + 21, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + 24, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + 5, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + 22, // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + 16, // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig + 23, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.service_agent_authentication:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + 16, // 24: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig + 8, // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + 25, // 26: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + 6, // 27: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + 7, // 28: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + 9, // 29: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> 
google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name } func init() { file_google_monitoring_v3_uptime_proto_init() } @@ -2157,7 +2515,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*InternalChecker); i { case 0: return &v.state @@ -2169,7 +2527,19 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig); i { case 0: return &v.state @@ -2181,7 +2551,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckIp); i { case 0: return &v.state @@ -2193,7 +2563,19 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget_CloudFunctionV2Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_ResourceGroup); i { case 0: return &v.state @@ -2205,7 +2587,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_PingConfig); i { case 0: return &v.state @@ -2217,7 +2599,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_HttpCheck); i { case 0: return &v.state @@ -2229,7 +2611,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_TcpCheck); i { case 0: return &v.state @@ -2241,7 +2623,7 @@ func file_google_monitoring_v3_uptime_proto_init() 
{ return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_ContentMatcher); i { case 0: return &v.state @@ -2253,7 +2635,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_HttpCheck_BasicAuthentication); i { case 0: return &v.state @@ -2265,7 +2647,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_HttpCheck_ResponseStatusCode); i { case 0: return &v.state @@ -2277,7 +2659,19 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher); i { case 0: return &v.state @@ -2290,16 +2684,23 @@ func file_google_monitoring_v3_uptime_proto_init() { } } } - file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []any{ + (*SyntheticMonitorTarget_CloudFunctionV2)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].OneofWrappers = []any{ (*UptimeCheckConfig_MonitoredResource)(nil), (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_SyntheticMonitor)(nil), (*UptimeCheckConfig_HttpCheck_)(nil), (*UptimeCheckConfig_TcpCheck_)(nil), } - file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].OneofWrappers = []any{ (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_)(nil), } - file_google_monitoring_v3_uptime_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_google_monitoring_v3_uptime_proto_msgTypes[12].OneofWrappers = []any{ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue)(nil), (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_)(nil), } @@ -2308,8 +2709,8 @@ func file_google_monitoring_v3_uptime_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc, - NumEnums: 9, - NumMessages: 13, + NumEnums: 10, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go index d0ca8ee494..d4ba902fb0 100644 --- 
a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/uptime_service.proto package monitoringpb @@ -778,7 +778,7 @@ func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_google_monitoring_v3_uptime_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_uptime_service_proto_goTypes = []any{ (*ListUptimeCheckConfigsRequest)(nil), // 0: google.monitoring.v3.ListUptimeCheckConfigsRequest (*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest @@ -824,7 +824,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { } file_google_monitoring_v3_uptime_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckConfigsRequest); i { case 0: return &v.state @@ -836,7 +836,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckConfigsResponse); i { case 0: return &v.state @@ -848,7 +848,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetUptimeCheckConfigRequest); i { case 0: return &v.state @@ -860,7 +860,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateUptimeCheckConfigRequest); i { case 0: return &v.state @@ -872,7 +872,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateUptimeCheckConfigRequest); i { case 0: return &v.state @@ -884,7 +884,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := 
v.(*DeleteUptimeCheckConfigRequest); i { case 0: return &v.state @@ -896,7 +896,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckIpsRequest); i { case 0: return &v.state @@ -908,7 +908,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckIpsResponse); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go index b1e6939033..6f7fe5d7c4 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go @@ -59,6 +59,7 @@ func defaultNotificationChannelGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -377,7 +378,9 @@ func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn { func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go index bd8abc9b27..3c111637e1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go @@ -48,6 +48,7 @@ func defaultQueryGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -70,7 +71,7 @@ type internalQueryClient interface { // QueryClient is a client for interacting with Cloud Monitoring API. // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. // -// The QueryService API is used to manage time series data in Stackdriver +// The QueryService API is used to manage time series data in Cloud // Monitoring. Time series data is a collection of data points that describes // the time-varying values of a metric. 
type QueryClient struct { @@ -104,7 +105,7 @@ func (c *QueryClient) Connection() *grpc.ClientConn { return c.internalClient.Connection() } -// QueryTimeSeries queries time series using Monitoring Query Language. This method does not require a Workspace. +// QueryTimeSeries queries time series using Monitoring Query Language. func (c *QueryClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator { return c.internalClient.QueryTimeSeries(ctx, req, opts...) } @@ -129,7 +130,7 @@ type queryGRPCClient struct { // NewQueryClient creates a new query service client based on gRPC. // The returned client must be Closed when it is done being used to clean up its underlying connections. // -// The QueryService API is used to manage time series data in Stackdriver +// The QueryService API is used to manage time series data in Cloud // Monitoring. Time series data is a collection of data points that describes // the time-varying values of a metric. func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) { @@ -174,7 +175,9 @@ func (c *queryGRPCClient) Connection() *grpc.ClientConn { func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go index d117730786..7776c425f9 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go @@ -59,6 +59,7 @@ func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -174,9 +175,9 @@ type internalServiceMonitoringClient interface { // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. // // The Cloud Monitoring Service-Oriented Monitoring API has endpoints for -// managing and querying aspects of a workspace’s services. These include the -// Service's monitored resources, its Service-Level Objectives, and a taxonomy -// of categorized Health Metrics. +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. type ServiceMonitoringClient struct { // The internal transport-dependent client. internalClient internalServiceMonitoringClient @@ -218,7 +219,7 @@ func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitorin return c.internalClient.GetService(ctx, req, opts...) } -// ListServices list Services for this workspace. +// ListServices list Services for this Metrics Scope. 
func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { return c.internalClient.ListServices(ctx, req, opts...) } @@ -279,9 +280,9 @@ type serviceMonitoringGRPCClient struct { // The returned client must be Closed when it is done being used to clean up its underlying connections. // // The Cloud Monitoring Service-Oriented Monitoring API has endpoints for -// managing and querying aspects of a workspace’s services. These include the -// Service's monitored resources, its Service-Level Objectives, and a taxonomy -// of categorized Health Metrics. +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) { clientOpts := defaultServiceMonitoringGRPCClientOptions() if newServiceMonitoringClientHook != nil { @@ -324,7 +325,9 @@ func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn { func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go index 958406f342..32cad577e3 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go @@ -53,6 +53,7 @@ func defaultSnoozeGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -230,7 +231,9 @@ func (c *snoozeGRPCClient) Connection() *grpc.ClientConn { func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go index 05fc20350d..d381525137 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go @@ -55,6 +55,7 @@ func defaultUptimeCheckGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -280,7 +281,9 @@ func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn { func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go index ddddbd21f2..b3799f0518 100644 --- a/vendor/cloud.google.com/go/monitoring/internal/version.go +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.18.0" +const Version = "1.20.4" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index a6675492b1..f88b277ab6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,61 @@ # Release History +## 1.16.0 (2024-10-17) + +### Features Added + +* Added field `Kind` to `runtime.StartSpanOptions` to allow a kind to be set when starting a span. + +### Bugs Fixed + +* `BearerTokenPolicy` now rewinds request bodies before retrying + +## 1.15.0 (2024-10-14) + +### Features Added + +* `BearerTokenPolicy` handles CAE claims challenges + +### Bugs Fixed + +* Omit the `ResponseError.RawResponse` field from JSON marshaling so instances can be marshaled. +* Fixed an integer overflow in the retry policy. + +### Other Changes + +* Update dependencies. + +## 1.14.0 (2024-08-07) + +### Features Added + +* Added field `Attributes` to `runtime.StartSpanOptions` to simplify creating spans with attributes. + +### Other Changes + +* Include the HTTP verb and URL in `log.EventRetryPolicy` log entries so it's clear which operation is being retried. + +## 1.13.0 (2024-07-16) + +### Features Added + +- Added runtime.NewRequestFromRequest(), allowing for a policy.Request to be created from an existing *http.Request. + +## 1.12.0 (2024-06-06) + +### Features Added + +* Added field `StatusCodes` to `runtime.FetcherForNextLinkOptions` allowing for additional HTTP status codes indicating success. +* Added func `NewUUID` to the `runtime` package for generating UUIDs. + +### Bugs Fixed + +* Fixed an issue that prevented pollers using the `Operation-Location` strategy from unmarshaling the final result in some cases. 
+ +### Other Changes + +* Updated dependencies. + ## 1.11.1 (2024-04-02) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index 187fe82b97..00f2d5a0ab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -192,7 +192,7 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err } if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) { - //provider resource can only be on a tenant or a subscription parent + // provider resource can only be on a tenant or a subscription parent if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() { return nil, fmt.Errorf("invalid resource ID: %s", id) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go index 039b758bf9..6a7c916b43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -34,18 +34,22 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP, Scopes: []string{conf.Audience + "/.default"}, }) + // we don't want to modify the underlying array in plOpts.PerRetry perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) copy(perRetry, plOpts.PerRetry) - plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + perRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + plOpts.PerRetry = perRetry if !options.DisableRPRegistration { regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions} regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts) if err != nil { return azruntime.Pipeline{}, err } + // we don't want to modify the underlying array in plOpts.PerCall perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1) copy(perCall, plOpts.PerCall) - plOpts.PerCall = append(perCall, regPolicy) + perCall = append(perCall, regPolicy) + plOpts.PerCall = perCall } if plOpts.APIVersion.Name == "" { plOpts.APIVersion.Name = "api-version" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go index 765fbc6843..8ad3d5400e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -5,7 +5,6 @@ package runtime import ( "context" - "encoding/base64" "fmt" "net/http" "strings" @@ -66,31 +65,16 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP, AuthorizationHandler: azpolicy.AuthorizationHandler{ - OnChallenge: p.onChallenge, - OnRequest: p.onRequest, + 
OnRequest: p.onRequest, }, }) return p } -func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error { - challenge := res.Header.Get(shared.HeaderWWWAuthenticate) - claims, err := parseChallenge(challenge) - if err != nil { - // the challenge contains claims we can't parse - return err - } else if claims != "" { - // request a new token having the specified claims, send the request again - return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes}) - } - // auth challenge didn't include claims, so this is a simple authorization failure - return azruntime.NewResponseError(res) -} - // onRequest authorizes requests with one or more bearer tokens func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error { // authorize the request with a token for the primary tenant - err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes}) + err := authNZ(azpolicy.TokenRequestOptions{Scopes: b.scopes}) if err != nil || len(b.auxResources) == 0 { return err } @@ -116,31 +100,3 @@ func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolic func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) { return b.btp.Do(req) } - -// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token -// that will satisfy conditional access policies. It returns a non-nil error when the given value contains -// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error. -func parseChallenge(wwwAuthenticate string) (string, error) { - claims := "" - var err error - for _, param := range strings.Split(wwwAuthenticate, ",") { - if _, after, found := strings.Cut(param, "claims="); found { - if claims != "" { - // The header contains multiple challenges, at least two of which specify claims. The specs allow this - // but it's unclear what a client should do in this case and there's as yet no concrete example of it. - err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate) - break - } - // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded - claims = strings.Trim(after, `\"=`) - // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42" - if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil { - claims = string(b) - } else { - err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate) - break - } - } - } - return claims, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go index 17bd50c673..03cb227d0d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -11,4 +11,7 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" // ResponseError is returned when a request is made to a service and // the service returns a non-success HTTP status code. // Use errors.As() to access this type in the error chain. +// +// When marshaling instances, the RawResponse field will be omitted. +// However, the contents returned by Error() will be preserved. 
type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 3041984d9b..e3e2d4e588 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -7,6 +7,7 @@ package exported import ( + "bytes" "context" "encoding/base64" "errors" @@ -67,6 +68,42 @@ func (ov opValues) get(value any) bool { return ok } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +// Exported as runtime.NewRequestFromRequest(). +func NewRequestFromRequest(req *http.Request) (*Request, error) { + policyReq := &Request{req: req} + + if req.Body != nil { + // we can avoid a body copy here if the underlying stream is already a + // ReadSeekCloser. + readSeekCloser, isReadSeekCloser := req.Body.(io.ReadSeekCloser) + + if !isReadSeekCloser { + // since this is an already populated http.Request we want to copy + // over its body, if it has one. + bodyBytes, err := io.ReadAll(req.Body) + + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + readSeekCloser = NopCloser(bytes.NewReader(bodyBytes)) + } + + // SetBody also takes care of updating the http.Request's body + // as well, so they should stay in-sync from this point. + if err := policyReq.SetBody(readSeekCloser, req.Header.Get("Content-Type")); err != nil { + return nil, err + } + } + + return policyReq, nil +} + // NewRequest creates a new Request with the specified input. // Exported as runtime.NewRequest(). func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index 08a9545873..8aec256bd0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -117,12 +117,18 @@ type ResponseError struct { StatusCode int // RawResponse is the underlying HTTP response. - RawResponse *http.Response + RawResponse *http.Response `json:"-"` + + errMsg string } // Error implements the error interface for type ResponseError. // Note that the message contents are not contractual and can change over time. 
func (e *ResponseError) Error() string { + if e.errMsg != "" { + return e.errMsg + } + const separator = "--------------------------------------------------------------------------------" // write the request method and URL with response status code msg := &bytes.Buffer{} @@ -163,5 +169,33 @@ func (e *ResponseError) Error() string { } fmt.Fprintln(msg, separator) - return msg.String() + e.errMsg = msg.String() + return e.errMsg +} + +// internal type used for marshaling/unmarshaling +type responseError struct { + ErrorCode string `json:"errorCode"` + StatusCode int `json:"statusCode"` + ErrorMessage string `json:"errorMessage"` +} + +func (e ResponseError) MarshalJSON() ([]byte, error) { + return json.Marshal(responseError{ + ErrorCode: e.ErrorCode, + StatusCode: e.StatusCode, + ErrorMessage: e.Error(), + }) +} + +func (e *ResponseError) UnmarshalJSON(data []byte) error { + re := responseError{} + if err := json.Unmarshal(data, &re); err != nil { + return err + } + + e.ErrorCode = re.ErrorCode + e.StatusCode = re.StatusCode + e.errMsg = re.ErrorMessage + return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index ccd4794e9e..a534627605 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -155,5 +155,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 0d781b31d0..8751b05147 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -131,5 +131,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go index 51aede8a2b..7f8d11b8ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -124,7 +124,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { return exported.NewResponseError(p.resp) } - return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), "", out) } // SanitizePollerPath removes any fake-appended suffix from a URL's path. 
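The `ResponseError` changes above (the `json:"-"` tag on `RawResponse` plus the new `MarshalJSON`/`UnmarshalJSON`) back the 1.15.0 changelog note that instances can now be marshaled. A small sketch of the intended use, assuming a caller that wants to log service errors as JSON; the helper name and the sample error are illustrative:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// logServiceError marshals an *azcore.ResponseError to JSON. With RawResponse
// excluded from marshaling, ErrorCode, StatusCode, and the Error() text are
// what survive the round trip.
func logServiceError(err error) {
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) {
		fmt.Println("not a service error:", err)
		return
	}
	b, jerr := json.Marshal(respErr)
	if jerr != nil {
		fmt.Println("marshal failed:", jerr)
		return
	}
	fmt.Println(string(b))
}

func main() {
	// A non-service error simply falls through to the plain-text branch.
	logServiceError(errors.New("dial tcp: connection refused"))
}
```

The marshaled form carries `errorCode`, `statusCode`, and `errorMessage`, matching the internal `responseError` struct added above.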
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index 7a56c5211b..048285275d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -119,5 +119,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index ac1c0efb5a..03699fd762 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -115,10 +115,13 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { func (p *Poller[T]) Result(ctx context.Context, out *T) error { var req *exported.Request var err error + + // when the payload is included with the status monitor on + // terminal success it's in the "result" JSON property + payloadPath := "result" + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) - } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { - // no final GET required, terminal response should have it } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { return rlErr } else if rl != "" { @@ -134,6 +137,8 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { // if a final GET request has been created, execute it if req != nil { + // no JSON path when making a final GET request + payloadPath = "" resp, err := p.pl.Do(req) if err != nil { return err @@ -141,5 +146,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index eb3cf651db..6a7a32e034 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -159,7 +159,7 @@ func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, up // ResultHelper processes the response as success or failure. // In the success case, it unmarshals the payload into either a new instance of T or out. // In the failure case, it creates an *azcore.Response error from the response. 
-func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { +func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out *T) error { // short-circuit the simple success case with no response body to unmarshal if resp.StatusCode == http.StatusNoContent { return nil @@ -176,6 +176,18 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { if err != nil { return err } + + if jsonPath != "" && len(payload) > 0 { + // extract the payload from the specified JSON path. + // do this before the zero-length check in case there + // is no payload. + jsonBody := map[string]json.RawMessage{} + if err = json.Unmarshal(payload, &jsonBody); err != nil { + return err + } + payload = jsonBody[jsonPath] + } + if len(payload) == 0 { return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 03691cbf02..9f53770e5b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.11.1" + Version = "v1.16.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index 8d98453588..bb37a5efb4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -161,19 +161,20 @@ type BearerTokenOptions struct { // AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. type AuthorizationHandler struct { - // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token - // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, - // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't - // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a - // token from its credential according to its configuration. + // OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest + // whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request + // with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send + // the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token + // from its credential according to its configuration. OnRequest func(*Request, func(TokenRequestOptions) error) error - // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the - // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible - // for parsing parameters from the challenge. 
Its func parameter will authorize the request with a token from the policy's - // given credential. Implementations that need to perform I/O should use the Request's context, available from - // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, - // the policy will return any 401 response to the client. + // OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon + // receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle. + // OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the + // Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given + // TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When + // OnChallenge returns nil, the policy will send the Request again. OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index cffe692d7e..b960cff0b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -94,6 +94,10 @@ type FetcherForNextLinkOptions struct { // NextReq is the func to be called when requesting subsequent pages. // Used for paged operations that have a custom next link operation. NextReq func(context.Context, string) (*policy.Request, error) + + // StatusCodes contains additional HTTP status codes indicating success. + // The default value is http.StatusOK. + StatusCodes []int } // FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. @@ -105,10 +109,13 @@ type FetcherForNextLinkOptions struct { func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { var req *policy.Request var err error + if options == nil { + options = &FetcherForNextLinkOptions{} + } if nextLink == "" { req, err = firstReq(ctx) } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { - if options != nil && options.NextReq != nil { + if options.NextReq != nil { req, err = options.NextReq(ctx, nextLink) } else { req, err = NewRequest(ctx, http.MethodGet, nextLink) @@ -121,7 +128,9 @@ func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, first if err != nil { return nil, err } - if !HasStatusCode(resp, http.StatusOK) { + successCodes := []int{http.StatusOK} + successCodes = append(successCodes, options.StatusCodes...) + if !HasStatusCode(resp, successCodes...) 
{ return nil, NewResponseError(resp) } return resp, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index cb2a695280..b26db920b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -4,9 +4,12 @@ package runtime import ( + "encoding/base64" "errors" "net/http" + "regexp" "strings" + "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -17,6 +20,11 @@ import ( ) // BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle +// additional authentication challenges, or needing more control over authorization, should +// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions]. +// +// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation type BearerTokenPolicy struct { // mainResource is the resource to be retreived using the tenant specified in the credential mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] @@ -51,8 +59,18 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * if opts == nil { opts = &policy.BearerTokenOptions{} } + ah := opts.AuthorizationHandler + if ah.OnRequest == nil { + // Set a default OnRequest that simply requests a token with the given scopes. OnChallenge + // doesn't get a default so the policy can use a nil check to determine whether the caller + // provided an implementation. + ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + // authNZ sets EnableCAE: true in all cases, no need to duplicate that here + return authNZ(policy.TokenRequestOptions{Scopes: scopes}) + } + } return &BearerTokenPolicy{ - authzHandler: opts.AuthorizationHandler, + authzHandler: ah, cred: cred, scopes: scopes, mainResource: temporal.NewResource(acquire), @@ -63,6 +81,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * // authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { return func(tro policy.TokenRequestOptions) error { + tro.EnableCAE = true as := acquiringResourceState{p: b, req: req, tro: tro} tk, err := b.mainResource.Get(as) if err != nil { @@ -86,12 +105,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } - var err error - if b.authzHandler.OnRequest != nil { - err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) - } else { - err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) - } + err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) if err != nil { return nil, errorinfo.NonRetriableError(err) } @@ -101,17 +115,54 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } + res, err = b.handleChallenge(req, res, false) + return res, err +} + +// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling +// the AuthorizationHandler. 
It's a no-op when the response doesn't include an authentication challenge. +// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the +// AuthorizationHandler. +func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) { + var err error if res.StatusCode == http.StatusUnauthorized { b.mainResource.Expire() - if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { - if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { - res, err = req.Next() + if res.Header.Get(shared.HeaderWWWAuthenticate) != "" { + caeChallenge, parseErr := parseCAEChallenge(res) + if parseErr != nil { + return res, parseErr + } + switch { + case caeChallenge != nil: + authNZ := func(tro policy.TokenRequestOptions) error { + // Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value + // will be empty at time of writing because CAE is the only feature involving claims. If in + // the future some client needs to specify unrelated claims, this function may need to merge + // them with the challenge claims. + tro.Claims = caeChallenge.params["claims"] + return b.authenticateAndAuthorize(req)(tro) + } + if err = b.authzHandler.OnRequest(req, authNZ); err == nil { + if err = req.RewindBody(); err == nil { + res, err = req.Next() + } + } + case b.authzHandler.OnChallenge != nil && !recursed: + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + if err = req.RewindBody(); err == nil { + if res, err = req.Next(); err == nil { + res, err = b.handleChallenge(req, res, true) + } + } + } else { + // don't retry challenge handling errors + err = errorinfo.NonRetriableError(err) + } + default: + // return the response to the pipeline } } } - if err != nil { - err = errorinfo.NonRetriableError(err) - } return res, err } @@ -121,3 +172,65 @@ func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { } return nil } + +// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none). +// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError. 
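Because azcore's `BearerTokenPolicy` now resolves CAE challenges internally (and the ARM-specific `onChallenge` was deleted above), clients that still need custom challenge handling are expected to supply `policy.AuthorizationHandler.OnChallenge`. A sketch of that pattern, assuming the caller already holds an `azcore.TokenCredential`; the re-authorization logic shown is a placeholder, not code from this diff:

```go
package example

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newChallengeAwarePolicy builds a BearerTokenPolicy with a custom OnChallenge.
// CAE ("insufficient_claims") challenges are handled by the policy itself; this
// handler only sees Bearer challenges the policy can't resolve on its own.
func newChallengeAwarePolicy(cred azcore.TokenCredential, scopes []string) policy.Policy {
	opts := &policy.BearerTokenOptions{
		AuthorizationHandler: policy.AuthorizationHandler{
			OnChallenge: func(req *policy.Request, res *http.Response, authNZ func(policy.TokenRequestOptions) error) error {
				// Inspect the challenge and decide how to reauthorize. As a
				// placeholder we simply request a fresh token for the same
				// scopes; returning nil makes the policy rewind the body and
				// resend the request.
				_ = res.Header.Get("WWW-Authenticate")
				return authNZ(policy.TokenRequestOptions{Scopes: scopes})
			},
		},
	}
	return runtime.NewBearerTokenPolicy(cred, scopes, opts)
}
```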
+func parseCAEChallenge(res *http.Response) (*authChallenge, error) { + var ( + caeChallenge *authChallenge + err error + ) + for _, c := range parseChallenges(res) { + if c.scheme == "Bearer" { + if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" { + if b, de := base64.StdEncoding.DecodeString(claims); de == nil { + c.params["claims"] = string(b) + caeChallenge = &c + } else { + // don't include the decoding error because it's something + // unhelpful like "illegal base64 data at input byte 42" + err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims)) + } + break + } + } + } + return caeChallenge, err +} + +var ( + challenge, challengeParams *regexp.Regexp + once = &sync.Once{} +) + +type authChallenge struct { + scheme string + params map[string]string +} + +// parseChallenges assumes authentication challenges have quoted parameter values +func parseChallenges(res *http.Response) []authChallenge { + once.Do(func() { + // matches challenges having quoted parameters, capturing scheme and parameters + challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`) + // captures parameter names and values in a match of the above expression + challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`) + }) + parsed := []authChallenge{} + // WWW-Authenticate can have multiple values, each containing multiple challenges + for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) { + for _, sm := range challenge.FindAllStringSubmatch(h, -1) { + // sm is [challenge, scheme, params] (see regexp documentation on submatches) + c := authChallenge{ + params: make(map[string]string), + scheme: sm[1], + } + for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) { + // sm is [key="value", key, value] (see regexp documentation on submatches) + c.params[sm[1]] = sm[2] + } + parsed = append(parsed, c) + } + } + return parsed +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go index 3df1c12189..f375195c4b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go @@ -96,7 +96,10 @@ func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err erro // StartSpanOptions contains the optional values for StartSpan. type StartSpanOptions struct { - // for future expansion + // Kind indicates the kind of Span. + Kind tracing.SpanKind + // Attributes contains key-value pairs of attributes for the span. + Attributes []tracing.Attribute } // StartSpan starts a new tracing span. @@ -114,7 +117,6 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options // we MUST propagate the active tracer before returning so that the trace policy can access it ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) - const newSpanKind = tracing.SpanKindInternal if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil { // per the design guidelines, if a SDK method Foo() calls SDK method Bar(), // then the span for Bar() must be suppressed. 
however, if Bar() makes a REST @@ -126,10 +128,19 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options return ctx, func(err error) {} } } + + if options == nil { + options = &StartSpanOptions{} + } + if options.Kind == 0 { + options.Kind = tracing.SpanKindInternal + } + ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{ - Kind: newSpanKind, + Kind: options.Kind, + Attributes: options.Attributes, }) - ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind) + ctx = context.WithValue(ctx, ctxActiveSpan{}, options.Kind) return ctx, func(err error) { if err != nil { errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index 04d7bb4ecb..4c3a31fea7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -59,13 +59,33 @@ func setDefaults(o *policy.RetryOptions) { } func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 - delay := time.Duration((1< o.MaxRetryDelay { + delayFloat := float64(delay) * jitterMultiplier + if delayFloat > float64(math.MaxInt64) { + // the jitter pushed us over MaxInt64, so just use MaxInt64 + delay = time.Duration(math.MaxInt64) + } else { + delay = time.Duration(delayFloat) + } + + if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value delay = o.MaxRetryDelay } + return delay } @@ -102,7 +122,8 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { try := int32(1) for { resp = nil // reset - log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) + // unfortunately we don't have access to the custom allow-list of query params, so we'll redact everything but the default allowed QPs + log.Writef(log.EventRetryPolicy, "=====> Try=%d for %s %s", try, req.Raw().Method, getSanitizedURL(*req.Raw().URL, getAllowedQueryParams(nil))) // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because // the stream may not be at offset 0 when we first get it and we want the same behavior for the diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 06ac95b1b7..7d34b7803a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -15,6 +15,7 @@ import ( "fmt" "io" "mime/multipart" + "net/http" "net/textproto" "net/url" "path" @@ -24,6 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" ) // Base64Encoding is usesd to specify which base-64 encoder/decoder to use when @@ -44,6 +46,11 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic return exported.NewRequest(ctx, httpMethod, endpoint) } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +func NewRequestFromRequest(req *http.Request) (*policy.Request, error) { + return exported.NewRequestFromRequest(req) +} + // EncodeQueryParams will parse and encode any query parameters in the specified URL. 
// Any semicolons will automatically be escaped. func EncodeQueryParams(u string) (string, error) { @@ -263,3 +270,12 @@ func SkipBodyDownload(req *policy.Request) { // CtxAPINameKey is used as a context key for adding/retrieving the API name. type CtxAPINameKey = shared.CtxAPINameKey + +// NewUUID returns a new UUID using the RFC4122 algorithm. +func NewUUID() (string, error) { + u, err := uuid.New() + if err != nil { + return "", err + } + return u.String(), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md new file mode 100644 index 0000000000..ea267e4f41 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md @@ -0,0 +1,10 @@ +# Breaking Changes + +## v1.6.0 + +### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios + +As of `azidentity` v1.6.0, `DefaultAzureCredential` makes a minor behavioral change when it uses IMDS managed +identity. It sends its first request to IMDS without the "Metadata" header, to expedite validating whether the endpoint +is available. This precedes the credential's first token request and is guaranteed to fail with a 400 error. This error +response can appear in logs but doesn't indicate authentication failed. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 6d4b6feb86..e35f5ad935 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,76 @@ # Release History +## 1.8.0 (2024-10-08) + +### Other Changes +* `AzurePipelinesCredential` sets an additional OIDC request header so that it + receives a 401 instead of a 302 after presenting an invalid system access token +* Allow logging of debugging headers for `AzurePipelinesCredential` and include + them in error messages + +## 1.8.0-beta.3 (2024-09-17) + +### Features Added +* Added `ObjectID` type for `ManagedIdentityCredentialOptions.ID` + +### Other Changes +* Removed redundant content from error messages + +## 1.8.0-beta.2 (2024-08-06) + +### Breaking Changes +* `NewManagedIdentityCredential` now returns an error when a user-assigned identity + is specified on a platform whose managed identity API doesn't support that. + `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. + Returning an error instead prevents the credential authenticating an unexpected + identity, causing a client to act with unexpected privileges. The affected + platforms are: + * Azure Arc + * Azure ML (when a resource ID is specified; client IDs are supported) + * Cloud Shell + * Service Fabric + +### Other Changes +* If `DefaultAzureCredential` receives a non-JSON response when probing IMDS before + attempting to authenticate a managed identity, it continues to the next credential + in the chain instead of immediately returning an error. + +## 1.8.0-beta.1 (2024-07-17) + +### Features Added +* Restored persistent token caching feature + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.7.0-beta.1 +* Redesigned the persistent caching API. Encryption is now required in all cases + and persistent cache construction is separate from credential construction. 
+ The `PersistentUserAuthentication` example in the package docs has been updated + to demonstrate the new API. + +## 1.7.0 (2024-06-20) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipelines service connection with + workload identity federation + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.7.0-beta.1 +* Removed the persistent token caching API. It will return in v1.8.0-beta.1 + +## 1.7.0-beta.1 (2024-06-10) + +### Features Added +* Restored `AzurePipelinesCredential` and persistent token caching API + +## Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Values which `NewAzurePipelinesCredential` read from environment variables in + prior versions are now parameters +* Renamed `AzurePipelinesServiceConnectionCredentialOptions` to `AzurePipelinesCredentialOptions` + +### Bugs Fixed +* Managed identity bug fixes + ## 1.6.0 (2024-06-10) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index b5acff0e63..96f30b25cc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -54,7 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ### DefaultAzureCredential -`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: +`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. 
`DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: ![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) @@ -126,12 +126,17 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) ## Credential Types -### Authenticating Azure Hosted Applications +### Credential chains |Credential|Usage |-|- |[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps |[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials + +### Authenticating Azure-Hosted Applications + +|Credential|Usage +|-|- |[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables |[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource |[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes @@ -140,6 +145,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- +|[AzurePipelinesCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzurePipelinesCredential)|Authenticate an Azure Pipelines [service connection](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) |[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion |[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate |[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret @@ -157,7 +163,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- |[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI -|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI +|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI ## Environment Variables diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index f9cc489433..e0bd09c636 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -1,62 +1,46 @@ ## Token caching in the Azure Identity client module -*Token caching* is a feature provided by the Azure Identity 
library that allows apps to: +Token caching helps apps: - Improve their resilience and performance. -- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. -- Reduce the number of times the user is prompted to authenticate. +- Reduce the number of requests sent to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times users are prompted to authenticate. -When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID by sending an HTTP request and sometimes prompting a user to authenticate interactively. Credentials with caches (see [the below table](#credentials-supporting-token-caching) for a list) store access tokens either [in memory](#in-memory-token-caching) or, optionally, [on disk](#persistent-token-caching). These credentials return cached tokens whenever possible, to avoid unnecessary token requests or user interaction. Both cache implementations are safe for concurrent use. -Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. +#### Caching can't be disabled -### In-memory token caching - -*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. - -**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. +Whether a credential caches tokens isn't configurable. If a credential has a cache of either kind, it requests a new token only when it can't provide one from its cache. Azure SDK service clients have an additional, independent layer of in-memory token caching, to prevent redundant token requests. This cache works with any credential type, even a custom implementation defined outside the Azure SDK, and can't be disabled. Disabling token caching is therefore impossible when using Azure SDK clients or most `azidentity` credential types. 
However, in-memory caches can be cleared by constructing new credential and client instances. -#### Caching cannot be disabled +### In-memory token caching -As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. +Credential types that support caching store tokens in memory by default and require no configuration to do so. Each instance of these types has its own cache, and two credential instances never share an in-memory cache. ### Persistent token caching -> Only azidentity v1.5.0-beta versions support persistent token caching +Some credential types support opt-in persistent token caching (see [the below table](#credentials-supporting-token-caching) for a list). This feature enables credentials to store and retrieve tokens across process executions, so an application doesn't need to authenticate every time it runs. -*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. +Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -| Operating system | Storage mechanism | +| Operating system | Encryption facility | |------------------|---------------------------------------| | Linux | kernel key retention service (keyctl) | | macOS | Keychain | -| Windows | DPAPI | - -By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. -However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. +| Windows | Data Protection API (DPAPI) | -With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: - -- Makes the app more resilient to failures. -- Ensures the app can continue to function during an Entra ID outage or disruption. -- Avoids having to prompt users to authenticate each time the process is restarted. - ->IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. - -#### Example code - -See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. +Persistent caching requires encryption. 
When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data. ### Credentials supporting token caching The following table indicates the state of in-memory and persistent caching in each credential type. -**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache). +**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example]. | Credential | In-memory token caching | Persistent token caching | |--------------------------------|---------------------------------------------------------------------|--------------------------| | `AzureCLICredential` | Not Supported | Not Supported | | `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `AzurePipelinesCredential` | Supported | Supported | | `ClientAssertionCredential` | Supported | Supported | | `ClientCertificateCredential` | Supported | Supported | | `ClientSecretCredential` | Supported | Supported | @@ -65,6 +49,9 @@ The following table indicates the state of in-memory and persistent caching in e | `EnvironmentCredential` | Supported | Not Supported | | `InteractiveBrowserCredential` | Supported | Supported | | `ManagedIdentityCredential` | Supported | Not Supported | -| `OnBehalfOfCredential` | Supported | Supported | +| `OnBehalfOfCredential` | Supported | Not Supported | | `UsernamePasswordCredential` | Supported | Supported | | `WorkloadIdentityCredential` | Supported | Supported | + +[sp_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentServicePrincipalAuthentication +[user_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 3564e685e1..c24f67e84a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -10,6 +10,7 @@ This troubleshooting guide covers failure investigation techniques, common error - [Enable and configure logging](#enable-and-configure-logging) - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) +- [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) - [Troubleshoot ClientCertificateCredential authentication 
issues](#troubleshoot-clientcertificatecredential-authentication-issues) - [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) - [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) @@ -226,6 +227,15 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul |---|---|---| |no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
  • If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
  • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` + +## Troubleshoot AzurePipelinesCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +| AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.| +| No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| +|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| + ## Get additional help Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index bff0c44dac..045f87acd5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_087379b475" + "Tag": "go/azidentity_c55452bbf6" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go index ada4d6501d..840a71469c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -18,10 +18,10 @@ import ( var supportedAuthRecordVersions = []string{"1.0"} -// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// AuthenticationRecord is non-secret account information about an authenticated user that user credentials such as // [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication -// data. Call these credentials' Authenticate method to get an authenticationRecord for a user. -type authenticationRecord struct { +// data. Call these credentials' Authenticate method to get an AuthenticationRecord for a user. +type AuthenticationRecord struct { // Authority is the URL of the authority that issued the token. 
Authority string `json:"authority"` @@ -42,11 +42,11 @@ type authenticationRecord struct { } // UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord -func (a *authenticationRecord) UnmarshalJSON(b []byte) error { +func (a *AuthenticationRecord) UnmarshalJSON(b []byte) error { // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally // different type enables this by assigning all the fields without recursing into this method. - type r authenticationRecord + type r AuthenticationRecord err := json.Unmarshal(b, (*r)(a)) if err != nil { return err @@ -63,7 +63,7 @@ func (a *authenticationRecord) UnmarshalJSON(b []byte) error { } // account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. -func (a *authenticationRecord) account() public.Account { +func (a *AuthenticationRecord) account() public.Account { return public.Account{ Environment: a.Authority, HomeAccountID: a.HomeAccountID, @@ -71,10 +71,10 @@ func (a *authenticationRecord) account() public.Account { } } -func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { +func newAuthenticationRecord(ar public.AuthResult) (AuthenticationRecord, error) { u, err := url.Parse(ar.IDToken.Issuer) if err != nil { - return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + return AuthenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) } tenant := ar.IDToken.TenantID if tenant == "" { @@ -84,7 +84,7 @@ func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) if username == "" { username = ar.IDToken.UPN } - return authenticationRecord{ + return AuthenticationRecord{ Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), ClientID: ar.IDToken.Audience, HomeAccountID: ar.Account.HomeAccountID, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index b0965036bb..ce55dc658e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -53,8 +53,14 @@ var ( errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) -// tokenCachePersistenceOptions contains options for persistent token caching -type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache = internal.Cache // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. 
cloud.Configuration.ActiveDirectoryAuthorityHost value set by user diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go index 2655543aee..a4b8ab6f4d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go @@ -19,21 +19,22 @@ import ( const ( credNameAzurePipelines = "AzurePipelinesCredential" oidcAPIVersion = "7.1" - systemAccessToken = "SYSTEM_ACCESSTOKEN" systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI" + xMsEdgeRef = "x-msedge-ref" + xVssE2eId = "x-vss-e2eid" ) -// azurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See +// AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See // [Azure Pipelines documentation] for more information. // // [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/library/connect-to-azure?view=azure-devops#create-an-azure-resource-manager-service-connection-that-uses-workload-identity-federation -type azurePipelinesCredential struct { +type AzurePipelinesCredential struct { connectionID, oidcURI, systemAccessToken string cred *ClientAssertionCredential } -// azurePipelinesCredentialOptions contains optional parameters for AzurePipelinesCredential. -type azurePipelinesCredentialOptions struct { +// AzurePipelinesCredentialOptions contains optional parameters for AzurePipelinesCredential. +type AzurePipelinesCredentialOptions struct { azcore.ClientOptions // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. @@ -41,6 +42,11 @@ type azurePipelinesCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -48,31 +54,45 @@ type azurePipelinesCredentialOptions struct { DisableInstanceDiscovery bool } -// newAzurePipelinesCredential is the constructor for AzurePipelinesCredential. In addition to its required arguments, -// it reads a security token for the running build, which is required to authenticate the service connection, from the -// environment variable SYSTEM_ACCESSTOKEN. See the [Azure Pipelines documentation] for an example showing how to set -// this variable in build job YAML. +// NewAzurePipelinesCredential is the constructor for AzurePipelinesCredential. +// +// - tenantID: tenant ID of the service principal federated with the service connection +// - clientID: client ID of that service principal +// - serviceConnectionID: ID of the service connection to authenticate +// - systemAccessToken: security token for the running build. See [Azure Pipelines documentation] for +// an example showing how to get this value. 
// // [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken -func newAzurePipelinesCredential(tenantID, clientID, serviceConnectionID string, options *azurePipelinesCredentialOptions) (*azurePipelinesCredential, error) { - if options == nil { - options = &azurePipelinesCredentialOptions{} +func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, systemAccessToken string, options *AzurePipelinesCredentialOptions) (*AzurePipelinesCredential, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + if clientID == "" { + return nil, errors.New("no client ID specified") + } + if serviceConnectionID == "" { + return nil, errors.New("no service connection ID specified") + } + if systemAccessToken == "" { + return nil, errors.New("no system access token specified") } u := os.Getenv(systemOIDCRequestURI) if u == "" { return nil, fmt.Errorf("no value for environment variable %s. This should be set by Azure Pipelines", systemOIDCRequestURI) } - sat := os.Getenv(systemAccessToken) - if sat == "" { - return nil, errors.New("no value for environment variable " + systemAccessToken) - } - a := azurePipelinesCredential{ + a := AzurePipelinesCredential{ connectionID: serviceConnectionID, oidcURI: u, - systemAccessToken: sat, + systemAccessToken: systemAccessToken, + } + if options == nil { + options = &AzurePipelinesCredentialOptions{} } + // these headers are useful to the DevOps team when debugging OIDC error responses + options.ClientOptions.Logging.AllowedHeaders = append(options.ClientOptions.Logging.AllowedHeaders, xMsEdgeRef, xVssE2eId) caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } @@ -86,7 +106,7 @@ func newAzurePipelinesCredential(tenantID, clientID, serviceConnectionID string, } // GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. 
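The new exported constructor above takes the pipeline's `System.AccessToken` as an explicit argument instead of reading `SYSTEM_ACCESSTOKEN` itself. A minimal sketch of calling it follows; the environment variable names supplying the tenant, client, and service connection IDs are illustrative assumptions, and only the `SYSTEM_ACCESSTOKEN`/`System.AccessToken` mapping comes from the Azure Pipelines documentation linked in the diff.

```go
// Sketch: constructing the new AzurePipelinesCredential inside a pipeline job.
// AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_SERVICE_CONNECTION_ID are assumed
// to be provided by the job; SYSTEM_ACCESSTOKEN is assumed to be mapped from
// System.AccessToken in the job YAML as the linked documentation describes.
package main

import (
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewAzurePipelinesCredential(
		os.Getenv("AZURE_TENANT_ID"),             // tenant of the federated service principal
		os.Getenv("AZURE_CLIENT_ID"),             // client ID of that service principal
		os.Getenv("AZURE_SERVICE_CONNECTION_ID"), // service connection to authenticate
		os.Getenv("SYSTEM_ACCESSTOKEN"),          // security token for the running build
		nil,                                      // nil accepts default options
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred // pass cred to an Azure SDK client
}
```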
-func (a *azurePipelinesCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { +func (a *AzurePipelinesCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameAzurePipelines+"."+traceOpGetToken, a.cred.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() @@ -94,37 +114,44 @@ func (a *azurePipelinesCredential) GetToken(ctx context.Context, opts policy.Tok return tk, err } -func (a *azurePipelinesCredential) getAssertion(ctx context.Context) (string, error) { +func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, error) { url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID url, err := runtime.EncodeQueryParams(url) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil) } req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil) } req.Header.Set("Authorization", "Bearer "+a.systemAccessToken) + // instruct endpoint to return 401 instead of 302, if the system access token is invalid + req.Header.Set("X-TFS-FedAuthRedirect", "Suppress") res, err := doForClient(a.cred.client.azClient, req) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil) } if res.StatusCode != http.StatusOK { - msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration" + msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration." + for _, h := range []string{xMsEdgeRef, xVssE2eId} { + if v := res.Header.Get(h); v != "" { + msg += fmt.Sprintf("\n%s: %s", h, v) + } + } // include the response because its body, if any, probably contains an error message. 
// OK responses aren't included with errors because they probably contain secrets - return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res) } b, err := runtime.Payload(res) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil) } var r struct { OIDCToken string `json:"oidcToken"` } err = json.Unmarshal(b, &r) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil) } return r.OIDCToken, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 6c35a941b9..2460f66ec1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -113,11 +113,19 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token if err != nil { // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise msg := createChainedErrorMessage(errs) - if errors.As(err, &unavailableErr) { + var authFailedErr *AuthenticationFailedError + switch { + case errors.As(err, &authFailedErr): + err = newAuthenticationFailedError(c.name, msg, authFailedErr.RawResponse) + if af, ok := err.(*AuthenticationFailedError); ok { + // stop Error() printing the response again; it's already in msg + af.omitResponse = true + } + case errors.As(err, &unavailableErr): err = newCredentialUnavailableError(c.name, msg) - } else { + default: res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, msg, res, err) + err = newAuthenticationFailedError(c.name, msg, res) } } return token, err @@ -126,7 +134,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token func createChainedErrorMessage(errs []error) string { msg := "failed to acquire a token.\nAttempted credentials:" for _, err := range errs { - msg += fmt.Sprintf("\n\t%s", err.Error()) + msg += fmt.Sprintf("\n\t%s", strings.ReplaceAll(err.Error(), "\n", "\n\t\t")) } return msg } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 4cd8c51447..62c12b5465 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,16 +26,27 @@ extends: parameters: CloudConfig: Public: + ServiceConnection: azure-sdk-tests + SubscriptionConfigurationFilePaths: + - eng/common/TestResources/sub-config/AzurePublicMsft.json SubscriptionConfigurations: - $(sub-config-azure-cloud-test-resources) - $(sub-config-identity-test-resources) - EnvVars: - SYSTEM_ACCESSTOKEN: $(System.AccessToken) + EnableRaceDetector: true RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: + PreSteps: + - task: AzureCLI@2 + displayName: Set OIDC token + inputs: + addSpnToEnvironment: true + azureSubscription: 
azure-sdk-tests + inlineScript: Write-Host "##vso[task.setvariable variable=OIDC_TOKEN;]$($env:idToken)" + scriptLocation: inlineScript + scriptType: pscore MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index b588750ef3..2307da86f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -37,14 +37,16 @@ type ClientAssertionCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. @@ -61,10 +63,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c }, ) msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 80cd96b560..9e6bca1c92 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -31,6 +31,11 @@ type ClientCertificateCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. 
It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -41,9 +46,6 @@ type ClientCertificateCredentialOptions struct { // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. @@ -65,11 +67,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - SendX5C: options.SendCertificateChain, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, } c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index 9e6772e9b8..f0890fe1ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -32,8 +32,10 @@ type ClientSecretCredentialOptions struct { // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache } // ClientSecretCredential authenticates an application with a client secret. 
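The `Cache` field added to the service principal credential options above replaces the unexported `tokenCachePersistenceOptions`. A minimal sketch of wiring it up, assuming the companion `azidentity/cache` module referenced in the `Cache` type's doc comment; that module's options type isn't shown in this diff, so `nil` is passed for defaults and the placeholder credential values are illustrative.

```go
// Sketch: sharing acquired tokens across processes by setting the new Cache
// field on a service principal credential's options. cache.New comes from the
// separate azidentity/cache module; passing nil is assumed to select defaults.
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func main() {
	persistentCache, err := cache.New(nil)
	if err != nil {
		log.Fatal(err)
	}
	cred, err := azidentity.NewClientSecretCredential(
		"tenant-id", "client-id", "client-secret", // placeholder values
		&azidentity.ClientSecretCredentialOptions{Cache: persistentCache},
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```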
@@ -51,10 +53,10 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 3bd08c685f..7059a510c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -29,8 +29,8 @@ type confidentialClientOptions struct { AdditionallyAllowedTenants []string // Assertion for on-behalf-of authentication Assertion string + Cache Cache DisableInstanceDiscovery, SendX5C bool - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // confidentialClient wraps the MSAL confidential client @@ -107,12 +107,12 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque } } if err != nil { - // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. - // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. - var unavailableErr credentialUnavailable - if !errors.As(err, &unavailableErr) { - res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, err.Error(), res, err) + var ( + authFailedErr *AuthenticationFailedError + unavailableErr credentialUnavailable + ) + if !(errors.As(err, &unavailableErr) || errors.As(err, &authFailedErr)) { + err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) @@ -145,7 +145,7 @@ func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfide } func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { - cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + cache, err := internal.ExportReplace(c.opts.Cache, enableCAE) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 551d319946..3cfc0f7bf1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -36,10 +36,13 @@ type DefaultAzureCredentialOptions struct { TenantID string } -// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. -// It combines credentials suitable for deployment with credentials suitable for local development. 
-// It attempts to authenticate with each of these credential types, in the following order, stopping -// when one provides a token: +// DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by +// combining credentials used in Azure hosting environments and credentials used in local development. In +// production, it's better to use a specific credential type so authentication is more predictable and easier +// to debug. +// +// DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, +// stopping when one provides a token: // // - [EnvironmentCredential] // - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index cd30bedd5e..53c4c72873 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -25,18 +25,26 @@ type DeviceCodeCredentialOptions struct { // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -49,9 +57,6 @@ type DeviceCodeCredentialOptions struct { // applications. TenantID string - // tokenCachePersistenceOptions enables persistent token caching when not nil. 
- tokenCachePersistenceOptions *tokenCachePersistenceOptions - // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -101,12 +106,12 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, DeviceCodePrompt: cp.UserPrompt, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, - Record: cp.authenticationRecord, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + Record: cp.AuthenticationRecord, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { @@ -116,8 +121,9 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC return &DeviceCodeCredential{client: c}, nil } -// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate prompts a user to log in via the device code flow. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. +func (c *DeviceCodeCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index 698650bbb6..b05cb035a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -38,18 +38,30 @@ type AuthenticationFailedError struct { // RawResponse is the HTTP response motivating the error, if available. RawResponse *http.Response - credType string - message string - err error + credType, message string + omitResponse bool } -func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { - return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +func newAuthenticationFailedError(credType, message string, resp *http.Response) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp} +} + +// newAuthenticationFailedErrorFromMSAL creates an AuthenticationFailedError from an MSAL error. +// If the error is an MSAL CallErr, the new error includes an HTTP response and not the MSAL error +// message, because that message is redundant given the response. If the original error isn't a +// CallErr, the returned error incorporates its message. +func newAuthenticationFailedErrorFromMSAL(credType string, err error) error { + msg := "" + res := getResponseFromError(err) + if res == nil { + msg = err.Error() + } + return newAuthenticationFailedError(credType, msg, res) } // Error implements the error interface. 
Note that the message contents are not contractual and can change over time. func (e *AuthenticationFailedError) Error() string { - if e.RawResponse == nil { + if e.RawResponse == nil || e.omitResponse { return e.credType + ": " + e.message } msg := &bytes.Buffer{} @@ -62,7 +74,7 @@ func (e *AuthenticationFailedError) Error() string { fmt.Fprintln(msg, "Request information not available") } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") body, err := runtime.Payload(e.RawResponse) switch { @@ -83,6 +95,8 @@ func (e *AuthenticationFailedError) Error() string { anchor = "azure-cli" case credNameAzureDeveloperCLI: anchor = "azd" + case credNameAzurePipelines: + anchor = "apc" case credNameCert: anchor = "client-cert" case credNameSecret: @@ -107,17 +121,17 @@ func (*AuthenticationFailedError) NonRetriable() { var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) -// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token +// AuthenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token // because the credential requires user interaction and is configured not to request it automatically. -type authenticationRequiredError struct { +type AuthenticationRequiredError struct { credentialUnavailableError // TokenRequestOptions for the required token. Pass this to the credential's Authenticate method. TokenRequestOptions policy.TokenRequestOptions } -func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { - return &authenticationRequiredError{ +func newAuthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { + return &AuthenticationRequiredError{ credentialUnavailableError: credentialUnavailableError{ credType + " can't acquire a token without user interaction. 
Call Authenticate to authenticate a user interactively", }, @@ -126,8 +140,8 @@ func newauthenticationRequiredError(credType string, tro policy.TokenRequestOpti } var ( - _ credentialUnavailable = (*authenticationRequiredError)(nil) - _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil) + _ credentialUnavailable = (*AuthenticationRequiredError)(nil) + _ errorinfo.NonRetriable = (*AuthenticationRequiredError)(nil) ) type credentialUnavailable interface { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum deleted file mode 100644 index c592f283b6..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum +++ /dev/null @@ -1,60 +0,0 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.17.0/go.mod 
h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 056785a8a3..848db16e43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -24,18 +24,26 @@ type InteractiveBrowserCredentialOptions struct { // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. 
- authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -54,9 +62,6 @@ type InteractiveBrowserCredentialOptions struct { // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -82,13 +87,13 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, LoginHint: cp.LoginHint, - Record: cp.authenticationRecord, + Record: cp.AuthenticationRecord, RedirectURL: cp.RedirectURL, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { @@ -97,8 +102,9 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption return &InteractiveBrowserCredential{client: c}, nil } -// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate opens the default browser so a user can log in. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. 
+func (c *InteractiveBrowserCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go new file mode 100644 index 0000000000..c0cfe76060 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache struct { + // impl is a pointer so a Cache can carry persistent state across copies + impl *impl +} + +// impl is a Cache's private implementation +type impl struct { + // factory constructs storage implementations + factory func(bool) (cache.ExportReplace, error) + // cae and noCAE are previously constructed storage implementations. CAE + // and non-CAE tokens must be stored separately because MSAL's cache doesn't + // observe token claims. If a single storage implementation held both kinds + // of tokens, it could create a reauthentication or error loop by returning + // a non-CAE token lacking a required claim. + cae, noCAE cache.ExportReplace + // mu synchronizes around cae and noCAE + mu *sync.RWMutex +} + +func (i *impl) exportReplace(cae bool) (cache.ExportReplace, error) { + if i == nil { + // zero-value Cache: return a nil ExportReplace and MSAL will cache in memory + return nil, nil + } + var ( + err error + xr cache.ExportReplace + ) + i.mu.RLock() + xr = i.cae + if !cae { + xr = i.noCAE + } + i.mu.RUnlock() + if xr != nil { + return xr, nil + } + i.mu.Lock() + defer i.mu.Unlock() + if cae { + if i.cae == nil { + if xr, err = i.factory(cae); err == nil { + i.cae = xr + } + } + return i.cae, err + } + if i.noCAE == nil { + if xr, err = i.factory(cae); err == nil { + i.noCAE = xr + } + } + return i.noCAE, err +} + +// NewCache is the constructor for Cache. It takes a factory instead of an instance +// because it doesn't know whether the Cache will store both CAE and non-CAE tokens. +func NewCache(factory func(cae bool) (cache.ExportReplace, error)) Cache { + return Cache{&impl{factory: factory, mu: &sync.RWMutex{}}} +} + +// ExportReplace returns an implementation satisfying MSAL's ExportReplace interface. +// It's a function instead of a method on Cache so packages in azidentity and +// azidentity/cache can call it while applications can't. "cae" declares whether the +// caller intends this implementation to store CAE tokens. 
+func ExportReplace(c Cache, cae bool) (cache.ExportReplace, error) { + return c.impl.exportReplace(cae) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go deleted file mode 100644 index b1b4d5c8bd..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -// TokenCachePersistenceOptions contains options for persistent token caching -type TokenCachePersistenceOptions struct { - // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text - // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts - // encryption before falling back to plaintext storage. - AllowUnencryptedStorage bool - - // Name identifies the cache. Set this to isolate data from other applications. - Name string -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go deleted file mode 100644 index c1498b4644..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -import ( - "errors" - - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" -) - -var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") - -// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to -// use a persistent cache must first import the cache module, which will replace this function -// with a platform-specific implementation. -var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { - if o == nil { - return nil, nil - } - return nil, errMissingImport -} - -// CacheFilePath returns the path to the cache file for the given name. -// Defining it in this package makes it available to azidentity tests. 
-var CacheFilePath = func(name string) (string, error) { - return "", errMissingImport -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 6122cc7005..4c657a92ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -143,6 +143,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if endpoint, ok := os.LookupEnv(identityEndpoint); ok { if _, ok := os.LookupEnv(identityHeader); ok { if _, ok := os.LookupEnv(identityServerThumbprint); ok { + if options.ID != nil { + return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime") + } env = "Service Fabric" c.endpoint = endpoint c.msiType = msiTypeServiceFabric @@ -152,6 +155,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeAppService } } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + if options.ID != nil { + return nil, errors.New("the Azure Arc API doesn't support specifying a user-assigned managed identity at runtime") + } env = "Azure Arc" c.endpoint = endpoint c.msiType = msiTypeAzureArc @@ -159,9 +165,15 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { c.endpoint = endpoint if _, ok := os.LookupEnv(msiSecret); ok { + if options.ID != nil && options.ID.idKind() != miClientID { + return nil, errors.New("the Azure ML API supports specifying a user-assigned managed identity by client ID only") + } env = "Azure ML" c.msiType = msiTypeAzureML } else { + if options.ID != nil { + return nil, errors.New("the Cloud Shell API doesn't support user-assigned managed identities") + } env = "Cloud Shell" c.msiType = msiTypeCloudShell } @@ -207,9 +219,10 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) - if err == nil { - _, err = c.azClient.Pipeline().Do(req) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) } + res, err := c.azClient.Pipeline().Do(req) if err != nil { msg := err.Error() if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { @@ -217,7 +230,16 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) } - // send normal token requests from now on because something responded + // because IMDS always responds with JSON, assume a non-JSON response is from something else, such + // as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating + b, err := azruntime.Payload(res) + if err != nil { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err)) + } + if !json.Valid(b) { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe") + } + // send normal token requests from now on because IMDS responded c.probeIMDS = false } @@ -228,7 +250,7 @@ func (c 
*managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi resp, err := c.azClient.Pipeline().Do(msg) if err != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil) } if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { @@ -239,7 +261,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi switch resp.StatusCode { case http.StatusBadRequest: if id != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { @@ -256,7 +278,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp) } func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { @@ -284,10 +306,10 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac if expiresOn, err := strconv.Atoi(v); err == nil { return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res) default: msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res) } } @@ -302,15 +324,15 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage key, err := c.getAzureArcSecretKey(ctx, scopes) if err != nil { msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) - return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil) } - return c.createAzureArcAuthRequest(ctx, id, scopes, key) + return c.createAzureArcAuthRequest(ctx, scopes, key) case msiTypeAzureML: return c.createAzureMLAuthRequest(ctx, id, scopes) case msiTypeServiceFabric: - return c.createServiceFabricAuthRequest(ctx, id, scopes) + return c.createServiceFabricAuthRequest(ctx, scopes) case msiTypeCloudShell: - return c.createCloudShellAuthRequest(ctx, id, scopes) + return c.createCloudShellAuthRequest(ctx, scopes) default: return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") } @@ -323,13 +345,16 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma } request.Raw().Header.Set(headerMetadata, "true") q := request.Raw().URL.Query() - q.Add("api-version", imdsAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) + q.Set("api-version", 
imdsAPIVersion) + q.Set("resource", strings.Join(scopes, " ")) if id != nil { - if id.idKind() == miResourceID { - q.Add(msiResID, id.String()) - } else { - q.Add(qpClientID, id.String()) + switch id.idKind() { + case miClientID: + q.Set(qpClientID, id.String()) + case miObjectID: + q.Set("object_id", id.String()) + case miResourceID: + q.Set(msiResID, id.String()) } } request.Raw().URL.RawQuery = q.Encode() @@ -343,13 +368,16 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, } request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) q := request.Raw().URL.Query() - q.Add("api-version", "2019-08-01") - q.Add("resource", scopes[0]) + q.Set("api-version", "2019-08-01") + q.Set("resource", scopes[0]) if id != nil { - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) + switch id.idKind() { + case miClientID: + q.Set(qpClientID, id.String()) + case miObjectID: + q.Set("principal_id", id.String()) + case miResourceID: + q.Set(miResID, id.String()) } } request.Raw().URL.RawQuery = q.Encode() @@ -363,23 +391,24 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id } request.Raw().Header.Set("secret", os.Getenv(msiSecret)) q := request.Raw().URL.Query() - q.Add("api-version", "2017-09-01") - q.Add("resource", strings.Join(scopes, " ")) - q.Add("clientid", os.Getenv(defaultIdentityClientID)) + q.Set("api-version", "2017-09-01") + q.Set("resource", strings.Join(scopes, " ")) + q.Set("clientid", os.Getenv(defaultIdentityClientID)) if id != nil { - if id.idKind() == miResourceID { - log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") - q.Set("clientid", "") - q.Set(miResID, id.String()) - } else { + switch id.idKind() { + case miClientID: q.Set("clientid", id.String()) + case miObjectID: + return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by object ID", nil) + case miResourceID: + return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by resource ID", nil) } } request.Raw().URL.RawQuery = q.Encode() return request, nil } -func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err @@ -387,16 +416,8 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte q := request.Raw().URL.Query() request.Raw().Header.Set("Accept", "application/json") request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) - q.Add("api-version", serviceFabricAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } + q.Set("api-version", serviceFabricAPIVersion) + q.Set("resource", strings.Join(scopes, " ")) request.Raw().URL.RawQuery = q.Encode() return request, nil } @@ -409,8 +430,8 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour } 
request.Raw().Header.Set(headerMetadata, "true") q := request.Raw().URL.Query() - q.Add("api-version", azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) + q.Set("api-version", azureArcAPIVersion) + q.Set("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() // send the initial request to get the short-lived secret key response, err := c.azClient.Pipeline().Do(request) @@ -421,39 +442,39 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour // of the secret key file. Any other status code indicates an error in the request. if response.StatusCode != 401 { msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) - return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response) } header := response.Header.Get("WWW-Authenticate") if len(header) == 0 { - return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil) } // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key _, p, found := strings.Cut(header, "=") if !found { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil) } expected, err := arcKeyDirectory() if err != nil { return "", err } if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil) } f, err := os.Stat(p) if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil) } if s := f.Size(); s > 4096 { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil) } key, err := os.ReadFile(p) if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil) } return string(key), nil } -func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, resources []string, key string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err @@ -461,21 +482,13 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i request.Raw().Header.Set(headerMetadata, "true") request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) q := request.Raw().URL.Query() - q.Add("api-version", 
azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } + q.Set("api-version", azureArcAPIVersion) + q.Set("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() return request, nil } -func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) if err != nil { return nil, err @@ -488,14 +501,5 @@ func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { return nil, err } - if id != nil { - log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") - q := request.Raw().URL.Query() - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } return request, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 13c043d8e0..1d53579cf3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -22,8 +22,9 @@ const credNameManagedIdentity = "ManagedIdentityCredential" type managedIdentityIDKind int const ( - miClientID managedIdentityIDKind = 0 - miResourceID managedIdentityIDKind = 1 + miClientID managedIdentityIDKind = iota + miObjectID + miResourceID ) // ManagedIDKind identifies the ID of a managed identity as either a client or resource ID @@ -32,7 +33,12 @@ type ManagedIDKind interface { idKind() managedIdentityIDKind } -// ClientID is the client ID of a user-assigned managed identity. +// ClientID is the client ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when a ClientID is specified on the following platforms: +// +// - Azure Arc +// - Cloud Shell +// - Service Fabric type ClientID string func (ClientID) idKind() managedIdentityIDKind { @@ -44,7 +50,31 @@ func (c ClientID) String() string { return string(c) } -// ResourceID is the resource ID of a user-assigned managed identity. +// ObjectID is the object ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when an ObjectID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric +type ObjectID string + +func (ObjectID) idKind() managedIdentityIDKind { + return miObjectID +} + +// String returns the string value of the ID. +func (o ObjectID) String() string { + return string(o) +} + +// ResourceID is the resource ID of a user-assigned managed identity. 
[NewManagedIdentityCredential] +// returns an error when a ResourceID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric type ResourceID string func (ResourceID) idKind() managedIdentityIDKind { @@ -60,9 +90,10 @@ func (r ResourceID) String() string { type ManagedIdentityCredentialOptions struct { azcore.ClientOptions - // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity - // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that - // some platforms don't accept resource IDs. + // ID of a managed identity the credential should authenticate. Set this field to use a specific identity instead of + // the hosting environment's default. The value may be the identity's client, object, or resource ID. + // NewManagedIdentityCredential returns an error when the hosting environment doesn't support user-assigned managed + // identities, or the specified kind of ID. ID ManagedIDKind // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have @@ -73,10 +104,11 @@ type ManagedIdentityCredentialOptions struct { dac bool } -// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// ManagedIdentityCredential authenticates an [Azure managed identity] in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: -// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview +// user-assigned identity. 
+// +// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index b3d22dbf3c..73363e1c9e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -30,12 +30,12 @@ type publicClientOptions struct { azcore.ClientOptions AdditionallyAllowedTenants []string + Cache Cache DeviceCodePrompt func(context.Context, DeviceCodeMessage) error DisableAutomaticAuthentication bool DisableInstanceDiscovery bool LoginHint, RedirectURL string - Record authenticationRecord - TokenCachePersistenceOptions *tokenCachePersistenceOptions + Record AuthenticationRecord Username, Password string } @@ -48,7 +48,7 @@ type publicClient struct { host string name string opts publicClientOptions - record authenticationRecord + record AuthenticationRecord azClient *azcore.Client } @@ -107,19 +107,19 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p }, nil } -func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (AuthenticationRecord, error) { if tro == nil { tro = &policy.TokenRequestOptions{} } if len(tro.Scopes) == 0 { if p.defaultScope == nil { - return authenticationRecord{}, errScopeRequired + return AuthenticationRecord{}, errScopeRequired } tro.Scopes = p.defaultScope } client, mu, err := p.client(*tro) if err != nil { - return authenticationRecord{}, err + return AuthenticationRecord{}, err } mu.Lock() defer mu.Unlock() @@ -152,7 +152,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti return p.token(ar, err) } if p.opts.DisableAutomaticAuthentication { - return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) + return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro) } at, err := p.reqToken(ctx, client, tro) if err == nil { @@ -222,13 +222,13 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, } func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { - cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + c, err := internal.ExportReplace(p.opts.Cache, enableCAE) if err != nil { return nil, err } o := []public.Option{ public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), - public.WithCache(cache), + public.WithCache(c), public.WithHTTPClient(p), } if enableCAE { @@ -244,8 +244,7 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke if err == nil { p.record, err = newAuthenticationRecord(ar) } else { - res := getResponseFromError(err) - err = newAuthenticationFailedError(p.name, err.Error(), res, err) + err = newAuthenticationFailedErrorFromMSAL(p.name, err) } return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index a69bbce34c..1a07fede63 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -5,7 +5,19 @@ param ( [hashtable] $AdditionalParameters = @{}, - [hashtable] $DeploymentOutputs + [hashtable] $DeploymentOutputs, + + [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $TenantId, + + [Parameter()] + [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')] + [string] $TestApplicationId, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments ) $ErrorActionPreference = 'Stop' @@ -16,14 +28,14 @@ if ($CI) { Write-Host "Skipping post-provisioning script because resources weren't deployed" return } - az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] + az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] } Write-Host "Building container" $image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" -FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder ENV GOARCH=amd64 GOWORK=off COPY . /azidentity WORKDIR /azidentity/testdata/managed-id-test @@ -53,9 +65,11 @@ az container create -g $rg -n $aciName --image $image ` --role "Storage Blob Data Reader" ` --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` - AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` - AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` - FUNCTIONS_CUSTOMHANDLER_PORT=80 + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" # Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index 2a21652930..135feb0178 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -135,6 +135,14 @@ resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' value: deployResources ? usermgdid.id : null } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID' + value: deployResources ? usermgdid.properties.clientId : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID' + value: deployResources ? 
usermgdid.properties.principalId : null + } { name: 'AzureWebJobsStorage' value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' @@ -217,3 +225,4 @@ output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAs output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 294ed81e95..740abd4709 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -25,18 +25,20 @@ type UsernamePasswordCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. 
Microsoft doesn't recommend this kind of authentication, @@ -54,13 +56,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st options = &UsernamePasswordCredentialOptions{} } opts := publicClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - Password: password, - Record: options.authenticationRecord, - TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, - Username: username, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.AuthenticationRecord, + Username: username, } c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { @@ -70,7 +72,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st } // Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +func (c *UsernamePasswordCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 459ef64c6f..4fa22dcc12 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.6.0" + version = "v1.8.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 3e43e788e9..6fecada2f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -39,15 +39,24 @@ type WorkloadIdentityCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. TenantID string + // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the // environment variable AZURE_FEDERATED_TOKEN_FILE. TokenFilePath string @@ -81,6 +90,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } diff --git a/vendor/github.com/alecthomas/units/renovate.json5 b/vendor/github.com/alecthomas/units/renovate.json5 index 897864b852..6bb4acde94 100644 --- a/vendor/github.com/alecthomas/units/renovate.json5 +++ b/vendor/github.com/alecthomas/units/renovate.json5 @@ -8,4 +8,8 @@ "group:allNonMajor", "schedule:earlyMondays", // Run once a week. ], + postUpdateOptions: [ + "gomodTidy", + "gomodUpdateImportPaths" + ] } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 6bd6449440..c3516e018a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -74,7 +74,9 @@ const ( ) // AWS ISOE (Europe) partition's regions. -const () +const ( + EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West. +) // AWS ISOF partition's regions. 
const () @@ -244,13 +246,6 @@ var awsPartition = partition{ }, }, Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "access-analyzer": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -298,6 +293,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -331,6 +332,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -661,6 +671,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -694,6 +713,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -789,30 +817,60 @@ var awsPartition = partition{ }, "airflow": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -822,6 +880,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -831,6 +898,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -847,6 +917,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -905,6 +978,9 @@ var awsPartition = partition{ endpointKey{ 
Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -963,6 +1039,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -1018,18 +1097,33 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -1882,6 +1976,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1906,6 +2003,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -3758,6 +3858,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4017,15 +4126,75 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "autoscaling": service{ @@ -4327,6 +4496,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + 
endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4444,91 +4616,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -4693,9 +4780,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "bedrock-ap-northeast-1", }: endpoint{ @@ -4704,6 +4797,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-south-1", + }: endpoint{ + Hostname: "bedrock.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "bedrock-ap-southeast-1", }: endpoint{ @@ -4712,6 +4813,22 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-ca-central-1", + }: endpoint{ + Hostname: "bedrock.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-eu-central-1", }: endpoint{ @@ -4720,6 +4837,38 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "bedrock-eu-west-1", + }: endpoint{ + Hostname: "bedrock.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-2", + }: endpoint{ + Hostname: "bedrock.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-3", + }: endpoint{ + Hostname: 
"bedrock.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-fips-us-east-1", }: endpoint{ @@ -4744,6 +4893,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-south-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "bedrock-runtime-ap-southeast-1", }: endpoint{ @@ -4752,6 +4909,22 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-central-1", }: endpoint{ @@ -4760,6 +4933,38 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "bedrock-runtime-eu-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-3", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-fips-us-east-1", }: endpoint{ @@ -4776,6 +4981,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-runtime-sa-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-runtime-us-east-1", }: endpoint{ @@ -4792,6 +5005,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-sa-east-1", + }: endpoint{ + Hostname: "bedrock.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-us-east-1", }: endpoint{ @@ -4808,9 +5029,24 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -4835,6 +5071,9 @@ var awsPartition = partition{ }, "braket": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -4865,6 +5104,12 @@ var 
awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5079,69 +5324,262 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"cloud9-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.api.aws", + }, }, }, "cloudcontrolapi": service{ @@ -5149,78 +5587,216 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-west-1.api.aws", + }, endpointKey{ Region: "ca-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -5278,51 +5854,123 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: 
"il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.api.aws", + }, }, }, "clouddirectory": service{ @@ -5853,6 +6501,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6747,6 +7398,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6759,6 +7413,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6768,18 +7425,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: 
endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6828,6 +7497,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6877,6 +7549,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6889,6 +7564,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6898,18 +7576,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6958,6 +7648,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7137,12 +7830,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7781,6 +8489,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8777,6 +9503,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8980,9 +9712,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"directconnect-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9007,6 +9751,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -10263,6 +11025,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10302,6 +11073,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11310,6 +12090,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11490,6 +12279,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -12276,12 +13074,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -12430,6 +13234,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13374,6 +14181,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13500,6 +14316,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -13885,6 +14710,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + 
}: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13918,6 +14752,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-ca-central-1", }: endpoint{ @@ -13927,6 +14770,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-us-east-1", }: endpoint{ @@ -14026,6 +14878,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-east-1", }: endpoint{ @@ -14399,6 +15269,18 @@ var awsPartition = partition{ }, }, }, + "globalaccelerator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "glue": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14437,6 +15319,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14866,6 +15751,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15044,13 +15932,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -15155,6 +16036,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15170,6 +16054,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15182,6 +16069,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16198,16 +17088,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "iotroborunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "iotsecuredtunneling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -16875,6 +17755,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: 
"kafka-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -16908,6 +17797,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17063,12 +17961,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17488,6 +18401,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18163,6 +19079,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18636,6 +19555,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18935,6 +19857,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19096,66 +20021,222 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + 
Hostname: "logs.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -19195,18 +20276,48 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-central-1.api.aws", + }, endpointKey{ 
Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -19216,6 +20327,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -19225,6 +20342,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -19234,6 +20357,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -19350,6 +20479,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -19389,6 +20521,9 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -19600,12 +20735,30 @@ var awsPartition = partition{ }, "media-pipelines-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -19809,6 +20962,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -19858,15 +21014,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19909,6 +21077,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -20148,6 +21319,9 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, 
endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -20166,6 +21340,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21024,6 +22213,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21339,6 +22531,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21668,6 +22863,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -21692,6 +22895,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -21700,6 +22911,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -21732,6 +22951,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -22053,12 +23280,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -22329,91 +23562,490 @@ var awsPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.af-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-2", - }: endpoint{}, 
+ }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-4", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-4.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-1.api.aws", + Protocols: 
[]string{"https"}, + }, endpointKey{ Region: "eu-central-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-north-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.il-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-central-1.api.aws", + 
Protocols: []string{"https"}, + }, endpointKey{ Region: "me-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "sa-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.sa-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -22811,6 +24443,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -22835,6 +24475,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ 
Region: "ca-central-1", }: endpoint{ @@ -22843,6 +24491,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -22875,6 +24531,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -22965,6 +24629,19 @@ var awsPartition = partition{ }, }, }, + "private-networks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "profile": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -23365,6 +25042,9 @@ var awsPartition = partition{ }, "quicksight": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -23380,15 +25060,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "api", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24427,12 +26119,24 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24442,18 +26146,93 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + }, }, }, "rekognition": service{ @@ -24777,156 +26556,273 @@ var awsPartition = partition{ }, }, "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, + }, + "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.af-south-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-east-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", - }: endpoint{ - Hostname: 
"resource-explorer-2.ap-northeast-3.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-south-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-south-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-south-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-3.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-4", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-4.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ca-central-1", - }: endpoint{ - Hostname: "resource-explorer-2.ca-central-1.api.aws", - }, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-central-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-central-2", - }: endpoint{ - Hostname: "resource-explorer-2.eu-central-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-north-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-north-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-south-1.api.aws", - }, + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-west-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-west-2", - }: endpoint{ - Hostname: "resource-explorer-2.eu-west-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", }: endpoint{ - Hostname: "resource-explorer-2.eu-west-3.api.aws", + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "il-central-1", + Region: "fips-us-east-2", }: endpoint{ - Hostname: "resource-explorer-2.il-central-1.api.aws", + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "me-central-1", + Region: "fips-us-west-1", }: endpoint{ - Hostname: "resource-explorer-2.me-central-1.api.aws", + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "me-south-1", + Region: "fips-us-west-2", }: endpoint{ - Hostname: "resource-explorer-2.me-south-1.api.aws", + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.sa-east-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-east-1.api.aws", + Hostname: 
"resource-groups-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-east-2.api.aws", + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-west-1.api.aws", + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-west-2.api.aws", + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", }, }, }, - "resource-groups": service{ + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -24991,179 +26887,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - }, - }, - }, - "robomaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "rolesanywhere": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -25399,33 +27122,81 @@ var awsPartition = partition{ }, "rum": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -26195,6 +27966,44 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "s3-control.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + 
CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "s3-control.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -26271,6 +28080,25 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "s3-control.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -26309,6 +28137,44 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "s3-control.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "s3-control.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -26358,6 +28224,55 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "s3-control.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: 
"ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -26377,6 +28292,25 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "s3-control.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -26396,6 +28330,44 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "s3-control.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "s3-control.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -26453,6 +28425,63 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "s3-control.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "s3-control.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "s3-control.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -27765,21 +29794,85 @@ var awsPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"serverlessrepo-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "servicecatalog": service{ @@ -28227,6 +30320,36 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29041,18 +31164,36 @@ var awsPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -29062,15 +31203,33 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: 
"eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -29089,6 +31248,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -29098,6 +31275,18 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -29107,6 +31296,24 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -29378,6 +31585,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -30344,6 +32554,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -30353,9 +32566,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30368,6 +32587,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -30607,6 +32829,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31404,41 +33644,115 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "tax": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "tax.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "textract": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -31487,39 +33801,87 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | 
dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.api.aws", + }, }, }, "thinclient": service{ @@ -31926,6 +34288,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "transfer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31959,6 +34330,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -32127,6 +34507,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -32185,6 +34580,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32209,6 +34619,63 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -32221,15 +34688,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, }, }, "voice-chime": service{ @@ -32389,6 +34880,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -32410,12 +34907,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -33418,6 +35924,23 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "wafv2.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -33662,6 +36185,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -34007,9 +36539,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -34031,9 +36572,18 @@ var awsPartition = partition{ endpointKey{ Region: "ui-ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ui-ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ui-ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ui-ca-central-1", + }: endpoint{}, endpointKey{ Region: "ui-eu-central-1", }: endpoint{}, @@ -34485,6 +37035,21 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "acm-pca": 
service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "airflow": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34708,16 +37273,6 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, "batch": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34771,9 +37326,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "cloudformation": service{ @@ -35245,6 +37812,19 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "entitlement.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35432,6 +38012,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -35672,7 +38262,7 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, @@ -35744,6 +38334,16 @@ var awscnPartition = partition{ }, }, }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "oam": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35799,10 +38399,28 @@ var awscnPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, }, }, "pipes": service{ @@ -35867,6 +38485,13 @@ var awscnPartition = partition{ }, }, }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35912,31 +38537,9 @@ var awscnPartition = 
partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, - }, - }, - "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.amazonwebservices.com.cn", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn", - }, endpointKey{ Region: "cn-northwest-1", - }: endpoint{ - Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn", - }, + }: endpoint{}, }, }, "resource-groups": service{ @@ -37407,13 +40010,37 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, }, }, @@ -37437,16 +40064,6 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -37497,6 +40114,38 @@ var awsusgovPartition = partition{ }, "bedrock": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "bedrock-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -37581,21 +40230,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: 
"us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws", + }, }, }, "clouddirectory": service{ @@ -38112,9 +40785,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "data-ats.iot": service{ @@ -38295,20 +40998,40 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", }, }, }, @@ -38994,6 +41717,15 @@ var awsusgovPartition = partition{ }, "email": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -39003,6 +41735,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -39016,12 +41757,82 @@ var awsusgovPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + }, }, }, "es": service{ @@ -40207,6 +43018,62 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40422,6 +43289,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40628,6 +43505,13 @@ var awsusgovPartition = partition{ }, }, }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: 
endpoint{}, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -40961,12 +43845,76 @@ var awsusgovPartition = partition{ }, "pi": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -41354,31 +44302,6 @@ var awsusgovPartition = partition{ }, }, }, - "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-gov-west-1.api.aws", - }, - }, - }, "resource-groups": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -41542,6 +44465,13 @@ var awsusgovPartition = partition{ }, }, }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "runtime.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -41949,6 +44879,46 @@ var awsusgovPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"securitylake.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -42200,6 +45170,78 @@ var awsusgovPartition = partition{ }, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -42264,6 +45306,15 @@ var awsusgovPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -42273,6 +45324,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -42862,21 +45922,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + 
endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.api.aws", + }, }, }, "transcribe": service{ @@ -43007,6 +46091,46 @@ var awsusgovPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "waf-regional": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43271,6 +46395,20 @@ var awsisoPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43283,6 +46421,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "appconfig": service{ @@ -43320,6 +46461,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43410,6 +46561,15 @@ var awsisoPartition = partition{ }, "datasync": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-iso-west-1", }: endpoint{ @@ -43419,6 +46579,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, @@ -43727,6 +46896,55 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43909,42 +47127,12 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - }, }, }, "rbin": service{ @@ -43989,37 +47177,10 @@ var awsisoPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-iso-east-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-iso-west-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-iso-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -44028,16 +47189,7 @@ var awsisoPartition = partition{ endpointKey{ Region: "rds.us-iso-west-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -44050,12 +47202,12 @@ var awsisoPartition = partition{ Region: "us-iso-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: 
"us-iso-east-1", }, @@ -44068,12 +47220,12 @@ var awsisoPartition = partition{ Region: "us-iso-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-west-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -44084,40 +47236,20 @@ var awsisoPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-iso-east-1", + Region: "us-iso-east-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-iso-west-1", + Region: "us-iso-west-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", }, }, }, @@ -44226,6 +47358,131 @@ var awsisoPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "s3-control.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "s3-control.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: 
"us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44241,6 +47498,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sns": service{ @@ -44357,6 +47617,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44449,6 +47716,20 @@ var awsisobPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44456,6 +47737,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44482,6 +47770,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44731,6 +48026,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44818,6 +48120,20 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44855,24 +48171,9 @@ var awsisobPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: 
boxedTrue, - }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - }, }, }, "rbin": service{ @@ -44899,28 +48200,10 @@ var awsisobPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-isob-east-1", - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-isob-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -44933,12 +48216,12 @@ var awsisobPartition = partition{ Region: "us-isob-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", }, endpointKey{ Region: "us-isob-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -44949,22 +48232,12 @@ var awsisobPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-isob-east-1", + Region: "us-isob-east-1", }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", }, }, }, @@ -45037,6 +48310,82 @@ var awsisobPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + 
Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -45090,6 +48439,37 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "streams.dynamodb": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -45186,7 +48566,11 @@ var awsisoePartition = partition{ SignatureVersions: []string{"v4"}, }, }, - Regions: regions{}, + Regions: regions{ + "eu-isoe-west-1": region{ + Description: "EU ISOE West", + }, + }, Services: services{}, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go index 4601f883cc..992ed0464b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -256,8 +256,17 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro s := a.Expected.(int) result = s == req.HTTPResponse.StatusCode case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) + switch ex := a.Expected.(type) { + case string: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == ex + } + case bool: + if ex { + result = err != nil + } else { + result = err == nil + } } default: waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 945a8b45e2..d15e3c84c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.0" +const SDKVersion = "1.55.5" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 058334053c..2ca0b19db7 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri } func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && 
value.Len() == 0 && !q.isEC2 { v.Set(prefix, "") return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go index 04f6c811b6..827bd51942 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -179,8 +179,8 @@ func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req // // Creates and returns access and refresh tokens for clients and applications // that are authenticated using IAM entities. The access token can be used to -// fetch short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// fetch short-term credentials for the assigned Amazon Web Services accounts +// or to access application APIs using bearer authentication. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -331,6 +331,13 @@ func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *reques // Indicates that an error from the service occurred while trying to process // a request. // +// - InvalidRedirectUriException +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { req, out := c.RegisterClientRequest(input) @@ -619,6 +626,15 @@ type CreateTokenInput struct { // type is currently unsupported for the CreateToken API. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the // result of the StartDeviceAuthorization API. @@ -718,6 +734,12 @@ func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput { + s.CodeVerifier = &v + return s +} + // SetDeviceCode sets the DeviceCode field's value. func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { s.DeviceCode = &v @@ -751,7 +773,8 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { type CreateTokenOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. 
// // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenOutput's @@ -863,6 +886,15 @@ type CreateTokenWithIAMInput struct { // persisted in the Authorization Code GrantOptions for the application. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending // on the grant type that you want: @@ -982,6 +1014,12 @@ func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput { + s.CodeVerifier = &v + return s +} + // SetGrantType sets the GrantType field's value. func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { s.GrantType = &v @@ -1027,7 +1065,8 @@ func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWith type CreateTokenWithIAMOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's @@ -1495,6 +1534,78 @@ func (s *InvalidGrantException) RequestID() string { return s.RespMetadata.RequestID } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_redirect_uri. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvalidRedirectUriException) GoString() string { + return s.String() +} + +func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error { + return &InvalidRedirectUriException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRedirectUriException) Code() string { + return "InvalidRedirectUriException" +} + +// Message returns the exception's message. +func (s *InvalidRedirectUriException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRedirectUriException) OrigErr() error { + return nil +} + +func (s *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRedirectUriException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRedirectUriException) RequestID() string { + return s.RespMetadata.RequestID +} + // Indicates that something is wrong with the input to the request. For example, // a required parameter might be missing or out of range. type InvalidRequestException struct { @@ -1731,6 +1842,25 @@ type RegisterClientInput struct { // ClientType is a required field ClientType *string `locationName:"clientType" type:"string" required:"true"` + // This IAM Identity Center application ARN is used to define administrator-managed + // configuration for public client access to resources. At authorization, the + // scopes, grants, and redirect URI available to this client will be restricted + // by this application resource. + EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"` + + // The list of OAuth 2.0 grant types that are defined by the client. This list + // is used to restrict the token granting flows available to the client. + GrantTypes []*string `locationName:"grantTypes" type:"list"` + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string `locationName:"issuerUrl" type:"string"` + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent + // can be redirected back to. + RedirectUris []*string `locationName:"redirectUris" type:"list"` + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []*string `locationName:"scopes" type:"list"` @@ -1782,6 +1912,30 @@ func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { return s } +// SetEntitledApplicationArn sets the EntitledApplicationArn field's value. +func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput { + s.EntitledApplicationArn = &v + return s +} + +// SetGrantTypes sets the GrantTypes field's value. +func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput { + s.GrantTypes = v + return s +} + +// SetIssuerUrl sets the IssuerUrl field's value. +func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput { + s.IssuerUrl = &v + return s +} + +// SetRedirectUris sets the RedirectUris field's value. 
+func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput { + s.RedirectUris = v + return s +} + // SetScopes sets the Scopes field's value. func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { s.Scopes = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go index e6242e4928..cadf4584d2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -57,6 +57,13 @@ const ( // makes a CreateToken request with an invalid grant type. ErrCodeInvalidGrantException = "InvalidGrantException" + // ErrCodeInvalidRedirectUriException for service response error code + // "InvalidRedirectUriException". + // + // Indicates that one or more redirect URI in the request is not supported for + // this operation. + ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException" + // ErrCodeInvalidRequestException for service response error code // "InvalidRequestException". // @@ -106,6 +113,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidClientException": newErrorInvalidClientException, "InvalidClientMetadataException": newErrorInvalidClientMetadataException, "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRedirectUriException": newErrorInvalidRedirectUriException, "InvalidRequestException": newErrorInvalidRequestException, "InvalidRequestRegionException": newErrorInvalidRequestRegionException, "InvalidScopeException": newErrorInvalidScopeException, diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b781..33c88305c4 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9d..78bddf1cee 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. 
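The ssooidc hunks above add PKCE support (a sensitive CodeVerifier field on CreateTokenInput and CreateTokenWithIAMInput), a new InvalidRedirectUriException, and richer client registration (grant types, redirect URIs, issuer URL). A minimal sketch of how a caller might exercise the new setters, assuming an existing AWS session; the client name, redirect URI, authorization code, and verifier values are placeholders, not taken from this diff.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssooidc"
    )

    func main() {
        svc := ssooidc.New(session.Must(session.NewSession()))

        // Register a public client, now declaring grant types and redirect URIs up front.
        reg, err := svc.RegisterClient((&ssooidc.RegisterClientInput{}).
            SetClientName("example-cli"). // placeholder
            SetClientType("public").
            SetGrantTypes(aws.StringSlice([]string{"authorization_code", "refresh_token"})).
            SetRedirectUris(aws.StringSlice([]string{"http://127.0.0.1:8400/callback"})))
        if err != nil {
            log.Fatal(err)
        }

        // Exchange an authorization code, presenting the PKCE code verifier added in this update.
        tok, err := svc.CreateToken((&ssooidc.CreateTokenInput{}).
            SetClientId(aws.StringValue(reg.ClientId)).
            SetClientSecret(aws.StringValue(reg.ClientSecret)).
            SetGrantType("authorization_code").
            SetRedirectUri("http://127.0.0.1:8400/callback").
            SetCode("code-from-browser").               // placeholder
            SetCodeVerifier("original-code-verifier"))  // placeholder PKCE verifier
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("got access token of length", len(aws.StringValue(tok.AccessToken)))
    }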
func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40c..78f95f2561 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba4..118e49e819 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd8..05f5e7dfe7 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd88..cf9d42aed5 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/edsrzf/mmap-go/README.md b/vendor/github.com/edsrzf/mmap-go/README.md index 1ac39f7eeb..5df62b8ed6 100644 --- a/vendor/github.com/edsrzf/mmap-go/README.md +++ b/vendor/github.com/edsrzf/mmap-go/README.md @@ -9,6 +9,6 @@ Operating System Support ======================== This package is tested using GitHub Actions on Linux, macOS, and Windows. It should also work on other Unix-like platforms, but hasn't been tested with them. I'm interested to hear about the results. -I haven't been able to add more features without adding significant complexity, so mmap-go doesn't support `mprotect`, `mincore`, and maybe a few other things. If you're running on a Unix-like platform and need some of these features, I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap). +This package compiles for Plan 9 and WebAssembly, but its functions always return errors. -This package compiles on Plan 9, but its functions always return errors. +Related functions such as `mprotect` and `mincore` aren't included. 
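The xxhash change above introduces seeded digests (NewWithSeed and ResetWithSeed) alongside the existing zero-seed API, and documents that a zero-valued Digest is not ready for writes. A small sketch of the new entry points; the seed and inputs are arbitrary.

    package main

    import (
        "fmt"

        "github.com/cespare/xxhash/v2"
    )

    func main() {
        // One-shot hashing with the implicit zero seed, as before.
        fmt.Println(xxhash.Sum64String("hello"))

        // Streaming digest initialized with an explicit seed (new in this version).
        d := xxhash.NewWithSeed(42)
        d.WriteString("hello")
        fmt.Println(d.Sum64())

        // Reuse the digest under the same seed instead of allocating a new one.
        d.ResetWithSeed(42)
        d.WriteString("world")
        fmt.Println(d.Sum64())
    }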
I haven't found a way to implement them on Windows without introducing significant complexity. If you're running on a Unix-like platform and really need these features, it should still be possible to implement them on top of this package via `syscall`. diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_wasm.go b/vendor/github.com/edsrzf/mmap-go/mmap_wasm.go new file mode 100644 index 0000000000..cfe1c50b03 --- /dev/null +++ b/vendor/github.com/edsrzf/mmap-go/mmap_wasm.go @@ -0,0 +1,27 @@ +// Copyright 2024 Evan Shaw. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mmap + +import "syscall" + +func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) { + return nil, syscall.ENOTSUP +} + +func (m MMap) flush() error { + return syscall.ENOTSUP +} + +func (m MMap) lock() error { + return syscall.ENOTSUP +} + +func (m MMap) unlock() error { + return syscall.ENOTSUP +} + +func (m MMap) unmap() error { + return syscall.ENOTSUP +} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3..f4e7dbf37b 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b1..daea9dd6d6 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e5757549..fa854785d0 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. 
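The new mmap_wasm.go above lets the package compile for WebAssembly while every operation returns syscall.ENOTSUP. A hedged sketch of how a caller might detect that and fall back to a plain read; the fallback logic is illustrative and not part of this diff.

    package main

    import (
        "errors"
        "io"
        "log"
        "os"
        "syscall"

        mmap "github.com/edsrzf/mmap-go"
    )

    // readFile memory-maps a file where supported and falls back to io.ReadAll
    // on platforms (wasm, Plan 9) where the stubs always return ENOTSUP.
    func readFile(path string) ([]byte, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        defer f.Close()

        m, err := mmap.Map(f, mmap.RDONLY, 0)
        if errors.Is(err, syscall.ENOTSUP) {
            return io.ReadAll(f)
        }
        if err != nil {
            return nil, err
        }
        defer m.Unmap()

        buf := make([]byte, len(m))
        copy(buf, m)
        return buf, nil
    }

    func main() {
        data, err := readFile("go.mod") // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("read %d bytes", len(data))
    }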
+1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d5..e4ac2a2fff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. 
+ mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8..c349c326c7 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. 
Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. 
If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. 
- events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e40..36c311694c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. 
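The comment above describes the rename-cookie store as a small fixed-size array with a wrapping index, i.e. an allocation-free LRU-ish cache. A standalone sketch of the same idea, with hypothetical names, in case the pattern is useful outside this backend.

    package main

    import "fmt"

    type cookieEntry struct {
        cookie uint32
        path   string
    }

    // cookieRing is a tiny fixed-capacity cache: writes overwrite the oldest
    // slot and lookups scan all slots, so it never allocates after creation.
    type cookieRing struct {
        entries [10]cookieEntry
        next    int
    }

    func (r *cookieRing) put(cookie uint32, path string) {
        r.entries[r.next] = cookieEntry{cookie, path}
        r.next = (r.next + 1) % len(r.entries)
    }

    func (r *cookieRing) get(cookie uint32) (string, bool) {
        if cookie == 0 {
            return "", false
        }
        for _, e := range r.entries {
            if e.cookie == cookie {
                return e.path, true
            }
        }
        return "", false
    }

    func main() {
        var r cookieRing
        r.put(7, "/tmp/old-name")
        if p, ok := r.get(7); ok {
            fmt.Println("renamed from", p)
        }
    }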
-// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
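The fsnotify hunks here and above move the platform code into unexported backend types (fen, inotify, and so on) constructed through newBackend, while the exported fsnotify.Watcher API appears unchanged; per the 1.8.0 changelog, setting FSNOTIFY_DEBUG=1 makes those backends print debug logs to stderr. A minimal sketch of the public API these backends sit behind; the watched path is a placeholder.

    package main

    import (
        "log"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        // Run with FSNOTIFY_DEBUG=1 to see the backend's debug output on stderr.
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        if err := w.Add("/tmp"); err != nil { // placeholder path
            log.Fatal(err)
        }

        for {
            select {
            case ev, ok := <-w.Events:
                if !ok {
                    return
                }
                log.Println("event:", ev.Op, ev.Name)
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            }
        }
    }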
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). 
+ if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. 
+ // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. 
- if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a0..d8de5ab76f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015..5eb5dbc66f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d61..c54a630838 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc499..0760efe916 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 0000000000..b0eab10090 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 0000000000..928319fb09 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 0000000000..3186b0c349 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 0000000000..f69fdb930f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 0000000000..607e683bd7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 0000000000..35c734be43 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 0000000000..e5b3b6f694 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 0000000000..1dd455bc5a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 0000000000..f1b2e73bd5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 0000000000..52bf4ce53b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 0000000000..547df1df84 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 0000000000..7daa45e19e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 0000000000..30976ce973 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 0000000000..37dfeddc28 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 0000000000..a72c649549 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae653..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b885..f65e8fe3ed 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78f..a29fc7aab6 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 0000000000..f1c181ec9c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 0000000000..38cb9ae101 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. 
+ +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. + max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..c794b2b0c6 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..de0965e12d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. + +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. 
+* If you propose a change or addition, try to give an example how the improved code could look like or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 0000000000..eaa8504921 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 0000000000..af0a79507e --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). + +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 
🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. + +## fxamacker/cbor + +[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) +[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) +[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. + +Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. + +
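As a quick orientation before the collapsed "Highlights" section, here is a minimal round-trip sketch using the package-level API described above (which mirrors `encoding/json`). This is not the upstream Quick Start code; the `Animal` type and its values are invented for the illustration:

```Go
// Minimal sketch: encode a struct to CBOR and decode it back using the
// package-level API. The Animal type is made up for this example.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Animal struct {
	Name string
	Legs int
}

func main() {
	b, err := cbor.Marshal(Animal{Name: "cat", Legs: 4}) // encode to CBOR
	if err != nil {
		panic(err)
	}

	var a Animal
	if err := cbor.Unmarshal(b, &a); err != nil { // decode back into a struct
		panic(err)
	}
	fmt.Printf("%x → %+v\n", b, a)
}
```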
    Highlights

    + +__🚀  Speed__ + +Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__:jigsaw:  Usability__ + +API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +


    + +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
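To make "configurable limits" concrete before the `encoding/gob` failure case shown below, here is a small sketch (ours, not taken from the README) of building a decoding mode with tightened limits and duplicate-map-key rejection. The specific limit values are arbitrary examples, not recommendations:

```Go
// Sketch: a DecMode with tightened decoding limits. The numbers are
// arbitrary illustration values, not recommended settings.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	opts := cbor.DecOptions{
		MaxNestedLevels:  16,                        // reject deeply nested data
		MaxArrayElements: 1024,                      // cap array/slice lengths
		MaxMapPairs:      1024,                      // cap map sizes
		DupMapKey:        cbor.DupMapKeyEnforcedAPF, // error on duplicate map keys
	}
	dm, err := opts.DecMode() // a DecMode is reusable and safe for concurrent use
	if err != nil {
		panic(err)
	}

	var v interface{}
	err = dm.Unmarshal([]byte{0xa1, 0x01, 0x02}, &v) // CBOR for {1: 2}
	fmt.Println(v, err)
}
```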
    Example decoding with encoding/gob 💥 fatal error (out of memory)

    + +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +


    + +
    + +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
    Benchmark details

    + +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


    + +
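+For contrast with the `encoding/gob` example above, here is a minimal sketch (not part of the upstream README) that feeds the benchmark's 10-byte malicious input to this codec with default settings; the exact error message may vary by version:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// Same 10-byte input as the benchmark above: the array header claims a huge
+	// element count, but default decoding limits reject it without allocating for it.
+	malicious := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}
+
+	var v []byte
+	err := cbor.Unmarshal(malicious, &v)
+	fmt.Println(err) // expected: a non-nil error, returned quickly and cheaply
+}
+```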
    + +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce the encoded size of structs. + +
    Example encoding 3-level nested Go struct to 1 byte CBOR

    + +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


    + +
    + +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err = encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +The `UserBufferEncMode` interface extends the `EncMode` interface with `MarshalToBuffer()`, which accepts a user-specified buffer instead of using the built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce the encoded size of structs. + +
    Example encoding 3-level nested Go struct to 1 byte CBOR

    + +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


    + +
    + +
    Example using several struct tags

    + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

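+If the image above doesn't render, the following minimal sketch shows `keyasint` and `toarray` together; the type names, field names, and key numbers here are illustrative only:
+
+```Go
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+// keyasint: CBOR map keys are the integers 1 and 4 instead of field names.
+type Header struct {
+	Alg int    `cbor:"1,keyasint,omitempty"`
+	Kid []byte `cbor:"4,keyasint,omitempty"`
+}
+
+// toarray: fields encode as a fixed-order CBOR array with no keys at all.
+type Record struct {
+	_    struct{} `cbor:",toarray"`
+	Name string
+	Age  int
+}
+
+func main() {
+	h, _ := cbor.Marshal(Header{Alg: -7})
+	r, _ := cbor.Marshal(Record{Name: "x", Age: 1})
+	fmt.Println("keyasint:", hex.EncodeToString(h))
+	fmt.Println("toarray: ", hex.EncodeToString(r))
+}
+```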
    + +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
    Example using TagSet and TagOptions

    + +```go +// Use signedCWT struct defined in "Decoding CWT" example. + +// Create TagSet (safe for concurrency). +tags := cbor.NewTagSet() +// Register tag COSE_Sign1 18 with signedCWT type. +tags.Add( + cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired}, + reflect.TypeOf(signedCWT{}), + 18) + +// Create DecMode with immutable tags. +dm, _ := cbor.DecOptions{}.DecModeWithTags(tags) + +// Unmarshal to signedCWT with tag support. +var v signedCWT +if err := dm.Unmarshal(data, &v); err != nil { + return err +} + +// Create EncMode with immutable tags. +em, _ := cbor.EncOptions{}.EncModeWithTags(tags) + +// Marshal signedCWT with tag number using the EncMode created above. +if data, err := em.Marshal(v); err != nil { + return err +} +``` + +

    + +### Functions and Interfaces + +
    Functions and interfaces at a glance

    + +Common functions with the same API as `encoding/json`: +- `Marshal`, `Unmarshal` +- `NewEncoder`, `(*Encoder).Encode` +- `NewDecoder`, `(*Decoder).Decode` + +NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes +because RFC 8949 treats a CBOR data item with remaining bytes as malformed. +- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes. + +Other useful functions: +- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. +- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes. +- `Wellformed` returns true if the CBOR data item is well-formed. + +Interfaces identical or comparable to Go `encoding` packages include: +`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. + +The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding. + +

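+To make the `RawMessage` note above concrete, here is a minimal sketch (the `Envelope` type and its field names are illustrative, not part of this library) that defers decoding a payload until its type is known:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+// Envelope keeps Payload as raw CBOR bytes so it can be decoded later.
+type Envelope struct {
+	Type    string          `cbor:"type"`
+	Payload cbor.RawMessage `cbor:"payload"`
+}
+
+func main() {
+	payload, _ := cbor.Marshal(map[string]int{"x": 1})
+	b, _ := cbor.Marshal(Envelope{Type: "point", Payload: payload})
+
+	var env Envelope
+	_ = cbor.Unmarshal(b, &env) // Payload still holds undecoded CBOR bytes
+
+	var point map[string]int
+	_ = cbor.Unmarshal(env.Payload, &point) // decode only when needed
+	fmt.Println(env.Type, point["x"])
+}
+```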
    + +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
    + Duplicate Map Keys

    + +This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct. + +`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type. + +`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number. + +The APF suffix means "Allow Partial Fill", so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol. + +

    + +
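+For example, a minimal sketch (the input bytes are chosen for illustration) of enforcing duplicate map key rejection:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// Create a reusable DecMode that rejects duplicate map keys.
+	dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+
+	// 0xa2 = map(2); both pairs use key 1, so the second pair is a duplicate.
+	data := []byte{0xa2, 0x01, 0x02, 0x01, 0x03}
+
+	var m map[int]int
+	err := dm.Unmarshal(data, &m)
+	fmt.Println(err) // expected: a DupMapKeyError; m may be partially filled
+}
+```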
    + Tag Validity

    + +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, an unknown tag data item will be decoded into the `cbor.Tag` data type, which contains the tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, an unknown tag data item is decoded into the specified Go type. If the Go type is registered with a tag number, the tag number can optionally be verified. + +The decoder also has an option to forbid tag data items (treat any tag data item as an error), which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +

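+A minimal sketch of the built-in tag validity checks described above (byte values chosen for illustration; exact error text may differ by version):
+
+```Go
+package main
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// Tag 2 (unsigned bignum) wrapping byte string h'0100' decodes to a big.Int.
+	valid := []byte{0xc2, 0x42, 0x01, 0x00}
+	var v interface{}
+	err := cbor.Unmarshal(valid, &v)
+	if n, ok := v.(big.Int); ok {
+		fmt.Println(err, n.String()) // expected: <nil> 256
+	}
+
+	// Tag 0 (RFC 3339 time) must wrap a text string; integer content is inadmissible.
+	invalid := []byte{0xc0, 0x01}
+	var w interface{}
+	fmt.Println(cbor.Unmarshal(invalid, &w)) // expected: an error about tag 0 content
+}
+```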
    + +## Limitations + +If any of these limitations prevent you from using this library, please open an issue along with a link to your project. + +* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`. +* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items. +* When decoding registered CBOR tag data to interface type, the decoder creates a pointer to the registered Go type matching the CBOR tag number. Requiring a pointer for this is a Go limitation. + +## Fuzzing and Code Coverage + +__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release. + +__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project. + +
    + +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to `encoding/json` and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in a non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. returning an error for an edge case that was missed in a prior version). We try to highlight these in the release notes and add an extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.). Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on the IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16`, which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
    diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 0000000000..9c05146d16 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 0000000000..823bff12ce --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 0000000000..ea0f39e24f --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 
0000000000..ec038a49ec --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 0000000000..85842ac736 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. 
+// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates an unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil. 
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+ // It decodes CBOR unsigned integer (major type 0) to: + // - uint64 + // It decodes CBOR negative integer (major type 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertNone IntDecMode = iota + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 + // - return UnmarshalTypeError if value > math.MaxInt64 + // Deprecated: IntDecConvertSigned should not be used. + // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. + IntDecConvertSigned + + // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - return UnmarshalTypeError if value doesn't fit into int64 + IntDecConvertSignedOrFail + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It makes CBOR integers (major type 0 and 1) decode to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertSignedOrBigInt + + maxIntDec +) + +func (idm IntDecMode) valid() bool { + return idm >= 0 && idm < maxIntDec +} + +// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2) +// as Go map key when decoding CBOR map key into an empty Go interface value. +// Specifically, this option applies when decoding CBOR map into +// - Go empty interface, or +// - Go map with empty interface as key type. +// The CBOR map key types handled by this option are +// - byte string +// - tagged byte string +// - nested tagged byte string +type MapKeyByteStringMode int + +const ( + // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key. + // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to + // ByteString which has underlying string type. + // This is the default setting. + MapKeyByteStringAllowed MapKeyByteStringMode = iota + + // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key. + // Attempting to decode CBOR byte string as map key into empty interface value + // returns a decoding error. + MapKeyByteStringForbidden + + maxMapKeyByteStringMode +) + +func (mkbsm MapKeyByteStringMode) valid() bool { + return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode +} + +// ExtraDecErrorCond specifies extra conditions that should be treated as errors. +type ExtraDecErrorCond uint + +// ExtraDecErrorNone indicates no extra error condition. +const ExtraDecErrorNone ExtraDecErrorCond = 0 + +const ( + // ExtraDecErrorUnknownField indicates error condition when destination + // Go struct doesn't have a field matching a CBOR map key. + ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota + + maxExtraDecError +) + +func (ec ExtraDecErrorCond) valid() bool { + return ec < maxExtraDecError +} + +// UTF8Mode option specifies if decoder should +// decode CBOR Text containing invalid UTF-8 string. +type UTF8Mode int + +const ( + // UTF8RejectInvalid rejects CBOR Text containing + // invalid UTF-8 string. + UTF8RejectInvalid UTF8Mode = iota + + // UTF8DecodeInvalid allows decoding CBOR Text containing + // invalid UTF-8 string. 
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
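A sketch of ByteStringToStringMode, assuming the hex literal encodes the 3-byte CBOR byte string h'010203'. The default mode rejects byte-string-to-string conversion; ByteStringToStringAllowed permits it.

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Assumed test input: 0x43 0x01 0x02 0x03, a 3-byte CBOR byte string.
	data, _ := hex.DecodeString("43010203")

	var s string

	// Default options: decoding a CBOR byte string into a Go string is forbidden.
	if err := cbor.Unmarshal(data, &s); err != nil {
		fmt.Println("default:", err)
	}

	// ByteStringToStringAllowed permits the conversion.
	dm, err := cbor.DecOptions{ByteStringToString: cbor.ByteStringToStringAllowed}.DecMode()
	if err != nil {
		panic(err)
	}
	if err := dm.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	fmt.Printf("allowed: %q\n", s) // "\x01\x02\x03"
}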
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
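A sketch of SimpleValueRegistry usage: rejecting one specific simple value via NewSimpleValueRegistryFromDefaults. The single-byte input 0xf0 is assumed to encode CBOR simple value 16.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Reject simple value 16 (well-formed but unassigned).
	registry, err := cbor.NewSimpleValueRegistryFromDefaults(
		cbor.WithRejectedSimpleValue(cbor.SimpleValue(16)),
	)
	if err != nil {
		panic(err)
	}

	dm, err := cbor.DecOptions{SimpleValues: registry}.DecMode()
	if err != nil {
		panic(err)
	}

	var v interface{}
	// Assumed test input: 0xf0 encodes simple value 16.
	if err := dm.Unmarshal([]byte{0xf0}, &v); err != nil {
		// Expect a *cbor.UnacceptableDataItemError.
		fmt.Println("rejected:", err)
	}
}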
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
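A sketch of ByteStringExpectedFormatMode: with ByteStringExpectedBase64, the content of an untagged byte string is base64-decoded before it is stored in the destination byte slice. The hex literal is assumed to encode a CBOR byte string whose content is the ASCII text "aGVsbG8=" (base64 for "hello").

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Assumed test input: 0x48 (byte string, length 8) followed by "aGVsbG8=".
	data, _ := hex.DecodeString("48614756736247383d")

	dm, err := cbor.DecOptions{ByteStringExpectedFormat: cbor.ByteStringExpectedBase64}.DecMode()
	if err != nil {
		panic(err)
	}

	var b []byte
	if err := dm.Unmarshal(data, &b); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", b) // hello
}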
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
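The DecOptions struct above is typically turned into a reusable DecMode once and shared across goroutines. A minimal sketch of that flow follows; the Config type, its field names, and the test payload are illustrative assumptions, not part of the vendored code.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Config is a hypothetical destination type for this sketch.
type Config struct {
	Name    string `cbor:"name"`
	Replica int    `cbor:"replica"`
}

func main() {
	// A stricter decoding mode: duplicate map keys rejected, no indefinite-length
	// items, no tags, and unknown struct fields treated as errors.
	opts := cbor.DecOptions{
		DupMapKey:         cbor.DupMapKeyEnforcedAPF,
		IndefLength:       cbor.IndefLengthForbidden,
		TagsMd:            cbor.TagsForbidden,
		ExtraReturnErrors: cbor.ExtraDecErrorUnknownField,
	}
	dm, err := opts.DecMode() // immutable and safe for concurrent use
	if err != nil {
		panic(err)
	}

	// Assumed test input: {"name": "a", "replica": 1} produced by cbor.Marshal.
	data, err := cbor.Marshal(map[string]interface{}{"name": "a", "replica": 1})
	if err != nil {
		panic(err)
	}

	var cfg Config
	if err := dm.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}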
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
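UnmarshalFirst, declared in the DecMode interface above, returns the unconsumed remainder of the input, so it can walk a buffer of concatenated data items. A sketch, assuming the byte slice below holds three items back to back: 0x01 (1), 0x62 0x68 0x69 ("hi"), and 0xf5 (true).

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, err := cbor.DecOptions{}.DecMode()
	if err != nil {
		panic(err)
	}

	// Assumed test input: three CBOR data items concatenated.
	rest := []byte{0x01, 0x62, 0x68, 0x69, 0xf5}

	for len(rest) > 0 {
		var v interface{}
		rest, err = dm.UnmarshalFirst(rest, &v)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T %v\n", v, v)
	}
}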
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow + // better test coverage + if err == nil { + err = d.value(v) + } + + // If either wellformed or value returned an error, do not return rest bytes + if err != nil { + return nil, err + } + + // Return the rest of the data slice (which might be len 0) + return d.data[d.off:], nil +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func (dm *decMode) Valid(data []byte) error { + return dm.Wellformed(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func (dm *decMode) Wellformed(data []byte) error { + d := decoder{data: data, dm: dm} + return d.wellformed(false, false) +} + +// NewDecoder returns a new decoder that reads from r using dm DecMode. +func (dm *decMode) NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r, d: decoder{dm: dm}} +} + +type decoder struct { + data []byte + off int // next read offset in data + dm *decMode + + // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags, + // if any. + // + // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding + // byte strings, the effective encoding comes from the tag nearest to the byte string being + // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be + // controlled by tag 22,and in the data item 23(h'42', 22([21(h'43')])]) the effective + // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21, + // respectively. + expectedLaterEncodingTags []uint64 +} + +// value decodes CBOR data item into the value pointed to by v. +// If CBOR data item fails to be decoded into v, +// error is returned and offset is moved to the next CBOR data item. +// Precondition: d.data contains at least one well-formed CBOR data item. +func (d *decoder) value(v interface{}) error { + // v can't be nil, non-pointer, or nil pointer value. + if v == nil { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"} + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"} + } else if rv.IsNil() { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"} + } + rv = rv.Elem() + return d.parseToValue(rv, getTypeInfo(rv.Type())) +} + +// parseToValue decodes CBOR data to value. It assumes data is well-formed, +// and does not perform bounds checking. +func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + + // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil. 
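The expectedLaterEncodingTags stack kept on the decoder drives ByteStringToStringAllowedWithExpectedLaterEncoding: a byte string nested under tag 21/22/23 is rendered into the destination string using the corresponding text encoding. A sketch, assuming the hex literal encodes tag 21 wrapping the byte string h'010203'.

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Assumed test input: 0xd5 (tag 21, "expected base64url") wrapping h'010203'.
	data, _ := hex.DecodeString("d543010203")

	dm, err := cbor.DecOptions{
		ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding,
	}.DecMode()
	if err != nil {
		panic(err)
	}

	var s string
	if err := dm.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // AQID (base64url of 0x01 0x02 0x03)
}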
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
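A sketch of the time decoding handled by parseToTime above, assuming the hex literal encodes tag 1 wrapping 1363896240 (the RFC 8949 example for 2013-03-21T20:04:00Z): decoded into time.Time directly, and into interface{} as an RFC 3339 string via TimeTagToRFC3339.

package main

import (
	"encoding/hex"
	"fmt"
	"time"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Assumed test input: 0xc1 0x1a 0x51 0x4b 0x67 0xb0, tag 1 (epoch time)
	// wrapping 1363896240, i.e. 2013-03-21T20:04:00Z.
	data, _ := hex.DecodeString("c11a514b67b0")

	// Decoding into time.Time uses the tag content directly.
	var t time.Time
	if err := cbor.Unmarshal(data, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.UTC())

	// Decoding into interface{} with TimeTagToRFC3339 yields a string instead.
	dm, err := cbor.DecOptions{TimeTagToAny: cbor.TimeTagToRFC3339}.DecMode()
	if err != nil {
		panic(err)
	}
	var v interface{}
	if err := dm.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v, v) // string 2013-03-21T20:04:00Z
}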
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
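parseToUnmarshaler above hands the raw bytes of the next data item to any destination that implements cbor.Unmarshaler. A sketch of such a type; Celsius and its encoding are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Celsius is a hypothetical type that decodes itself from a CBOR number.
// Because *Celsius implements cbor.Unmarshaler, the decoder passes it the
// encoded bytes of the data item instead of filling it reflectively.
type Celsius float64

func (c *Celsius) UnmarshalCBOR(data []byte) error {
	var f float64
	if err := cbor.Unmarshal(data, &f); err != nil {
		return fmt.Errorf("celsius: %w", err)
	}
	*c = Celsius(f)
	return nil
}

func main() {
	// Assumed test input produced by cbor.Marshal(21.5).
	data, err := cbor.Marshal(21.5)
	if err != nil {
		panic(err)
	}

	var c Celsius
	if err := cbor.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c) // 21.5
}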
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
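A sketch of the bignum handling shown in the tagNumUnsignedBignum branch above, assuming the hex literal encodes the RFC 8949 example 2(18446744073709551616), i.e. tag 2 holding 2^64: decoded into big.Int directly, and into interface{} as *big.Int via BigIntDecodePointer.

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Assumed test input: 0xc2 0x49 0x01 followed by eight 0x00 bytes,
	// tag 2 (unsigned bignum) holding 18446744073709551616.
	data, _ := hex.DecodeString("c249010000000000000000")

	// Decoding into big.Int works with the default options.
	var n big.Int
	if err := cbor.Unmarshal(data, &n); err != nil {
		panic(err)
	}
	fmt.Println(n.String())

	// When decoding into interface{}, BigIntDecodePointer selects *big.Int
	// instead of the default big.Int value.
	dm, err := cbor.DecOptions{BigIntDec: cbor.BigIntDecodePointer}.DecMode()
	if err != nil {
		panic(err)
	}
	var v interface{}
	if err := dm.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v, v) // *big.Int 18446744073709551616
}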
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
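// For orientation, a minimal sketch of what the generic parse path above
// produces through Unmarshal: with an interface{} destination, CBOR arrays
// decode to []interface{} and CBOR maps to map[interface{}]interface{}.
// The input bytes are hand-assembled for illustration.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// 0x82          -- array(2)
	//   0x01        --   unsigned(1)
	//   0xa1        --   map(1)
	//     0x61 0x78 --     text("x")
	//     0x02      --     unsigned(2)
	data := []byte{0x82, 0x01, 0xa1, 0x61, 0x78, 0x02}

	var v interface{}
	if err := cbor.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // [1 map[x:2]]
}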
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
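// A sketch of how DupMapKeyEnforcedAPF surfaces to callers, handled above for
// generic maps and below for typed maps. The DecOptions field name DupMapKey
// is assumed from this vendored version; the duplicate-key input is hand-built.
package main

import (
	"errors"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
	if err != nil {
		panic(err)
	}

	// {"x": 1, "x": 2} -- the key "x" appears twice.
	data := []byte{0xa2, 0x61, 0x78, 0x01, 0x61, 0x78, 0x02}

	var m map[string]int
	err = dm.Unmarshal(data, &m)

	var dup *cbor.DupMapKeyError
	fmt.Println(errors.As(err, &dup)) // true
}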
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
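// parseArrayToStruct above only accepts structs declared with the "toarray"
// option; a minimal round-trip sketch using the public API.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Point encodes to (and decodes from) a two-element CBOR array rather than a
// map because of the special "_" field carrying the toarray option.
type Point struct {
	_ struct{} `cbor:",toarray"`
	X int
	Y int
}

func main() {
	b, err := cbor.Marshal(Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // 82 01 02

	var p Point
	if err := cbor.Unmarshal(b, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.X, p.Y) // 1 2
}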
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
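// A sketch of the unknown-field behavior handled above. The constant
// ExtraDecErrorUnknownField appears in the decoder above; the DecOptions field
// name ExtraReturnErrors is assumed and the input bytes are hand-built.
package main

import (
	"errors"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Config struct {
	Name string `cbor:"name"`
}

func main() {
	dm, err := cbor.DecOptions{ExtraReturnErrors: cbor.ExtraDecErrorUnknownField}.DecMode()
	if err != nil {
		panic(err)
	}

	// {"name": "a", "extra": 1}
	data := []byte{
		0xa2,
		0x64, 'n', 'a', 'm', 'e', 0x61, 'a',
		0x65, 'e', 'x', 't', 'r', 'a', 0x01,
	}

	var c Config
	err = dm.Unmarshal(data, &c)

	var unknown *cbor.UnknownFieldError
	fmt.Println(errors.As(err, &unknown)) // true
}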
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
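// getHead above implements the RFC 8949 head layout: additional information
// 0..23 embeds the value directly, while 24/25/26/27 select a 1/2/4/8-byte
// argument. A small sketch of that rule as seen through Unmarshal.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	inputs := [][]byte{
		{0x17},             // ai 23: value embedded in the initial byte -> 23
		{0x18, 0x2a},       // ai 24: 1-byte argument                    -> 42
		{0x19, 0x03, 0xe8}, // ai 25: 2-byte argument                    -> 1000
	}
	for _, in := range inputs {
		var u uint64
		if err := cbor.Unmarshal(in, &u); err != nil {
			panic(err)
		}
		fmt.Println(u) // 23, 42, 1000
	}
}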
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 0000000000..44afb86608 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
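// fillByteString above copies into fixed-size byte arrays element by element
// and zero-fills any remainder; a minimal sketch of that behavior.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// 0x43 01 02 03 -- a byte string of length 3.
	data := []byte{0x43, 0x01, 0x02, 0x03}

	var arr [5]byte
	if err := cbor.Unmarshal(data, &arr); err != nil {
		panic(err)
	}
	fmt.Println(arr) // [1 2 3 0 0]
}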
+ +package cbor + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" + "unicode/utf16" + "unicode/utf8" + + "github.com/x448/float16" +) + +// DiagMode is the main interface for CBOR diagnostic notation. +type DiagMode interface { + // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode. + Diagnose([]byte) (string, error) + + // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. + DiagnoseFirst([]byte) (string, []byte, error) + + // DiagOptions returns user specified options used to create this DiagMode. + DiagOptions() DiagOptions +} + +// ByteStringEncoding specifies the base encoding that byte strings are notated. +type ByteStringEncoding uint8 + +const ( + // ByteStringBase16Encoding encodes byte strings in base16, without padding. + ByteStringBase16Encoding ByteStringEncoding = iota + + // ByteStringBase32Encoding encodes byte strings in base32, without padding. + ByteStringBase32Encoding + + // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding. + ByteStringBase32HexEncoding + + // ByteStringBase64Encoding encodes byte strings in base64url, without padding. + ByteStringBase64Encoding + + maxByteStringEncoding +) + +func (bse ByteStringEncoding) valid() error { + if bse >= maxByteStringEncoding { + return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) + } + return nil +} + +// DiagOptions specifies Diag options. +type DiagOptions struct { + // ByteStringEncoding specifies the base encoding that byte strings are notated. + // Default is ByteStringBase16Encoding. + ByteStringEncoding ByteStringEncoding + + // ByteStringHexWhitespace specifies notating with whitespace in byte string + // when ByteStringEncoding is ByteStringBase16Encoding. + ByteStringHexWhitespace bool + + // ByteStringText specifies notating with text in byte string + // if it is a valid UTF-8 text. + ByteStringText bool + + // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string + // if it is a valid CBOR bytes. + ByteStringEmbeddedCBOR bool + + // CBORSequence specifies notating CBOR sequences. + // otherwise, it returns an error if there are more bytes after the first CBOR. + CBORSequence bool + + // FloatPrecisionIndicator specifies appending a suffix to indicate float precision. + // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators. + FloatPrecisionIndicator bool + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int +} + +// DiagMode returns a DiagMode with immutable options. 
+func (opts DiagOptions) DiagMode() (DiagMode, error) { + return opts.diagMode() +} + +func (opts DiagOptions) diagMode() (*diagMode, error) { + if err := opts.ByteStringEncoding.valid(); err != nil { + return nil, err + } + + decMode, err := DecOptions{ + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.decMode() + if err != nil { + return nil, err + } + + return &diagMode{ + byteStringEncoding: opts.ByteStringEncoding, + byteStringHexWhitespace: opts.ByteStringHexWhitespace, + byteStringText: opts.ByteStringText, + byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR, + cborSequence: opts.CBORSequence, + floatPrecisionIndicator: opts.FloatPrecisionIndicator, + decMode: decMode, + }, nil +} + +type diagMode struct { + byteStringEncoding ByteStringEncoding + byteStringHexWhitespace bool + byteStringText bool + byteStringEmbeddedCBOR bool + cborSequence bool + floatPrecisionIndicator bool + decMode *decMode +} + +// DiagOptions returns user specified options used to create this DiagMode. +func (dm *diagMode) DiagOptions() DiagOptions { + return DiagOptions{ + ByteStringEncoding: dm.byteStringEncoding, + ByteStringHexWhitespace: dm.byteStringHexWhitespace, + ByteStringText: dm.byteStringText, + ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR, + CBORSequence: dm.cborSequence, + FloatPrecisionIndicator: dm.floatPrecisionIndicator, + MaxNestedLevels: dm.decMode.maxNestedLevels, + MaxArrayElements: dm.decMode.maxArrayElements, + MaxMapPairs: dm.decMode.maxMapPairs, + } +} + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode. +func (dm *diagMode) Diagnose(data []byte) (string, error) { + return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence) +} + +// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. +func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return newDiagnose(data, dm.decMode, dm).diagFirst() +} + +var defaultDiagMode, _ = DiagOptions{}.diagMode() + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items +// using the default diagnostic mode. +// +// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation. +func Diagnose(data []byte) (string, error) { + return defaultDiagMode.Diagnose(data) +} + +// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. 
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
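// A sketch of the diagnostic notation entry points defined above.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	edn, err := cbor.Diagnose([]byte{0x83, 0x01, 0x02, 0x03})
	if err != nil {
		panic(err)
	}
	fmt.Println(edn) // [1, 2, 3]

	// DiagnoseFirst stops after one data item and returns the remaining bytes.
	first, rest, err := cbor.DiagnoseFirst([]byte{0x01, 0x02})
	if err != nil {
		panic(err)
	}
	fmt.Println(first, rest) // 1 [2]
}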
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64: + di.w.WriteString("NaN") + return nil + case f64 > math.MaxFloat64: + di.w.WriteString("Infinity") + return nil + case f64 < -math.MaxFloat64: + di.w.WriteString("-Infinity") + return nil + } + } + // Use ES6 number to string conversion which should match most JSON generators. + // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585 + const bitSize = 64 + b := make([]byte, 0, 32) + if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) { + b = strconv.AppendFloat(b, f64, 'e', -1, bitSize) + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && string(b[n-4:n-1]) == "e-0" { + b = append(b[:n-2], b[n-1]) + } + } else { + b = strconv.AppendFloat(b, f64, 'f', -1, bitSize) + } + + // add decimal point and trailing zero if needed + if bytes.IndexByte(b, '.') < 0 { + if i := bytes.IndexByte(b, 'e'); i < 0 { + b = append(b, '.', '0') + } else { + b = append(b[:i+2], b[i:]...) + b[i] = '.' + b[i+1] = '0' + } + } + + di.w.WriteString(string(b)) + + if di.dm.floatPrecisionIndicator { + switch ai { + case additionalInformationAsFloat16: + di.w.WriteString("_1") + return nil + + case additionalInformationAsFloat32: + di.w.WriteString("_2") + return nil + + case additionalInformationAsFloat64: + di.w.WriteString("_3") + return nil + } + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go new file mode 100644 index 0000000000..23f68b984c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/doc.go @@ -0,0 +1,129 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +/* +Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags, +Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding, +CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection. + +Encoding options allow "preferred serialization" by encoding integers and floats +to their smallest forms (e.g. float16) when values fit. + +Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller +and easier to use with structs. + +For example, "toarray" tag makes struct fields encode to CBOR array elements. And +"keyasint" makes a field encode to an element of CBOR map with specified int key. + +Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go + +# Basics + +The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start + +Function signatures identical to encoding/json include: + + Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode. + +Standard interfaces include: + + BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler. + +Custom encoding and decoding is possible by implementing standard interfaces for +user-defined Go types. + +Codec functions are available at package-level (using defaults options) or by +creating modes from options at runtime. + +"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode). + +EncMode and DecMode interfaces are created from EncOptions or DecOptions structs. + + em, err := cbor.EncOptions{...}.EncMode() + em, err := cbor.CanonicalEncOptions().EncMode() + em, err := cbor.CTAP2EncOptions().EncMode() + +Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of +modes won't accidentally change at runtime after they're created. 
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 0000000000..6508e291d6 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. +// +// If value implements encoding.BinaryMarshaler, Marhsal calls its +// MarshalBinary method and encode it as CBOR byte string. 
+// +// Boolean values encode as CBOR booleans (type 7). +// +// Positive integer values encode as CBOR positive integers (type 0). +// +// Negative integer values encode as CBOR negative integers (type 1). +// +// Floating point values encode as CBOR floating points (type 7). +// +// String values encode as CBOR text strings (type 3). +// +// []byte values encode as CBOR byte strings (type 2). +// +// Array and slice values encode as CBOR arrays (type 4). +// +// Map values encode as CBOR maps (type 5). +// +// Struct values encode as CBOR maps (type 5). Each exported struct field +// becomes a pair with field name encoded as CBOR text string (type 3) and +// field value encoded based on its type. See struct tag option "keyasint" +// to encode field name as CBOR integer (type 0 and 1). Also see struct +// tag option "toarray" for special field "_" to encode struct values as +// CBOR array (type 4). +// +// Marshal supports format string stored under the "cbor" key in the struct +// field's tag. CBOR format string can specify the name of the field, +// "omitempty" and "keyasint" options, and special case "-" for field omission. +// If "cbor" key is absent, Marshal uses "json" key. +// +// Struct field name is treated as integer if it has "keyasint" option in +// its format string. The format string must specify an integer as its +// field name. +// +// Special struct field "_" is used to specify struct level options, such as +// "toarray". "toarray" option enables Go struct to be encoded as CBOR array. +// "omitempty" is disabled by "toarray" to ensure that the same number +// of elements are encoded every time. +// +// Anonymous struct fields are marshaled as if their exported fields +// were fields in the outer struct. Marshal follows the same struct fields +// visibility rules used by JSON encoding package. +// +// time.Time values encode as text strings specified in RFC3339 or numerical +// representation of seconds since January 1, 1970 UTC depending on +// EncOptions.Time setting. Also See EncOptions.TimeTag to encode +// time.Time as CBOR tag with tag number 0 or 1. +// +// big.Int values encode as CBOR integers (type 0 and 1) if values fit. +// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See +// EncOptions.BigIntConvert to always encode big.Int values as CBOR +// bignums. +// +// Pointer values encode as the value pointed to. +// +// Interface values encode as the value stored in the interface. +// +// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7). +// +// Values of other types cannot be encoded in CBOR. Attempting +// to encode such a value causes Marshal to return an UnsupportedTypeError. +func Marshal(v interface{}) ([]byte, error) { + return defaultEncMode.Marshal(v) +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses default encoding options. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + return defaultEncMode.MarshalToBuffer(v, buf) +} + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid CBOR. +type Marshaler interface { + MarshalCBOR() ([]byte, error) +} + +// MarshalerError represents error from checking encoded CBOR data item +// returned from MarshalCBOR for well-formedness and some very limited tag validation. 
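// A minimal sketch of the struct tag rules documented above ("keyasint" and
// "omitempty"); the type, field names, and values are illustrative only.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Claims struct {
	Issuer string `cbor:"1,keyasint"`      // encoded under integer map key 1
	Scope  string `cbor:"scope,omitempty"` // omitted when empty
}

func main() {
	b, err := cbor.Marshal(Claims{Issuer: "example"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // a1 01 67 65 78 61 6d 70 6c 65, i.e. {1: "example"}
}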
+type MarshalerError struct { + typ reflect.Type + err error +} + +func (e *MarshalerError) Error() string { + return "cbor: error calling MarshalCBOR for type " + + e.typ.String() + + ": " + e.err.Error() +} + +func (e *MarshalerError) Unwrap() error { + return e.err +} + +// UnsupportedTypeError is returned by Marshal when attempting to encode value +// of an unsupported type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "cbor: unsupported type: " + e.Type.String() +} + +// UnsupportedValueError is returned by Marshal when attempting to encode an +// unsupported value. +type UnsupportedValueError struct { + msg string +} + +func (e *UnsupportedValueError) Error() string { + return "cbor: unsupported value: " + e.msg +} + +// SortMode identifies supported sorting order. +type SortMode int + +const ( + // SortNone encodes map pairs and struct fields in an arbitrary order. + SortNone SortMode = 0 + + // SortLengthFirst causes map keys or struct fields to be sorted such that: + // - If two keys have different lengths, the shorter one sorts earlier; + // - If two keys have the same length, the one with the lower value in + // (byte-wise) lexical order sorts earlier. + // It is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortLengthFirst SortMode = 1 + + // SortBytewiseLexical causes map keys or struct fields to be sorted in the + // bytewise lexicographic order of their deterministic CBOR encodings. + // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding" + // in RFC 7049bis. + SortBytewiseLexical SortMode = 2 + + // SortShuffle encodes map pairs and struct fields in a shuffled + // order. This mode does not guarantee an unbiased permutation, but it + // does guarantee that the runtime of the shuffle algorithm used will be + // constant. + SortFastShuffle SortMode = 3 + + // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortCanonical SortMode = SortLengthFirst + + // SortCTAP2 is used in "CTAP2 Canonical CBOR". + SortCTAP2 SortMode = SortBytewiseLexical + + // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis. + SortCoreDeterministic SortMode = SortBytewiseLexical + + maxSortMode SortMode = 4 +) + +func (sm SortMode) valid() bool { + return sm >= 0 && sm < maxSortMode +} + +// StringMode specifies how to encode Go string values. +type StringMode int + +const ( + // StringToTextString encodes Go string to CBOR text string (major type 3). + StringToTextString StringMode = iota + + // StringToByteString encodes Go string to CBOR byte string (major type 2). + StringToByteString +) + +func (st StringMode) cborType() (cborType, error) { + switch st { + case StringToTextString: + return cborTypeTextString, nil + + case StringToByteString: + return cborTypeByteString, nil + } + return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st))) +} + +// ShortestFloatMode specifies which floating-point format should +// be used as the shortest possible format for CBOR encoding. +// It is not used for encoding Infinity and NaN values. +type ShortestFloatMode int + +const ( + // ShortestFloatNone makes float values encode without any conversion. + // This is the default for ShortestFloatMode in v1. + // E.g. a float32 in Go will encode to CBOR float32. And + // a float64 in Go will encode to CBOR float64. + ShortestFloatNone ShortestFloatMode = iota + + // ShortestFloat16 specifies float16 as the shortest form that preserves value. + // E.g. 
if float64 can convert to float32 while preserving value, then + // encoding will also try to convert float32 to float16. So a float64 might + // encode as CBOR float64, float32 or float16 depending on the value. + ShortestFloat16 + + maxShortestFloat +) + +func (sfm ShortestFloatMode) valid() bool { + return sfm >= 0 && sfm < maxShortestFloat +} + +// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type NaNConvertMode int + +const ( + // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00). + NaNConvert7e00 NaNConvertMode = iota + + // NaNConvertNone never modifies or converts NaN to other representations + // (float64 NaN stays float64, etc. even if it can use float16 without losing + // any bits). + NaNConvertNone + + // NaNConvertPreserveSignal converts NaN to the smallest form that preserves + // value (quiet bit + payload) as described in RFC 7049bis Draft 12. + NaNConvertPreserveSignal + + // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves + // NaN payload. + NaNConvertQuiet + + // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value. + NaNConvertReject + + maxNaNConvert +) + +func (ncm NaNConvertMode) valid() bool { + return ncm >= 0 && ncm < maxNaNConvert +} + +// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type InfConvertMode int + +const ( + // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16). + InfConvertFloat16 InfConvertMode = iota + + // InfConvertNone never converts (used by CTAP2 Canonical CBOR). + InfConvertNone + + // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value. + InfConvertReject + + maxInfConvert +) + +func (icm InfConvertMode) valid() bool { + return icm >= 0 && icm < maxInfConvert +} + +// TimeMode specifies how to encode time.Time values. +type TimeMode int + +const ( + // TimeUnix causes time.Time to be encoded as epoch time in integer with second precision. + TimeUnix TimeMode = iota + + // TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision. + TimeUnixMicro + + // TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds, + // otherwise float-point rounded to microsecond precision. + TimeUnixDynamic + + // TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision. + TimeRFC3339 + + // TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision. + TimeRFC3339Nano + + maxTimeMode +) + +func (tm TimeMode) valid() bool { + return tm >= 0 && tm < maxTimeMode +} + +// BigIntConvertMode specifies how to encode big.Int values. +type BigIntConvertMode int + +const ( + // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits. + // E.g. if big.Int value can be converted to CBOR integer while preserving + // value, encoder will encode it to CBOR integer (major type 0 or 1). + BigIntConvertShortest BigIntConvertMode = iota + + // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without + // converting it to another CBOR type. + BigIntConvertNone + + // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int. 
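// A sketch of ShortestFloat16 described above: a float64 that fits exactly in
// IEEE binary16 is encoded as a 3-byte CBOR float16.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	em, err := cbor.EncOptions{ShortestFloat: cbor.ShortestFloat16}.EncMode()
	if err != nil {
		panic(err)
	}

	b, err := em.Marshal(1.5)
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // f9 3e 00
}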
+ BigIntConvertReject + + maxBigIntConvert +) + +func (bim BigIntConvertMode) valid() bool { + return bim >= 0 && bim < maxBigIntConvert +} + +// NilContainersMode specifies how to encode nil slices and maps. +type NilContainersMode int + +const ( + // NilContainerAsNull encodes nil slices and maps as CBOR null. + // This is the default. + NilContainerAsNull NilContainersMode = iota + + // NilContainerAsEmpty encodes nil slices and maps as + // empty container (CBOR bytestring, array, or map). + NilContainerAsEmpty + + maxNilContainersMode +) + +func (m NilContainersMode) valid() bool { + return m >= 0 && m < maxNilContainersMode +} + +// OmitEmptyMode specifies how to encode struct fields with omitempty tag. +// The default behavior omits if field value would encode as empty CBOR value. +type OmitEmptyMode int + +const ( + // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field would be encoded as an empty + // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string, + // empty array, or empty map. + OmitEmptyCBORValue OmitEmptyMode = iota + + // OmitEmptyGoValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field has an empty Go value, + // defined as false, 0, 0.0, a nil pointer, a nil interface value, and + // any empty array, slice, map, or string. + // This behavior is the same as the current (aka v1) encoding/json package + // included in Go. + OmitEmptyGoValue + + maxOmitEmptyMode +) + +func (om OmitEmptyMode) valid() bool { + return om >= 0 && om < maxOmitEmptyMode +} + +// FieldNameMode specifies the CBOR type to use when encoding struct field names. +type FieldNameMode int + +const ( + // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). + FieldNameToTextString FieldNameMode = iota + + // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + FieldNameToByteString + + maxFieldNameMode +) + +func (fnm FieldNameMode) valid() bool { + return fnm >= 0 && fnm < maxFieldNameMode +} + +// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23) +// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will +// always encode unmodified bytes from the byte slice and just wrap it within +// CBOR tag 21, 22, or 23 if specified. +// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. +type ByteSliceLaterFormatMode int + +const ( + // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // without adding CBOR tag 21, 22, or 23. + ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota + + // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64URL + + // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64 + + // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2). 
+ ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will + // always encode unmodified bytes from the byte slice and just wrap it within + // CBOR tag 21, 22, or 23 if specified. + // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. 
+ ByteSliceLaterFormat ByteSliceLaterFormatMode + + // ByteArray specifies how to encode byte arrays. + ByteArray ByteArrayMode + + // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler. + BinaryMarshaler BinaryMarshalerMode +} + +// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding, +// defined in RFC 7049 Section 3.9 with the following rules: +// +// 1. "Integers must be as small as possible." +// 2. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 3. The keys in every map must be sorted in length-first sorting order. +// See SortLengthFirst for details. +// 4. "Indefinite-length items must be made into definite-length items." +// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might +// need to be added. One example rule might be to have all floats start as a 64-bit +// float, then do a test conversion to a 32-bit float; if the result is the same numeric +// value, use the shorter value and repeat the process with a test conversion to a +// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity +// as well.) Also, there are many representations for NaN. If NaN is an allowed value, +// it must always be represented as 0xf97e00." +func CanonicalEncOptions() EncOptions { + return EncOptions{ + Sort: SortCanonical, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding, +// defined in CTAP specification, with the following rules: +// +// 1. "Integers must be encoded as small as possible." +// 2. "The representations of any floating-point values are not changed." +// 3. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 4. "Indefinite-length items must be made into definite-length items."" +// 5. The keys in every map must be sorted in bytewise lexicographic order. +// See SortBytewiseLexical for details. +// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present." +func CTAP2EncOptions() EncOptions { + return EncOptions{ + Sort: SortCTAP2, + ShortestFloat: ShortestFloatNone, + NaNConvert: NaNConvertNone, + InfConvert: InfConvertNone, + IndefLength: IndefLengthForbidden, + TagsMd: TagsForbidden, + } +} + +// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "Preferred serialization MUST be used. In particular, this means that arguments +// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST +// be as short as possible" +// "Floating point values also MUST use the shortest form that preserves the value" +// 2. "Indefinite-length items MUST NOT appear." +// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of +// their deterministic encodings." +func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. 
"it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
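
// Editor's note: an illustrative sketch added for review, not part of the
// vendored diff. Because encMode() above validates every option, misconfigured
// EncOptions fail when the mode is built rather than later at Marshal time.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	_, err := cbor.EncOptions{Sort: cbor.SortMode(42)}.EncMode()
	fmt.Println(err) // cbor: invalid SortMode 42

	_, err = cbor.EncOptions{TagsMd: cbor.TagsForbidden, TimeTag: cbor.EncTagRequired}.EncMode()
	fmt.Println(err) // cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired
}
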
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
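
// Editor's note: an illustrative sketch added for review, not part of the
// vendored diff. It shows the effect of the marshaler decode modes above:
// bytes returned by a user-defined MarshalCBOR are re-checked for
// well-formedness, and bad output is reported as *cbor.MarshalerError. The
// package-level Marshal helper is assumed from the earlier, non-shown part of
// the vendored encode.go.
package main

import (
	"errors"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type truncated struct{}

// MarshalCBOR returns a deliberately ill-formed item: a text-string head that
// promises 5 bytes of content but provides none.
func (truncated) MarshalCBOR() ([]byte, error) { return []byte{0x65}, nil }

func main() {
	_, err := cbor.Marshal(truncated{})
	var me *cbor.MarshalerError
	fmt.Println(errors.As(err, &me)) // true: output failed the well-formedness check
}
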
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
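
// Editor's note: an illustrative sketch added for review, not part of the
// vendored diff. Marshal above copies the result out of a pooled buffer, while
// MarshalToBuffer (via UserBufferEncMode) writes into a caller-owned
// bytes.Buffer, letting the caller manage reuse.
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	em, err := cbor.EncOptions{}.UserBufferEncMode()
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := em.MarshalToBuffer([]int{1, 2, 3}, &buf); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(buf.Bytes())) // 83010203
}
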
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
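
// Editor's note: an illustrative sketch added for review, not part of the
// vendored diff. It exercises encodeString and encodeByteString above with the
// non-default String and ByteSliceLaterFormat options; the expected hex assumes
// all other options keep their zero values.
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	em, err := cbor.EncOptions{
		String:               cbor.StringToByteString,            // Go strings as major type 2
		ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64URL, // wrap []byte in tag 21
	}.EncMode()
	if err != nil {
		panic(err)
	}
	b, _ := em.Marshal("hi")
	fmt.Println(hex.EncodeToString(b)) // 426869 (byte string "hi")

	b, _ = em.Marshal([]byte{1, 2, 3})
	fmt.Println(hex.EncodeToString(b)) // d543010203 (tag 21 around 0x010203)
}
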
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
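
// Editor's note: an illustrative sketch added for review, not part of the
// vendored diff. It shows what encodeTag above emits for a cbor.Tag value: the
// tag-number head followed by the encoding of Content (this requires the
// default TagsMd, i.e. tags allowed).
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	b, err := cbor.Marshal(cbor.Tag{Number: 42, Content: "a"})
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(b)) // d82a6161: tag(42) wrapping text string "a"
}
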
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 0000000000..8b4b4bbc59 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 0000000000..31c39336dd --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 0000000000..de175cee4a --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
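
// Editor's note: an illustrative sketch added for review, not part of the
// vendored diff. It exercises the one-byte/two-byte split in
// SimpleValue.MarshalCBOR above: values 0..23 fit in one byte, 32..255 use
// 0xf8 plus one byte, and the reserved range 24..31 is rejected.
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	for _, sv := range []cbor.SimpleValue{16, 255, 24} {
		b, err := cbor.Marshal(sv)
		fmt.Println(hex.EncodeToString(b), err) // f0 <nil>; f8ff <nil>; then an error for 24
	}
}
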
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error { + if sv == nil { + return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer") + } + + d := decoder{data: data, dm: defaultDecMode} + + typ, ai, val := d.getHead() + + if typ != cborTypePrimitives { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"} + } + if ai > additionalInformationWith1ByteArgument { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"} + } + + // It is safe to cast val to uint8 here because + // - data is already verified to be well-formed CBOR simple value and + // - val is <= math.MaxUint8. + *sv = SimpleValue(val) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go new file mode 100644 index 0000000000..507ab6c184 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -0,0 +1,277 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "errors" + "io" + "reflect" +) + +// Decoder reads and decodes CBOR values from io.Reader. +type Decoder struct { + r io.Reader + d decoder + buf []byte + off int // next read offset in buf + bytesRead int +} + +// NewDecoder returns a new decoder that reads and decodes from r using +// the default decoding options. +func NewDecoder(r io.Reader) *Decoder { + return defaultDecMode.NewDecoder(r) +} + +// Decode reads CBOR value and decodes it into the value pointed to by v. +func (dec *Decoder) Decode(v interface{}) error { + _, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.d.reset(dec.buf[dec.off:]) + err = dec.d.value(v) + + // Increment dec.off even if decoding err is not nil because + // dec.d.off points to the next CBOR data item if current + // CBOR data item is valid but failed to be decoded into v. + // This allows next CBOR data item to be decoded in next + // call to this function. + dec.off += dec.d.off + dec.bytesRead += dec.d.off + + return err +} + +// Skip skips to the next CBOR data item (if there is any), +// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc. +func (dec *Decoder) Skip() error { + n, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.off += n + dec.bytesRead += n + return nil +} + +// NumBytesRead returns the number of bytes read. +func (dec *Decoder) NumBytesRead() int { + return dec.bytesRead +} + +// Buffered returns a reader for data remaining in Decoder's buffer. +// Returned reader is valid until the next call to Decode or Skip. +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.buf[dec.off:]) +} + +// readNext() reads next CBOR data item from Reader to buffer. +// It returns the size of next CBOR data item. +// It also returns validation error or read error if any. +func (dec *Decoder) readNext() (int, error) { + var readErr error + var validErr error + + for { + // Process any unread data in dec.buf. + if dec.off < len(dec.buf) { + dec.d.reset(dec.buf[dec.off:]) + off := dec.off // Save offset before data validation + validErr = dec.d.wellformed(true, false) + dec.off = off // Restore offset + + if validErr == nil { + return dec.d.off, nil + } + + if validErr != io.ErrUnexpectedEOF { + return 0, validErr + } + + // Process last read error on io.ErrUnexpectedEOF. 
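
// Editor's note: an illustrative sketch added for review, not part of the
// vendored diff. It drives the buffering logic in readNext/read above: several
// CBOR data items concatenated in one stream, decoded one at a time until io.EOF.
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Three items back to back: 1, "hi", [1, 2].
	stream := bytes.NewReader([]byte{0x01, 0x62, 0x68, 0x69, 0x82, 0x01, 0x02})
	dec := cbor.NewDecoder(stream)
	for {
		var v interface{}
		if err := dec.Decode(&v); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			panic(err)
		}
		fmt.Printf("%T %v\n", v, v)
	}
	fmt.Println("bytes read:", dec.NumBytesRead()) // 7
}
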
+ if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteArray() error { + return enc.startIndefinite(cborTypeArray) +} + +// StartIndefiniteMap starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the map +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteMap() error { + return enc.startIndefinite(cborTypeMap) +} + +// EndIndefinite closes last opened indefinite length value. +func (enc *Encoder) EndIndefinite() error { + if len(enc.indefTypes) == 0 { + return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") + } + _, err := enc.w.Write([]byte{cborBreakFlag}) + if err == nil { + enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1] + } + return err +} + +var cborIndefHeader = map[cborType][]byte{ + cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, + cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, + cborTypeArray: {cborArrayWithIndefiniteLengthHead}, + cborTypeMap: {cborMapWithIndefiniteLengthHead}, +} + +func (enc *Encoder) startIndefinite(typ cborType) error { + if enc.em.indefLength == IndefLengthForbidden { + return &IndefiniteLengthError{typ} + } + _, err := enc.w.Write(cborIndefHeader[typ]) + if err == nil { + enc.indefTypes = append(enc.indefTypes, typ) + } + return err +} + +// RawMessage is a raw encoded CBOR value. +type RawMessage []byte + +// MarshalCBOR returns m or CBOR nil if m is nil. +func (m RawMessage) MarshalCBOR() ([]byte, error) { + if len(m) == 0 { + return cborNil, nil + } + return m, nil +} + +// UnmarshalCBOR creates a copy of data and saves to *m. +func (m *RawMessage) UnmarshalCBOR(data []byte) error { + if m == nil { + return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer") + } + *m = append((*m)[0:0], data...) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go new file mode 100644 index 0000000000..81228acf0f --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -0,0 +1,260 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "reflect" + "sort" + "strings" +) + +type field struct { + name string + nameAsInt int64 // used to decoder to match field name with CBOR int + cborName []byte + cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 + idx []int + typ reflect.Type + ef encodeFunc + ief isEmptyFunc + typInfo *typeInfo // used to decoder to reuse type info + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + keyAsInt bool // used to encode/decode field name as int +} + +type fields []*field + +// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. 
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 0000000000..5c4d2b7a42 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 0000000000..b40793b95e --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index d975773d49..d970c7cf44 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -264,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index ce99ce521b..888e107c38 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -16,6 +16,7 @@ package strfmt import ( "encoding" + stderrors "errors" "fmt" "reflect" "strings" @@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { case "datetime": input := data if len(input) == 0 { - return nil, fmt.Errorf("empty string is an invalid datetime format") + return nil, stderrors.New("empty string is an invalid datetime format") } return ParseDateTime(input) case "duration": diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go index 2b2e463107..20a359bb60 100644 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -176,7 +176,7 @@ func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { func (m *indexOfInitialisms) sorted() (result []string) { m.sortMutex.Lock() defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { + m.index.Range(func(key, _ interface{}) bool { k := key.(string) result = append(result, k) return true diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go index c52d6bf719..90745d5ca9 100644 --- a/vendor/github.com/go-openapi/swag/string_bytes.go +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -2,21 +2,7 @@ package swag import "unsafe" -type internalString 
struct { - Data unsafe.Pointer - Len int -} - // hackStringBytes returns the (unsafe) underlying bytes slice of a string. -func hackStringBytes(str string) []byte { - p := (*internalString)(unsafe.Pointer(&str)).Data - return unsafe.Slice((*byte)(p), len(str)) -} - -/* - * go1.20 version (for when go mod moves to a go1.20 requirement): - func hackStringBytes(str string) []byte { return unsafe.Slice(unsafe.StringData(str), len(str)) } -*/ diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index a8c4e359ea..f59e025932 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,6 +16,7 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" "reflect" @@ -50,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md new file mode 100644 index 0000000000..79cf6a077b --- /dev/null +++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md @@ -0,0 +1,31 @@ +# Benchmark + +Validating the Kubernetes Swagger API + +## v0.22.6: 60,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op +``` + +## After refact PR: minor but noticable improvements: 25,000,000 allocs +``` +go test -bench Spec +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op +``` + +## After reduce GC pressure PR: 17,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op +``` diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index 7f7fd0be9c..e30d1501f5 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -25,48 +25,55 @@ import ( // According to Swagger spec, default values MUST validate their schema. 
type defaultValidator struct { SpecValidator *SpecValidator - visitedSchemas map[string]bool + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions } // resetVisited resets the internal state of visited schemas func (d *defaultValidator) resetVisited() { - d.visitedSchemas = map[string]bool{} + if d.visitedSchemas == nil { + d.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range d.visitedSchemas { + delete(d.visitedSchemas, k) + } } -func isVisited(path string, visitedSchemas map[string]bool) bool { - found := visitedSchemas[path] - if !found { - // search for overlapping paths - frags := strings.Split(path, ".") - if len(frags) < 2 { - // shortcut exit on smaller paths - return found +func isVisited(path string, visitedSchemas map[string]struct{}) bool { + _, found := visitedSchemas[path] + if found { + return true + } + + // search for overlapping paths + var ( + parent string + suffix string + ) + for i := len(path) - 2; i >= 0; i-- { + r := path[i] + if r != '.' { + continue } - last := len(frags) - 1 - var currentFragStr, parent string - for i := range frags { - if i == 0 { - currentFragStr = frags[last] - } else { - currentFragStr = strings.Join([]string{frags[last-i], currentFragStr}, ".") - } - if i < last { - parent = strings.Join(frags[0:last-i], ".") - } else { - parent = "" - } - if strings.HasSuffix(parent, currentFragStr) { - found = true - break - } + + parent = path[0:i] + suffix = path[i+1:] + + if strings.HasSuffix(parent, suffix) { + return true } } - return found + + return false } // beingVisited asserts a schema is being visited func (d *defaultValidator) beingVisited(path string) { - d.visitedSchemas[path] = true + d.visitedSchemas[path] = struct{}{} } // isVisited tells if a path has already been visited @@ -75,8 +82,9 @@ func (d *defaultValidator) isVisited(path string) bool { } // Validate validates the default values declared in the swagger spec -func (d *defaultValidator) Validate() (errs *Result) { - errs = new(Result) +func (d *defaultValidator) Validate() *Result { + errs := poolOfResults.BorrowResult() // will redeem when merged + if d == nil || d.SpecValidator == nil { return errs } @@ -89,7 +97,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // every default value that is specified must validate against the schema for that property // headers, items, parameters, schema - res := new(Result) + res := poolOfResults.BorrowResult() // will redeem when merged s := d.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -107,7 +115,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // default values provided must validate against their inline definition (no explicit schema) if param.Default != nil && param.Schema == nil { // check param default value is valid - red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Default) //#nosec + red := newParamValidator(¶m, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) @@ -176,7 +184,7 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon d.resetVisited() if h.Default != nil { - red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Default) //#nosec + red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec if 
red.HasErrorsOrWarnings() { res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.Merge(red) @@ -219,11 +227,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri return nil } d.beingVisited(path) - res := new(Result) + res := poolOfResults.BorrowResult() s := d.SpecValidator if schema.Default != nil { - res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Default)) + res.Merge( + newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default), + ) } if schema.Items != nil { if schema.Items.Schema != nil { @@ -263,11 +273,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri // TODO: Temporary duplicated code. Need to refactor with examples func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := new(Result) + res := poolOfResults.BorrowResult() s := d.SpecValidator if items != nil { if items.Default != nil { - res.Merge(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Default)) + res.Merge( + newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default), + ) } if items.Items != nil { res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items)) diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index d5d4b88332..2e2cce1b89 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -23,17 +23,27 @@ import ( // ExampleValidator validates example values defined in a spec type exampleValidator struct { SpecValidator *SpecValidator - visitedSchemas map[string]bool + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions } // resetVisited resets the internal state of visited schemas func (ex *exampleValidator) resetVisited() { - ex.visitedSchemas = map[string]bool{} + if ex.visitedSchemas == nil { + ex.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range ex.visitedSchemas { + delete(ex.visitedSchemas, k) + } } // beingVisited asserts a schema is being visited func (ex *exampleValidator) beingVisited(path string) { - ex.visitedSchemas[path] = true + ex.visitedSchemas[path] = struct{}{} } // isVisited tells if a path has already been visited @@ -48,8 +58,9 @@ func (ex *exampleValidator) isVisited(path string) bool { // - schemas // - individual property // - responses -func (ex *exampleValidator) Validate() (errs *Result) { - errs = new(Result) +func (ex *exampleValidator) Validate() *Result { + errs := poolOfResults.BorrowResult() + if ex == nil || ex.SpecValidator == nil { return errs } @@ -64,7 +75,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // in: schemas, properties, object, items // not in: headers, parameters without schema - res := new(Result) + res := poolOfResults.BorrowResult() s := ex.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -82,7 +93,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // default values provided must validate against their inline definition (no explicit schema) if param.Example != nil && param.Schema == nil { // 
check param default value is valid - red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Example) //#nosec + red := newParamValidator(¶m, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.MergeAsWarnings(red) @@ -95,6 +106,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else { + poolOfResults.RedeemResult(red) } } @@ -104,6 +117,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else { + poolOfResults.RedeemResult(red) } } } @@ -151,7 +166,7 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo ex.resetVisited() if h.Example != nil { - red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Example) //#nosec + red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.MergeAsWarnings(red) @@ -189,7 +204,9 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo if response.Examples != nil { if response.Schema != nil { if example, ok := response.Examples["application/json"]; ok { - res.MergeAsWarnings(NewSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, SwaggerSchema(true)).Validate(example)) + res.MergeAsWarnings( + newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example), + ) } else { // TODO: validate other media types too res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) @@ -208,10 +225,12 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } ex.beingVisited(path) s := ex.SpecValidator - res := new(Result) + res := poolOfResults.BorrowResult() if schema.Example != nil { - res.MergeAsWarnings(NewSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Example)) + res.MergeAsWarnings( + newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example), + ) } if schema.Items != nil { if schema.Items.Schema != nil { @@ -252,11 +271,13 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str // func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := new(Result) + res := poolOfResults.BorrowResult() s := ex.SpecValidator if items != nil { if items.Example != nil { - res.MergeAsWarnings(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Example)) + res.MergeAsWarnings( + newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example), + ) } if items.Items != nil { res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items)) @@ -265,5 +286,6 @@ func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in stri res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) } } + return res } diff --git a/vendor/github.com/go-openapi/validate/formats.go 
b/vendor/github.com/go-openapi/validate/formats.go index 0ad996cbbc..9c66cabb2c 100644 --- a/vendor/github.com/go-openapi/validate/formats.go +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -22,10 +22,32 @@ import ( ) type formatValidator struct { - Format string Path string In string + Format string KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var f *formatValidator + if opts.recycleValidators { + f = poolOfFormatValidators.BorrowValidator() + } else { + f = new(formatValidator) + } + + f.Path = path + f.In = in + f.Format = format + f.KnownFormats = formats + f.Options = opts + + return f } func (f *formatValidator) SetPath(path string) { @@ -33,37 +55,45 @@ func (f *formatValidator) SetPath(path string) { } func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool { - doit := func() bool { - if source == nil { - return false - } - switch source := source.(type) { - case *spec.Items: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Parameter: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Schema: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Header: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - } + if source == nil || f.KnownFormats == nil { + return false + } + + switch source := source.(type) { + case *spec.Items: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Parameter: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Schema: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Header: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + default: return false } - r := doit() - debugLog("format validator for %q applies %t for %T (kind: %v)\n", f.Path, r, source, kind) - return r } func (f *formatValidator) Validate(val interface{}) *Result { - result := new(Result) - debugLog("validating \"%v\" against format: %s", val, f.Format) + if f.Options.recycleValidators { + defer func() { + f.redeem() + }() + } + + var result *Result + if f.Options.recycleResult { + result = poolOfResults.BorrowResult() + } else { + result = new(Result) + } if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { result.AddErrors(err) } - if result.HasErrors() { - return result - } - return nil + return result +} + +func (f *formatValidator) redeem() { + poolOfFormatValidators.RedeemValidator(f) } diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index fc7500c844..e855994dfe 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -101,9 +101,17 @@ type errorHelper struct { // A collection of unexported helpers for error construction } -func (h *errorHelper) sErr(err errors.Error) *Result { +func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result { // Builds a Result from standard errors.Error - return &Result{Errors: []error{err}} + var result *Result + if recycle { + result = poolOfResults.BorrowResult() + } else { + result = new(Result) + } + result.Errors = []error{err} + + return result } func 
(h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result { diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go index 7bb12615d8..c556a9b61d 100644 --- a/vendor/github.com/go-openapi/validate/object_validator.go +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -15,8 +15,8 @@ package validate import ( + "fmt" "reflect" - "regexp" "strings" "github.com/go-openapi/errors" @@ -35,62 +35,116 @@ type objectValidator struct { PatternProperties map[string]spec.Schema Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions + splitPath []string +} + +func newObjectValidator(path, in string, + maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties, + additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties, + root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *objectValidator + if opts.recycleValidators { + v = poolOfObjectValidators.BorrowValidator() + } else { + v = new(objectValidator) + } + + v.Path = path + v.In = in + v.MaxProperties = maxProperties + v.MinProperties = minProperties + v.Required = required + v.Properties = properties + v.AdditionalProperties = additionalProperties + v.PatternProperties = patternProperties + v.Root = root + v.KnownFormats = formats + v.Options = opts + v.splitPath = strings.Split(v.Path, ".") + + return v } func (o *objectValidator) SetPath(path string) { o.Path = path + o.splitPath = strings.Split(path, ".") } func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool { // TODO: this should also work for structs // there is a problem in the type validator where it will be unhappy about null values // so that requires more testing - r := reflect.TypeOf(source) == specSchemaType && (kind == reflect.Map || kind == reflect.Struct) - debugLog("object validator for %q applies %t for %T (kind: %v)\n", o.Path, r, source, kind) - return r + _, isSchema := source.(*spec.Schema) + return isSchema && (kind == reflect.Map || kind == reflect.Struct) } func (o *objectValidator) isProperties() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties } func (o *objectValidator) isDefault() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault } func (o *objectValidator) isExample() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample } func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) { // for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly. // with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type). 
- if t, typeFound := val[jsonType]; typeFound { - if tpe, ok := t.(string); ok && tpe == arrayType { - if item, itemsKeyFound := val[jsonItems]; !itemsKeyFound { - res.AddErrors(errors.Required(jsonItems, o.Path, item)) - } - } + if val == nil { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + return + } + + tpe, isString := t.(string) + if !isString || tpe != arrayType { + return + } + + item, itemsKeyFound := val[jsonItems] + if itemsKeyFound { + return } + + res.AddErrors(errors.Required(jsonItems, o.Path, item)) } func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) { - if !o.isProperties() && !o.isDefault() && !o.isExample() { - if _, itemsKeyFound := val[jsonItems]; itemsKeyFound { - t, typeFound := val[jsonType] - if typeFound { - if tpe, ok := t.(string); !ok || tpe != arrayType { - res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) - } - } else { - // there is no type - res.AddErrors(errors.Required(jsonType, o.Path, t)) - } - } + if val == nil { + return + } + + if o.isProperties() || o.isDefault() || o.isExample() { + return + } + + _, itemsKeyFound := val[jsonItems] + if !itemsKeyFound { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + // there is no type + res.AddErrors(errors.Required(jsonType, o.Path, t)) + } + + if tpe, isString := t.(string); !isString || tpe != arrayType { + res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) } } @@ -104,176 +158,274 @@ func (o *objectValidator) precheck(res *Result, val map[string]interface{}) { } func (o *objectValidator) Validate(data interface{}) *Result { - val := data.(map[string]interface{}) - // TODO: guard against nil data + if o.Options.recycleValidators { + defer func() { + o.redeem() + }() + } + + var val map[string]interface{} + if data != nil { + var ok bool + val, ok = data.(map[string]interface{}) + if !ok { + return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult) + } + } numKeys := int64(len(val)) if o.MinProperties != nil && numKeys < *o.MinProperties { - return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties)) + return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult) } if o.MaxProperties != nil && numKeys > *o.MaxProperties { - return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties)) + return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult) } - res := new(Result) + var res *Result + if o.Options.recycleResult { + res = poolOfResults.BorrowResult() + } else { + res = new(Result) + } o.precheck(res, val) // check validity of field names if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows { // Case: additionalProperties: false - for k := range val { - _, regularProperty := o.Properties[k] - matched := false - - for pk := range o.PatternProperties { - if matches, _ := regexp.MatchString(pk, k); matches { - matched = true - break - } + o.validateNoAdditionalProperties(val, res) + } else { + // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } + o.validateAdditionalProperties(val, res) + } + + o.validatePropertiesSchema(val, res) + + // Check patternProperties + // TODO: it looks like we have done that twice in many cases + for key, value := range val { + _, regularProperty := o.Properties[key] + matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular 
properties as well + if regularProperty || !matched { + continue + } + + for _, pName := range patterns { + if v, ok := o.PatternProperties[pName]; ok { + r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(data.(map[string]interface{}), key, r) } + } + } - if !regularProperty && k != "$schema" && k != "id" && !matched { - // Special properties "$schema" and "id" are ignored - res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) - - // BUG(fredbi): This section should move to a part dedicated to spec validation as - // it will conflict with regular schemas where a property "headers" is defined. - - // - // Croaks a more explicit message on top of the standard one - // on some recognized cases. - // - // NOTE: edge cases with invalid type assertion are simply ignored here. - // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered - // by higher level callers (the IMPORTANT! tag will be eventually - // removed). - if k == "headers" && val[k] != nil { - // $ref is forbidden in header - if headers, mapOk := val[k].(map[string]interface{}); mapOk { - for headerKey, headerBody := range headers { - if headerBody != nil { - if headerSchema, mapOfMapOk := headerBody.(map[string]interface{}); mapOfMapOk { - if _, found := headerSchema["$ref"]; found { - var msg string - if refString, stringOk := headerSchema["$ref"].(string); stringOk { - msg = strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") - } - res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) - } - } - } - } - } - /* - case "$ref": - if val[k] != nil { - // TODO: check context of that ref: warn about siblings, check against invalid context - } - */ - } + return res +} + +func (o *objectValidator) validateNoAdditionalProperties(val map[string]interface{}, res *Result) { + for k := range val { + if k == "$schema" || k == "id" { + // special properties "$schema" and "id" are ignored + continue + } + + _, regularProperty := o.Properties[k] + if regularProperty { + continue + } + + matched := false + for pk := range o.PatternProperties { + re, err := compileRegexp(pk) + if err != nil { + continue + } + if matches := re.MatchString(k); matches { + matched = true + break } } - } else { - // Cases: no additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } - for key, value := range val { - _, regularProperty := o.Properties[key] - - // Validates property against "patternProperties" if applicable - // BUG(fredbi): succeededOnce is always false - - // NOTE: how about regular properties which do not match patternProperties? 
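For reference, the refactored additionalProperties / patternProperties handling above is reachable through the package's public AgainstSchema entry point. The following is a minimal illustrative sketch, not part of the vendored diff, leaning only on APIs visible in this update (validate.AgainstSchema, spec.Schema, spec.SchemaOrBool, strfmt.Default):

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	// Object schema: one declared property, one pattern property, additionalProperties: false.
	schema := &spec.Schema{
		SchemaProps: spec.SchemaProps{
			Type: spec.StringOrArray{"object"},
			Properties: map[string]spec.Schema{
				"name": *spec.StringProperty(),
			},
			PatternProperties: map[string]spec.Schema{
				"^x-": *spec.StringProperty(),
			},
			AdditionalProperties: &spec.SchemaOrBool{Allows: false},
		},
	}

	data := map[string]interface{}{
		"name":   "ok", // declared property: validated against its schema
		"x-note": "hi", // matches patternProperties "^x-"
		"other":  1,    // neither declared nor matching a pattern: rejected
	}

	// "other" should be reported as not allowed; "x-note" passes via patternProperties.
	if err := validate.AgainstSchema(schema, data, strfmt.Default); err != nil {
		fmt.Println(err)
	}
}

These are the code paths now served by validateNoAdditionalProperties and validatePatternProperty; the observable validation results are intended to be unchanged, only allocation behaviour differs.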
- matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) - - if !(regularProperty || matched || succeededOnce) { - - // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator - if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil { - // AdditionalProperties as Schema - r := NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value) - res.mergeForField(data.(map[string]interface{}), key, r) - } else if regularProperty && !(matched || succeededOnce) { - // TODO: this is dead code since regularProperty=false here - res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key)) - } + if matched { + continue + } + + res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) + + // BUG(fredbi): This section should move to a part dedicated to spec validation as + // it will conflict with regular schemas where a property "headers" is defined. + + // + // Croaks a more explicit message on top of the standard one + // on some recognized cases. + // + // NOTE: edge cases with invalid type assertion are simply ignored here. + // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered + // by higher level callers (the IMPORTANT! tag will be eventually + // removed). + if k != "headers" || val[k] == nil { + continue + } + + // $ref is forbidden in header + headers, mapOk := val[k].(map[string]interface{}) + if !mapOk { + continue + } + + for headerKey, headerBody := range headers { + if headerBody == nil { + continue + } + + headerSchema, mapOfMapOk := headerBody.(map[string]interface{}) + if !mapOfMapOk { + continue + } + + _, found := headerSchema["$ref"] + if !found { + continue + } + + refString, stringOk := headerSchema["$ref"].(string) + if !stringOk { + continue } + + msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") + res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) + /* + case "$ref": + if val[k] != nil { + // TODO: check context of that ref: warn about siblings, check against invalid context + } + */ + } + } +} + +func (o *objectValidator) validateAdditionalProperties(val map[string]interface{}, res *Result) { + for key, value := range val { + _, regularProperty := o.Properties[key] + if regularProperty { + continue + } + + // Validates property against "patternProperties" if applicable + // BUG(fredbi): succeededOnce is always false + + // NOTE: how about regular properties which do not match patternProperties? 
+ matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) + if matched || succeededOnce { + continue + } + + if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil { + continue } - // Valid cases: additionalProperties: true or undefined + + // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator + // AdditionalProperties as Schema + r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(val, key, r) } + // Valid cases: additionalProperties: true or undefined +} - createdFromDefaults := map[string]bool{} +func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, res *Result) { + createdFromDefaults := map[string]struct{}{} // Property types: // - regular Property + pSchema := poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties + defer func() { + poolOfSchemas.RedeemSchema(pSchema) + }() + for pName := range o.Properties { - pSchema := o.Properties[pName] // one instance per iteration - rName := pName - if o.Path != "" { + *pSchema = o.Properties[pName] + var rName string + if o.Path == "" { + rName = pName + } else { rName = o.Path + "." + pName } // Recursively validates each property against its schema - if v, ok := val[pName]; ok { - r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v) - res.mergeForField(data.(map[string]interface{}), pName, r) - } else if pSchema.Default != nil { - // If a default value is defined, creates the property from defaults - // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. - createdFromDefaults[pName] = true - res.addPropertySchemata(data.(map[string]interface{}), pName, &pSchema) + v, ok := val[pName] + if ok { + r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v) + res.mergeForField(val, pName, r) + + continue } - } - // Check required properties - if len(o.Required) > 0 { - for _, k := range o.Required { - if v, ok := val[k]; !ok && !createdFromDefaults[k] { - res.AddErrors(errors.Required(o.Path+"."+k, o.In, v)) - continue + if pSchema.Default != nil { + // if a default value is defined, creates the property from defaults + // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. 
+ createdFromDefaults[pName] = struct{}{} + if !o.Options.skipSchemataResult { + res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer } } } - // Check patternProperties - // TODO: it looks like we have done that twice in many cases - for key, value := range val { - _, regularProperty := o.Properties[key] - matched, _ /*succeededOnce*/, patterns := o.validatePatternProperty(key, value, res) - if !regularProperty && (matched /*|| succeededOnce*/) { - for _, pName := range patterns { - if v, ok := o.PatternProperties[pName]; ok { - r := NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value) - res.mergeForField(data.(map[string]interface{}), key, r) - } - } + if len(o.Required) == 0 { + return + } + + // Check required properties + for _, k := range o.Required { + v, ok := val[k] + if ok { + continue + } + _, isCreatedFromDefaults := createdFromDefaults[k] + if isCreatedFromDefaults { + continue } + + res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v)) } - return res } // TODO: succeededOnce is not used anywhere func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) { + if len(o.PatternProperties) == 0 { + return false, false, nil + } + matched := false succeededOnce := false - var patterns []string + patterns := make([]string, 0, len(o.PatternProperties)) - for k, schema := range o.PatternProperties { - sch := schema - if match, _ := regexp.MatchString(k, key); match { - patterns = append(patterns, k) - matched = true - validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...) + schema := poolOfSchemas.BorrowSchema() + defer func() { + poolOfSchemas.RedeemSchema(schema) + }() - res := validator.Validate(value) - result.Merge(res) + for k := range o.PatternProperties { + re, err := compileRegexp(k) + if err != nil { + continue } - } - // BUG(fredbi): can't get to here. Should remove dead code (commented out). + match := re.MatchString(key) + if !match { + continue + } - // if succeededOnce { - // result.Inc() - // } + *schema = o.PatternProperties[k] + patterns = append(patterns, k) + matched = true + validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options) + + res := validator.Validate(value) + result.Merge(res) + } return matched, succeededOnce, patterns } + +func (o *objectValidator) redeem() { + poolOfObjectValidators.RedeemValidator(o) +} diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go index 8a22ce9911..cfe9b0660f 100644 --- a/vendor/github.com/go-openapi/validate/options.go +++ b/vendor/github.com/go-openapi/validate/options.go @@ -31,6 +31,7 @@ type Opts struct { // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and // /"shelve/*/book/*" respectively. StrictPathParamUniqueness bool + SkipSchemataResult bool } var ( diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go new file mode 100644 index 0000000000..728ed0a569 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -0,0 +1,373 @@ +package validate + +import ( + "sync" + + "github.com/go-openapi/spec" +) + +var ( + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. 
+ poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool +) + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled. + poolOfSchemaValidators = schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + } + + poolOfObjectValidators = objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + } + + poolOfSliceValidators = sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + } + + poolOfItemsValidators = itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + } + + poolOfBasicCommonValidators = basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + } + + poolOfHeaderValidators = headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + } + + poolOfParamValidators = paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + } + + poolOfBasicSliceValidators = basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + } + + poolOfNumberValidators = numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + } + + poolOfStringValidators = stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + } + + poolOfSchemaPropsValidators = schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + } + + poolOfFormatValidators = formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + } + + poolOfTypeValidators = typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + } + + poolOfSchemas = schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + } + + poolOfResults = resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + } +} + +type ( + schemaValidatorsPool struct { + *sync.Pool + } + + objectValidatorsPool struct { + *sync.Pool + } + + sliceValidatorsPool struct { + *sync.Pool + } + + itemsValidatorsPool struct { + *sync.Pool + } + + basicCommonValidatorsPool struct { + *sync.Pool + } + + headerValidatorsPool struct { + *sync.Pool + } + + paramValidatorsPool struct { + *sync.Pool + } + + basicSliceValidatorsPool struct { + *sync.Pool + } + + numberValidatorsPool 
struct { + *sync.Pool + } + + stringValidatorsPool struct { + *sync.Pool + } + + schemaPropsValidatorsPool struct { + *sync.Pool + } + + formatValidatorsPool struct { + *sync.Pool + } + + typeValidatorsPool struct { + *sync.Pool + } + + schemasPool struct { + *sync.Pool + } + + resultsPool struct { + *sync.Pool + } +) + +func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator { + return p.Get().(*SchemaValidator) +} + +func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.Put(s) +} + +func (p objectValidatorsPool) BorrowValidator() *objectValidator { + return p.Get().(*objectValidator) +} + +func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.Put(s) +} + +func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + return p.Get().(*schemaSliceValidator) +} + +func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.Put(s) +} + +func (p itemsValidatorsPool) BorrowValidator() *itemsValidator { + return p.Get().(*itemsValidator) +} + +func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.Put(s) +} + +func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + return p.Get().(*basicCommonValidator) +} + +func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.Put(s) +} + +func (p headerValidatorsPool) BorrowValidator() *HeaderValidator { + return p.Get().(*HeaderValidator) +} + +func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.Put(s) +} + +func (p paramValidatorsPool) BorrowValidator() *ParamValidator { + return p.Get().(*ParamValidator) +} + +func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.Put(s) +} + +func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + return p.Get().(*basicSliceValidator) +} + +func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.Put(s) +} + +func (p numberValidatorsPool) BorrowValidator() *numberValidator { + return p.Get().(*numberValidator) +} + +func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.Put(s) +} + +func (p stringValidatorsPool) BorrowValidator() *stringValidator { + return p.Get().(*stringValidator) +} + +func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.Put(s) +} + +func (p schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { + return p.Get().(*schemaPropsValidator) +} + +func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.Put(s) +} + +func (p formatValidatorsPool) BorrowValidator() *formatValidator { + return p.Get().(*formatValidator) +} + +func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { + p.Put(s) +} + +func (p typeValidatorsPool) BorrowValidator() *typeValidator { + return p.Get().(*typeValidator) +} + +func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.Put(s) +} + +func (p schemasPool) BorrowSchema() *spec.Schema { + return p.Get().(*spec.Schema) +} + +func (p schemasPool) RedeemSchema(s *spec.Schema) { + p.Put(s) +} + +func (p resultsPool) BorrowResult() *Result { + return p.Get().(*Result).cleared() +} + +func (p resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + return + } + p.Put(s) +} diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go index 0fe934fe67..605ec39604 100644 --- a/vendor/github.com/go-openapi/validate/result.go +++ 
b/vendor/github.com/go-openapi/validate/result.go @@ -23,6 +23,8 @@ import ( "github.com/go-openapi/spec" ) +var emptyResult = &Result{MatchCount: 1} + // Result represents a validation result set, composed of // errors and warnings. // @@ -52,6 +54,8 @@ type Result struct { cachedFieldSchemta map[FieldKey][]*spec.Schema cachedItemSchemata map[ItemKey][]*spec.Schema + + wantsRedeemOnMerge bool } // FieldKey is a pair of an object and a field, usable as a key for a map. @@ -116,6 +120,9 @@ func (r *Result) Merge(others ...*Result) *Result { } r.mergeWithoutRootSchemata(other) r.rootObjectSchemata.Append(other.rootObjectSchemata) + if other.wantsRedeemOnMerge { + poolOfResults.RedeemResult(other) + } } return r } @@ -193,6 +200,9 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other * schemata: other.rootObjectSchemata, }) } + if other.wantsRedeemOnMerge { + poolOfResults.RedeemResult(other) + } return r } @@ -216,23 +226,30 @@ func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Resul schemata: other.rootObjectSchemata, }) } + if other.wantsRedeemOnMerge { + poolOfResults.RedeemResult(other) + } return r } // addRootObjectSchemata adds the given schemata for the root object of the result. -// The slice schemata might be reused. I.e. do not modify it after being added to a result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. func (r *Result) addRootObjectSchemata(s *spec.Schema) { - r.rootObjectSchemata.Append(schemata{one: s}) + clone := *s + r.rootObjectSchemata.Append(schemata{one: &clone}) } // addPropertySchemata adds the given schemata for the object and field. -// The slice schemata might be reused. I.e. do not modify it after being added to a result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) { if r.fieldSchemata == nil { r.fieldSchemata = make([]fieldSchemata, 0, len(obj)) } - r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: schema}}) + clone := *schema + r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}}) } /* @@ -280,6 +297,9 @@ func (r *Result) MergeAsErrors(others ...*Result) *Result { r.AddErrors(other.Errors...) r.AddErrors(other.Warnings...) r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + poolOfResults.RedeemResult(other) + } } } return r @@ -295,6 +315,9 @@ func (r *Result) MergeAsWarnings(others ...*Result) *Result { r.AddWarnings(other.Errors...) r.AddWarnings(other.Warnings...) r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + poolOfResults.RedeemResult(other) + } } } return r @@ -365,7 +388,12 @@ func (r *Result) keepRelevantErrors() *Result { strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) } } - strippedResult := new(Result) + var strippedResult *Result + if r.wantsRedeemOnMerge { + strippedResult = poolOfResults.BorrowResult() + } else { + strippedResult = new(Result) + } strippedResult.Errors = strippedErrors strippedResult.Warnings = strippedWarnings return strippedResult @@ -427,6 +455,27 @@ func (r *Result) AsError() error { return errors.CompositeValidationError(r.Errors...) } +func (r *Result) cleared() *Result { + // clear the Result to be reusable. Keep allocated capacity. 
+ r.Errors = r.Errors[:0] + r.Warnings = r.Warnings[:0] + r.MatchCount = 0 + r.data = nil + r.rootObjectSchemata.one = nil + r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0] + r.fieldSchemata = r.fieldSchemata[:0] + r.itemSchemata = r.itemSchemata[:0] + for k := range r.cachedFieldSchemta { + delete(r.cachedFieldSchemta, k) + } + for k := range r.cachedItemSchemata { + delete(r.cachedItemSchemata, k) + } + r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another + + return r +} + // schemata is an arbitrary number of schemata. It does a distinction between zero, // one and many schemata to avoid slice allocations. type schemata struct { diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index 62b91dc5b0..a359331121 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -24,32 +24,32 @@ import ( "github.com/go-openapi/swag" ) -var ( - specSchemaType = reflect.TypeOf(&spec.Schema{}) - specParameterType = reflect.TypeOf(&spec.Parameter{}) - specHeaderType = reflect.TypeOf(&spec.Header{}) - // specItemsType = reflect.TypeOf(&spec.Items{}) -) - // SchemaValidator validates data against a JSON schema type SchemaValidator struct { Path string in string Schema *spec.Schema - validators []valueValidator + validators [8]valueValidator Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions } // AgainstSchema validates the specified data against the provided schema, using a registry of supported formats. // // When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example. func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error { - res := NewSchemaValidator(schema, nil, "", formats, options...).Validate(data) + res := NewSchemaValidator(schema, nil, "", formats, + append(options, WithRecycleValidators(true), withRecycleResults(true))..., + ).Validate(data) + defer func() { + poolOfResults.RedeemResult(res) + }() + if res.HasErrors() { return errors.CompositeValidationError(res.Errors...) } + return nil } @@ -57,6 +57,15 @@ func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registr // // Panics if the provided schema is invalid. 
func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newSchemaValidator(schema, rootSchema, root, formats, opts) +} + +func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator { if schema == nil { return nil } @@ -72,17 +81,26 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string panic(msg) } } - s := SchemaValidator{ - Path: root, - in: "body", - Schema: schema, - Root: rootSchema, - KnownFormats: formats, - Options: SchemaValidatorOptions{}} - for _, o := range options { - o(&s.Options) + + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *SchemaValidator + if opts.recycleValidators { + s = poolOfSchemaValidators.BorrowValidator() + } else { + s = new(SchemaValidator) } - s.validators = []valueValidator{ + + s.Path = root + s.in = "body" + s.Schema = schema + s.Root = rootSchema + s.Options = opts + s.KnownFormats = formats + + s.validators = [8]valueValidator{ s.typeValidator(), s.schemaPropsValidator(), s.stringValidator(), @@ -92,7 +110,8 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string s.commonValidator(), s.objectValidator(), } - return &s + + return s } // SetPath sets the path for this schema valdiator @@ -108,17 +127,39 @@ func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool { // Validate validates the data against the schema func (s *SchemaValidator) Validate(data interface{}) *Result { - result := &Result{data: data} if s == nil { - return result + return emptyResult + } + + if s.Options.recycleValidators { + defer func() { + s.redeem() // one-time use validator + }() + } + + var result *Result + if s.Options.recycleResult { + result = poolOfResults.BorrowResult() + result.data = data + } else { + result = &Result{data: data} } - if s.Schema != nil { + + if s.Schema != nil && !s.Options.skipSchemataResult { result.addRootObjectSchemata(s.Schema) } if data == nil { + // early exit with minimal validation result.Merge(s.validators[0].Validate(data)) // type validator result.Merge(s.validators[6].Validate(data)) // common validator + + if s.Options.recycleValidators { + s.validators[0] = nil + s.validators[6] = nil + s.redeemChildren() + } + return result } @@ -164,14 +205,23 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { kind = tpe.Kind() } - for _, v := range s.validators { + for idx, v := range s.validators { if !v.Applies(s.Schema, kind) { - debugLog("%T does not apply for %v", v, kind) + if s.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := v.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[idx] = nil // prevents further (unsafe) usage + } + continue } - err := v.Validate(d) - result.Merge(err) + result.Merge(v.Validate(d)) result.Inc() } result.Inc() @@ -180,81 +230,120 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { } func (s *SchemaValidator) typeValidator() valueValidator { - return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path} + return newTypeValidator( + s.Path, + s.in, + s.Schema.Type, + 
s.Schema.Nullable, + s.Schema.Format, + s.Options, + ) } func (s *SchemaValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: s.Path, - In: s.in, - Enum: s.Schema.Enum, - } + return newBasicCommonValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.Enum, + s.Options, + ) } func (s *SchemaValidator) sliceValidator() valueValidator { - return &schemaSliceValidator{ - Path: s.Path, - In: s.in, - MaxItems: s.Schema.MaxItems, - MinItems: s.Schema.MinItems, - UniqueItems: s.Schema.UniqueItems, - AdditionalItems: s.Schema.AdditionalItems, - Items: s.Schema.Items, - Root: s.Root, - KnownFormats: s.KnownFormats, - Options: s.Options, - } + return newSliceValidator( + s.Path, + s.in, + s.Schema.MaxItems, + s.Schema.MinItems, + s.Schema.UniqueItems, + s.Schema.AdditionalItems, + s.Schema.Items, + s.Root, + s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: s.Path, - In: s.in, - Default: s.Schema.Default, - MultipleOf: s.Schema.MultipleOf, - Maximum: s.Schema.Maximum, - ExclusiveMaximum: s.Schema.ExclusiveMaximum, - Minimum: s.Schema.Minimum, - ExclusiveMinimum: s.Schema.ExclusiveMinimum, - } + return newNumberValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.MultipleOf, + s.Schema.Maximum, + s.Schema.ExclusiveMaximum, + s.Schema.Minimum, + s.Schema.ExclusiveMinimum, + "", + "", + s.Options, + ) } func (s *SchemaValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: s.Path, - In: s.in, - MaxLength: s.Schema.MaxLength, - MinLength: s.Schema.MinLength, - Pattern: s.Schema.Pattern, - } + return newStringValidator( + s.Path, + s.in, + nil, + false, + false, + s.Schema.MaxLength, + s.Schema.MinLength, + s.Schema.Pattern, + s.Options, + ) } func (s *SchemaValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: s.Path, - In: s.in, - Format: s.Schema.Format, - KnownFormats: s.KnownFormats, - } + return newFormatValidator( + s.Path, + s.in, + s.Schema.Format, + s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) schemaPropsValidator() valueValidator { sch := s.Schema - return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...) 
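With the constructors above now carrying a shared *SchemaValidatorOptions, the exported entry points keep their signatures, but validators built with WithRecycleValidators(true) hand their internals back to the pools as soon as Validate returns, so they are single-use. A small sketch under that assumption, not part of the vendored code (spec.Int64Property and Result.IsValid are existing go-openapi APIs):

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	schema := spec.Int64Property() // {"type": "integer", "format": "int64"}

	// Default construction: plain allocations, the validator can be reused.
	v := validate.NewSchemaValidator(schema, nil, "", strfmt.Default)
	fmt.Println(v.Validate(int64(3)).IsValid()) // expected: true
	fmt.Println(v.Validate("oops").IsValid())   // expected: false (type mismatch)

	// Recycling enabled: internals are borrowed from the new pools and redeemed
	// when Validate returns, so call Validate exactly once on this instance.
	oneShot := validate.NewSchemaValidator(schema, nil, "", strfmt.Default,
		validate.WithRecycleValidators(true))
	fmt.Println(oneShot.Validate(int64(42)).IsValid()) // expected: true
}

AgainstSchema opts into both validator and result recycling internally, which is why it now redeems its Result before returning.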
+ return newSchemaPropsValidator( + s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) objectValidator() valueValidator { - return &objectValidator{ - Path: s.Path, - In: s.in, - MaxProperties: s.Schema.MaxProperties, - MinProperties: s.Schema.MinProperties, - Required: s.Schema.Required, - Properties: s.Schema.Properties, - AdditionalProperties: s.Schema.AdditionalProperties, - PatternProperties: s.Schema.PatternProperties, - Root: s.Root, - KnownFormats: s.KnownFormats, - Options: s.Options, + return newObjectValidator( + s.Path, + s.in, + s.Schema.MaxProperties, + s.Schema.MinProperties, + s.Schema.Required, + s.Schema.Properties, + s.Schema.AdditionalProperties, + s.Schema.PatternProperties, + s.Root, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) redeem() { + poolOfSchemaValidators.RedeemValidator(s) +} + +func (s *SchemaValidator) redeemChildren() { + for i, validator := range s.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[i] = nil // free up allocated children if not in pool } } diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go index 4b4879de8b..65eeebeaab 100644 --- a/vendor/github.com/go-openapi/validate/schema_option.go +++ b/vendor/github.com/go-openapi/validate/schema_option.go @@ -18,6 +18,9 @@ package validate type SchemaValidatorOptions struct { EnableObjectArrayTypeCheck bool EnableArrayMustHaveItemsCheck bool + recycleValidators bool + recycleResult bool + skipSchemataResult bool } // Option sets optional rules for schema validation @@ -45,10 +48,36 @@ func SwaggerSchema(enable bool) Option { } } -// Options returns current options +// WithRecycleValidators saves memory allocations and makes validators +// available for a single use of Validate() only. +// +// When a validator is recycled, called MUST not call the Validate() method twice. 
+func WithRecycleValidators(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleValidators = enable + } +} + +func withRecycleResults(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleResult = enable + } +} + +// WithSkipSchemataResult skips the deep audit payload stored in validation Result +func WithSkipSchemataResult(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.skipSchemataResult = enable + } +} + +// Options returns the current set of options func (svo SchemaValidatorOptions) Options() []Option { return []Option{ EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck), + WithRecycleValidators(svo.recycleValidators), + withRecycleResults(svo.recycleResult), + WithSkipSchemataResult(svo.skipSchemataResult), } } diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go index 9bac3d29fb..bbea4a37db 100644 --- a/vendor/github.com/go-openapi/validate/schema_props.go +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -30,78 +30,96 @@ type schemaPropsValidator struct { AnyOf []spec.Schema Not *spec.Schema Dependencies spec.Dependencies - anyOfValidators []SchemaValidator - allOfValidators []SchemaValidator - oneOfValidators []SchemaValidator + anyOfValidators []*SchemaValidator + allOfValidators []*SchemaValidator + oneOfValidators []*SchemaValidator notValidator *SchemaValidator Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions } func (s *schemaPropsValidator) SetPath(path string) { s.Path = path } -func newSchemaPropsValidator(path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, options ...Option) *schemaPropsValidator { - anyValidators := make([]SchemaValidator, 0, len(anyOf)) - for _, v := range anyOf { - v := v - anyValidators = append(anyValidators, *NewSchemaValidator(&v, root, path, formats, options...)) +func newSchemaPropsValidator( + path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, + opts *SchemaValidatorOptions) *schemaPropsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) } - allValidators := make([]SchemaValidator, 0, len(allOf)) - for _, v := range allOf { - v := v - allValidators = append(allValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + + anyValidators := make([]*SchemaValidator, 0, len(anyOf)) + for i := range anyOf { + anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts)) + } + allValidators := make([]*SchemaValidator, 0, len(allOf)) + for i := range allOf { + allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts)) } - oneValidators := make([]SchemaValidator, 0, len(oneOf)) - for _, v := range oneOf { - v := v - oneValidators = append(oneValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + oneValidators := make([]*SchemaValidator, 0, len(oneOf)) + for i := range oneOf { + oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts)) } var notValidator *SchemaValidator if not != nil { - notValidator = NewSchemaValidator(not, root, path, formats, options...) 
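The new WithSkipSchemataResult option, together with the SkipSchemataResult field added to Opts in options.go, lets callers drop the per-field schemata audit trail that Result otherwise accumulates. A hedged sketch of how a caller might use it at the spec level; the swagger.json path is a placeholder, and loads.Spec plus spec.MustLoadSwagger20Schema are assumed from current go-openapi releases rather than shown in this diff:

package main

import (
	"fmt"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	doc, err := loads.Spec("./swagger.json") // placeholder spec document
	if err != nil {
		panic(err)
	}

	sv := validate.NewSpecValidator(spec.MustLoadSwagger20Schema(), strfmt.Default)
	sv.Options.SkipSchemataResult = true // forwarded to skipSchemataResult on every child validator
	errs, warns := sv.Validate(doc)
	fmt.Println("errors:", errs.AsError())
	fmt.Println("warnings:", len(warns.Warnings))

	// Schema-level equivalent for a single document fragment:
	// validate.NewSchemaValidator(sch, nil, "", strfmt.Default, validate.WithSkipSchemataResult(true))
}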
- } - - schOptions := &SchemaValidatorOptions{} - for _, o := range options { - o(schOptions) - } - return &schemaPropsValidator{ - Path: path, - In: in, - AllOf: allOf, - OneOf: oneOf, - AnyOf: anyOf, - Not: not, - Dependencies: deps, - anyOfValidators: anyValidators, - allOfValidators: allValidators, - oneOfValidators: oneValidators, - notValidator: notValidator, - Root: root, - KnownFormats: formats, - Options: *schOptions, + notValidator = newSchemaValidator(not, root, path, formats, opts) + } + + var s *schemaPropsValidator + if opts.recycleValidators { + s = poolOfSchemaPropsValidators.BorrowValidator() + } else { + s = new(schemaPropsValidator) } + + s.Path = path + s.In = in + s.AllOf = allOf + s.OneOf = oneOf + s.AnyOf = anyOf + s.Not = not + s.Dependencies = deps + s.anyOfValidators = anyValidators + s.allOfValidators = allValidators + s.oneOfValidators = oneValidators + s.notValidator = notValidator + s.Root = root + s.KnownFormats = formats + s.Options = opts + + return s } -func (s *schemaPropsValidator) Applies(source interface{}, kind reflect.Kind) bool { - r := reflect.TypeOf(source) == specSchemaType - debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) - return r +func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool { + _, isSchema := source.(*spec.Schema) + return isSchema } func (s *schemaPropsValidator) Validate(data interface{}) *Result { - mainResult := new(Result) + var mainResult *Result + if s.Options.recycleValidators { + mainResult = poolOfResults.BorrowResult() + } else { + mainResult = new(Result) + } // Intermediary error results // IMPORTANT! messages from underlying validators - keepResultAnyOf := new(Result) - keepResultOneOf := new(Result) - keepResultAllOf := new(Result) + keepResultAnyOf := poolOfResults.BorrowResult() + keepResultOneOf := poolOfResults.BorrowResult() + keepResultAllOf := poolOfResults.BorrowResult() + + if s.Options.recycleValidators { + defer func() { + s.redeem() + + // results are redeemed when merged + }() + } // Validates at least one in anyOf schemas var firstSuccess *Result @@ -115,10 +133,9 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { if result.IsValid() { bestFailures = nil succeededOnce = true - if firstSuccess == nil { - firstSuccess = result - } - keepResultAnyOf = new(Result) + firstSuccess = result + _ = keepResultAnyOf.cleared() + break } // MatchCount is used to select errors from the schema with most positive checks @@ -132,6 +149,9 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { } if bestFailures != nil { mainResult.Merge(bestFailures) + if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { + poolOfResults.RedeemResult(firstSuccess) + } } else if firstSuccess != nil { mainResult.Merge(firstSuccess) } @@ -153,7 +173,7 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { if firstSuccess == nil { firstSuccess = result } - keepResultOneOf = new(Result) + _ = keepResultOneOf.cleared() continue } // MatchCount is used to select errors from the schema with most positive checks @@ -174,8 +194,14 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { if bestFailures != nil { mainResult.Merge(bestFailures) } + if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { + poolOfResults.RedeemResult(firstSuccess) + } } else if firstSuccess != nil { mainResult.Merge(firstSuccess) + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + 
poolOfResults.RedeemResult(bestFailures) + } } } @@ -187,7 +213,6 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { result := allOfSchema.Validate(data) // We keep inner IMPORTANT! errors no matter what MatchCount tells us keepResultAllOf.Merge(result.keepRelevantErrors()) - // keepResultAllOf.Merge(result) if result.IsValid() { validated++ } @@ -218,7 +243,9 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { if dep, ok := s.Dependencies[key]; ok { if dep.Schema != nil { - mainResult.Merge(NewSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options.Options()...).Validate(data)) + mainResult.Merge( + newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data), + ) continue } @@ -238,3 +265,31 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) } + +func (s *schemaPropsValidator) redeem() { + poolOfSchemaPropsValidators.RedeemValidator(s) +} + +func (s *schemaPropsValidator) redeemChildren() { + for _, v := range s.anyOfValidators { + v.redeemChildren() + v.redeem() + } + s.anyOfValidators = nil + for _, v := range s.allOfValidators { + v.redeemChildren() + v.redeem() + } + s.allOfValidators = nil + for _, v := range s.oneOfValidators { + v.redeemChildren() + v.redeem() + } + s.oneOfValidators = nil + + if s.notValidator != nil { + s.notValidator.redeemChildren() + s.notValidator.redeem() + s.notValidator = nil + } +} diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go index aa429f5184..e974d3e052 100644 --- a/vendor/github.com/go-openapi/validate/slice_validator.go +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -32,7 +32,36 @@ type schemaSliceValidator struct { Items *spec.SchemaOrArray Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions +} + +func newSliceValidator(path, in string, + maxItems, minItems *int64, uniqueItems bool, + additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray, + root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *schemaSliceValidator + if opts.recycleValidators { + v = poolOfSliceValidators.BorrowValidator() + } else { + v = new(schemaSliceValidator) + } + + v.Path = path + v.In = in + v.MaxItems = maxItems + v.MinItems = minItems + v.UniqueItems = uniqueItems + v.AdditionalItems = additionalItems + v.Items = items + v.Root = root + v.KnownFormats = formats + v.Options = opts + + return v } func (s *schemaSliceValidator) SetPath(path string) { @@ -46,7 +75,18 @@ func (s *schemaSliceValidator) Applies(source interface{}, kind reflect.Kind) bo } func (s *schemaSliceValidator) Validate(data interface{}) *Result { - result := new(Result) + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + + var result *Result + if s.Options.recycleResult { + result = poolOfResults.BorrowResult() + } else { + result = new(Result) + } if data == nil { return result } @@ -54,8 +94,8 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { size := val.Len() if s.Items != nil && s.Items.Schema != nil { - validator := NewSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, 
s.Options.Options()...) for i := 0; i < size; i++ { + validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options) validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i)) value := val.Index(i) result.mergeForSlice(val, i, validator.Validate(value.Interface())) @@ -66,10 +106,11 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { if s.Items != nil && len(s.Items.Schemas) > 0 { itemsSize = len(s.Items.Schemas) for i := 0; i < itemsSize; i++ { - validator := NewSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) - if val.Len() <= i { + if size <= i { break } + + validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) } } @@ -79,7 +120,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { } if s.AdditionalItems.Schema != nil { for i := itemsSize; i < size-itemsSize+1; i++ { - validator := NewSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) + validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) } } @@ -103,3 +144,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { result.Inc() return result } + +func (s *schemaSliceValidator) redeem() { + poolOfSliceValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index 30168b7109..ee322a2df1 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -53,25 +53,38 @@ func Spec(doc *loads.Document, formats strfmt.Registry) error { // SpecValidator validates a swagger 2.0 spec type SpecValidator struct { - schema *spec.Schema // swagger 2.0 schema - spec *loads.Document - analyzer *analysis.Spec - expanded *loads.Document - KnownFormats strfmt.Registry - Options Opts // validation options + schema *spec.Schema // swagger 2.0 schema + spec *loads.Document + analyzer *analysis.Spec + expanded *loads.Document + KnownFormats strfmt.Registry + Options Opts // validation options + schemaOptions *SchemaValidatorOptions } // NewSpecValidator creates a new swagger spec validator instance func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator { + // schema options that apply to all called validators + schemaOptions := new(SchemaValidatorOptions) + for _, o := range []Option{ + SwaggerSchema(true), + WithRecycleValidators(true), + withRecycleResults(true), + } { + o(schemaOptions) + } + return &SpecValidator{ - schema: schema, - KnownFormats: formats, - Options: defaultOpts, + schema: schema, + KnownFormats: formats, + Options: defaultOpts, + schemaOptions: schemaOptions, } } // Validate validates the swagger spec func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { + s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult var sd *loads.Document errs, warnings := new(Result), new(Result) @@ -85,11 +98,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { s.spec = sd s.analyzer = analysis.New(sd.Spec()) - // Swagger schema validator - schv := NewSchemaValidator(s.schema, nil, "", s.KnownFormats, SwaggerSchema(true)) - var obj interface{} - // Raw spec 
unmarshalling errors + var obj interface{} if err := json.Unmarshal(sd.Raw(), &obj); err != nil { // NOTE: under normal conditions, the *load.Document has been already unmarshalled // So this one is just a paranoid check on the behavior of the spec package @@ -103,6 +113,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { warnings.AddErrors(errs.Warnings...) }() + // Swagger schema validator + schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions) errs.Merge(schv.Validate(obj)) // error - // There may be a point in continuing to try and determine more accurate errors if !s.Options.ContinueOnErrors && errs.HasErrors() { @@ -130,13 +142,13 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { } // Values provided as default MUST validate their schema - df := &defaultValidator{SpecValidator: s} + df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions} errs.Merge(df.Validate()) // Values provided as examples MUST validate their schema // Value provided as examples in a response without schema generate a warning // Known limitations: examples in responses for mime type not application/json are ignored (warning) - ex := &exampleValidator{SpecValidator: s} + ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions} errs.Merge(ex.Validate()) errs.Merge(s.validateNonEmptyPathParamNames()) @@ -148,22 +160,27 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { } func (s *SpecValidator) validateNonEmptyPathParamNames() *Result { - res := new(Result) + res := poolOfResults.BorrowResult() if s.spec.Spec().Paths == nil { // There is no Paths object: error res.AddErrors(noValidPathMsg()) - } else { - if s.spec.Spec().Paths.Paths == nil { - // Paths may be empty: warning - res.AddWarnings(noValidPathMsg()) - } else { - for k := range s.spec.Spec().Paths.Paths { - if strings.Contains(k, "{}") { - res.AddErrors(emptyPathParameterMsg(k)) - } - } + + return res + } + + if s.spec.Spec().Paths.Paths == nil { + // Paths may be empty: warning + res.AddWarnings(noValidPathMsg()) + + return res + } + + for k := range s.spec.Spec().Paths.Paths { + if strings.Contains(k, "{}") { + res.AddErrors(emptyPathParameterMsg(k)) } } + return res } @@ -177,7 +194,7 @@ func (s *SpecValidator) validateDuplicateOperationIDs() *Result { // fallback on possible incomplete picture because of previous errors analyzer = s.analyzer } - res := new(Result) + res := poolOfResults.BorrowResult() known := make(map[string]int) for _, v := range analyzer.OperationIDs() { if v != "" { @@ -199,7 +216,7 @@ type dupProp struct { func (s *SpecValidator) validateDuplicatePropertyNames() *Result { // definition can't declare a property that's already defined by one of its ancestors - res := new(Result) + res := poolOfResults.BorrowResult() for k, sch := range s.spec.Spec().Definitions { if len(sch.AllOf) == 0 { continue @@ -248,7 +265,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, schn := nm schc := &sch - res := new(Result) + res := poolOfResults.BorrowResult() for schc.Ref.String() != "" { // gather property names @@ -285,7 +302,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, } func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) { - res := new(Result) + res := poolOfResults.BorrowResult() if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. 
We should not be able to actually get there return nil, res @@ -335,7 +352,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno func (s *SpecValidator) validateItems() *Result { // validate parameter, items, schema and response objects for presence of item if type is array - res := new(Result) + res := poolOfResults.BorrowResult() for method, pi := range s.analyzer.Operations() { for path, op := range pi { @@ -394,7 +411,7 @@ func (s *SpecValidator) validateItems() *Result { // Verifies constraints on array type func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { - res := new(Result) + res := poolOfResults.BorrowResult() if !schema.Type.Contains(arrayType) { return res } @@ -418,7 +435,7 @@ func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID str func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result { // Each defined operation path parameters must correspond to a named element in the API's path pattern. // (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.) - res := new(Result) + res := poolOfResults.BorrowResult() for _, l := range fromPath { var matched bool for _, r := range fromOperation { @@ -474,7 +491,7 @@ func (s *SpecValidator) validateReferencedParameters() *Result { if len(expected) == 0 { return nil } - result := new(Result) + result := poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedParamMsg(k)) } @@ -499,7 +516,7 @@ func (s *SpecValidator) validateReferencedResponses() *Result { if len(expected) == 0 { return nil } - result := new(Result) + result := poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedResponseMsg(k)) } @@ -534,7 +551,7 @@ func (s *SpecValidator) validateReferencedDefinitions() *Result { func (s *SpecValidator) validateRequiredDefinitions() *Result { // Each property listed in the required array must be defined in the properties of the model - res := new(Result) + res := poolOfResults.BorrowResult() DEFINITIONS: for d, schema := range s.spec.Spec().Definitions { @@ -553,7 +570,7 @@ DEFINITIONS: func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result { // Takes care of recursive property definitions, which may be nested in additionalProperties schemas - res := new(Result) + res := poolOfResults.BorrowResult() propertyMatch := false patternMatch := false additionalPropertiesMatch := false @@ -619,7 +636,7 @@ func (s *SpecValidator) validateParameters() *Result { // - parameters with pattern property must specify valid patterns // - $ref in parameters must resolve // - path param must be required - res := new(Result) + res := poolOfResults.BorrowResult() rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`) for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) @@ -664,8 +681,8 @@ func (s *SpecValidator) validateParameters() *Result { } for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { - // An expanded parameter must validate its schema (an unexpanded $ref always pass high-level schema validation) - schv := NewSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, SwaggerSchema(true)) + // An expanded parameter must validate its schema (an unexpanded $ref always passes high-level schema 
validation) + schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions) obj := swag.ToDynamicJSON(pr) paramValidationResult := schv.Validate(obj) res.Merge(paramValidationResult) @@ -751,7 +768,7 @@ func (s *SpecValidator) validateParameters() *Result { func (s *SpecValidator) validateReferencesValid() *Result { // each reference must point to a valid object - res := new(Result) + res := poolOfResults.BorrowResult() for _, r := range s.analyzer.AllRefs() { if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI res.AddErrors(invalidRefMsg(r.String())) @@ -777,7 +794,7 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio // However, there are some issues with such a factorization: // - analysis does not seem to fully expand params // - param keys may be altered by x-go-name - res := new(Result) + res := poolOfResults.BorrowResult() pnames := make(map[string]struct{}) if op.Parameters != nil { // Safeguard diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go index 5398679bff..6d1f0f819c 100644 --- a/vendor/github.com/go-openapi/validate/spec_messages.go +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -187,6 +187,8 @@ const ( // UnusedResponseWarning ... UnusedResponseWarning = "response %q is not used anywhere" + + InvalidObject = "expected an object in %q.%s" ) // Additional error codes @@ -347,6 +349,9 @@ func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) err func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) } +func invalidObjectMsg(path, in string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidObject, path, in) +} // disabled // diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go index c7abf380b9..5647a4429e 100644 --- a/vendor/github.com/go-openapi/validate/type.go +++ b/vendor/github.com/go-openapi/validate/type.go @@ -25,11 +25,34 @@ import ( ) type typeValidator struct { + Path string + In string Type spec.StringOrArray Nullable bool Format string - In string - Path string + Options *SchemaValidatorOptions +} + +func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var t *typeValidator + if opts.recycleValidators { + t = poolOfTypeValidators.BorrowValidator() + } else { + t = new(typeValidator) + } + + t.Path = path + t.In = in + t.Type = typ + t.Nullable = nullable + t.Format = format + t.Options = opts + + return t } func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) { @@ -125,23 +148,33 @@ func (t *typeValidator) SetPath(path string) { t.Path = path } -func (t *typeValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (t *typeValidator) Applies(source interface{}, _ reflect.Kind) bool { // typeValidator applies to Schema, Parameter and Header objects - stpe := reflect.TypeOf(source) - r := (len(t.Type) > 0 || t.Format != "") && (stpe == specSchemaType || stpe == specParameterType || stpe == specHeaderType) - debugLog("type validator for %q applies %t for %T (kind: %v)\n", t.Path, r, source, kind) - return r + switch source.(type) { + 
case *spec.Schema: + case *spec.Parameter: + case *spec.Header: + default: + return false + } + + return (len(t.Type) > 0 || t.Format != "") } func (t *typeValidator) Validate(data interface{}) *Result { - result := new(Result) - result.Inc() + if t.Options.recycleValidators { + defer func() { + t.redeem() + }() + } + if data == nil { // nil or zero value for the passed structure require Type: null if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this - return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult) } - return result + + return emptyResult } // check if the type matches, should be used in every validator chain as first item @@ -151,8 +184,6 @@ func (t *typeValidator) Validate(data interface{}) *Result { // infer schema type (JSON) and format from passed data type schType, format := t.schemaInfoForType(data) - debugLog("path: %s, schType: %s, format: %s, expType: %s, expFmt: %s, kind: %s", t.Path, schType, format, t.Type, t.Format, val.Kind().String()) - // check numerical types // TODO: check unsigned ints // TODO: check json.Number (see schema.go) @@ -163,15 +194,20 @@ func (t *typeValidator) Validate(data interface{}) *Result { if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !(t.Type.Contains(schType) || format == t.Format || isFloatInt || isIntFloat || isLowerInt || isLowerFloat) { // TODO: test case - return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult) } if !(t.Type.Contains(numberType) || t.Type.Contains(integerType)) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) { - return result + return emptyResult } if !(t.Type.Contains(schType) || isFloatInt || isIntFloat) { - return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult) } - return result + + return emptyResult +} + +func (t *typeValidator) redeem() { + poolOfTypeValidators.RedeemValidator(t) } diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index ab4f718b5b..aa322643c3 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -39,20 +39,31 @@ type itemsValidator struct { root interface{} path string in string - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } -func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry) *itemsValidator { - iv := &itemsValidator{path: path, in: in, items: items, root: root, KnownFormats: formats} - iv.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{items.Type}), - Nullable: items.Nullable, - Format: items.Format, - In: in, - Path: path, - }, +func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var iv *itemsValidator + if opts.recycleValidators { + iv = poolOfItemsValidators.BorrowValidator() + } else { + iv 
= new(itemsValidator) + } + + iv.path = path + iv.in = in + iv.items = items + iv.root = root + iv.KnownFormats = formats + iv.Options = opts + iv.validators = [6]valueValidator{ + iv.typeValidator(), iv.stringValidator(), iv.formatValidator(), iv.numberValidator(), @@ -63,77 +74,144 @@ func newItemsValidator(path, in string, items *spec.Items, root interface{}, for } func (i *itemsValidator) Validate(index int, data interface{}) *Result { + if i.Options.recycleValidators { + defer func() { + i.redeemChildren() + i.redeem() + }() + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() - mainResult := new(Result) + var mainResult *Result + if i.Options.recycleResult { + mainResult = poolOfResults.BorrowResult() + } else { + mainResult = new(Result) + } + path := fmt.Sprintf("%s.%d", i.path, index) - for _, validator := range i.validators { - validator.SetPath(path) - if validator.Applies(i.root, kind) { - result := validator.Validate(data) - mainResult.Merge(result) - mainResult.Inc() - if result != nil && result.HasErrors() { - return mainResult + for idx, validator := range i.validators { + if !validator.Applies(i.root, kind) { + if i.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // prevents further (unsafe) usage } + + continue + } + + validator.SetPath(path) + result := validator.Validate(data) + mainResult.Merge(result) + mainResult.Inc() + if result != nil && result.HasErrors() { + break } } + return mainResult } +func (i *itemsValidator) typeValidator() valueValidator { + return newTypeValidator( + i.path, + i.in, + spec.StringOrArray([]string{i.items.Type}), + i.items.Nullable, + i.items.Format, + i.Options, + ) +} + func (i *itemsValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - In: i.in, - Default: i.items.Default, - Enum: i.items.Enum, - } + return newBasicCommonValidator( + "", + i.in, + i.items.Default, + i.items.Enum, + i.Options, + ) } func (i *itemsValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - In: i.in, - Default: i.items.Default, - MaxItems: i.items.MaxItems, - MinItems: i.items.MinItems, - UniqueItems: i.items.UniqueItems, - Source: i.root, - Items: i.items.Items, - KnownFormats: i.KnownFormats, - } + return newBasicSliceValidator( + "", + i.in, + i.items.Default, + i.items.MaxItems, + i.items.MinItems, + i.items.UniqueItems, + i.items.Items, + i.root, + i.KnownFormats, + i.Options, + ) } func (i *itemsValidator) numberValidator() valueValidator { - return &numberValidator{ - In: i.in, - Default: i.items.Default, - MultipleOf: i.items.MultipleOf, - Maximum: i.items.Maximum, - ExclusiveMaximum: i.items.ExclusiveMaximum, - Minimum: i.items.Minimum, - ExclusiveMinimum: i.items.ExclusiveMinimum, - Type: i.items.Type, - Format: i.items.Format, - } + return newNumberValidator( + "", + i.in, + i.items.Default, + i.items.MultipleOf, + i.items.Maximum, + i.items.ExclusiveMaximum, + i.items.Minimum, + i.items.ExclusiveMinimum, + i.items.Type, + i.items.Format, + i.Options, + ) } func (i *itemsValidator) stringValidator() valueValidator { - return &stringValidator{ - In: i.in, - Default: i.items.Default, - MaxLength: i.items.MaxLength, - MinLength: i.items.MinLength, - Pattern: i.items.Pattern, - AllowEmptyValue: false, - } + return newStringValidator( 
+ "", + i.in, + i.items.Default, + false, // Required + false, // AllowEmpty + i.items.MaxLength, + i.items.MinLength, + i.items.Pattern, + i.Options, + ) } func (i *itemsValidator) formatValidator() valueValidator { - return &formatValidator{ - In: i.in, - //Default: i.items.Default, - Format: i.items.Format, - KnownFormats: i.KnownFormats, + return newFormatValidator( + "", + i.in, + i.items.Format, + i.KnownFormats, + i.Options, + ) +} + +func (i *itemsValidator) redeem() { + poolOfItemsValidators.RedeemValidator(i) +} + +func (i *itemsValidator) redeemChildren() { + for idx, validator := range i.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // free up allocated children if not in pool } } @@ -142,6 +220,28 @@ type basicCommonValidator struct { In string Default interface{} Enum []interface{} + Options *SchemaValidatorOptions +} + +func newBasicCommonValidator(path, in string, def interface{}, enum []interface{}, opts *SchemaValidatorOptions) *basicCommonValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var b *basicCommonValidator + if opts.recycleValidators { + b = poolOfBasicCommonValidators.BorrowValidator() + } else { + b = new(basicCommonValidator) + } + + b.Path = path + b.In = in + b.Default = def + b.Enum = enum + b.Options = opts + + return b } func (b *basicCommonValidator) SetPath(path string) { @@ -152,255 +252,458 @@ func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Header: return true + default: + return false } - return false } func (b *basicCommonValidator) Validate(data interface{}) (res *Result) { - if len(b.Enum) > 0 { - for _, enumValue := range b.Enum { - actualType := reflect.TypeOf(enumValue) - if actualType != nil { // Safeguard - expectedValue := reflect.ValueOf(data) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { - return nil - } - } - } + if b.Options.recycleValidators { + defer func() { + b.redeem() + }() + } + + if len(b.Enum) == 0 { + return nil + } + + for _, enumValue := range b.Enum { + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard + continue + } + + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && + expectedValue.Type().ConvertibleTo(actualType) && + reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil } - return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum)) } - return nil + + return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult) +} + +func (b *basicCommonValidator) redeem() { + poolOfBasicCommonValidators.RedeemValidator(b) } // A HeaderValidator has very limited subset of validations to apply type HeaderValidator struct { name string header *spec.Header - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } // NewHeaderValidator creates a new header validator object -func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry) *HeaderValidator { - p := &HeaderValidator{name: name, header: header, KnownFormats: formats} - p.validators = 
[]valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{header.Type}), - Nullable: header.Nullable, - Format: header.Format, - In: "header", - Path: name, - }, +func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newHeaderValidator(name, header, formats, opts) +} + +func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *HeaderValidator + if opts.recycleValidators { + p = poolOfHeaderValidators.BorrowValidator() + } else { + p = new(HeaderValidator) + } + + p.name = name + p.header = header + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + name, + "header", + spec.StringOrArray([]string{header.Type}), + header.Nullable, + header.Format, + p.Options, + ), p.stringValidator(), p.formatValidator(), p.numberValidator(), p.sliceValidator(), p.commonValidator(), } + return p } // Validate the value of the header against its schema func (p *HeaderValidator) Validate(data interface{}) *Result { - result := new(Result) + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = poolOfResults.BorrowResult() + } else { + result = new(Result) + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() - for _, validator := range p.validators { - if validator.Applies(p.header, kind) { - if err := validator.Validate(data); err != nil { - result.Merge(err) - if err.HasErrors() { - return result + for idx, validator := range p.validators { + if !validator.Applies(p.header, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() } + p.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + if err := validator.Validate(data); err != nil { + result.Merge(err) + if err.HasErrors() { + break } } } - return nil + return result } func (p *HeaderValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - Enum: p.header.Enum, - } + return newBasicCommonValidator( + p.name, + "response", + p.header.Default, + p.header.Enum, + p.Options, + ) } func (p *HeaderValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - MaxItems: p.header.MaxItems, - MinItems: p.header.MinItems, - UniqueItems: p.header.UniqueItems, - Items: p.header.Items, - Source: p.header, - KnownFormats: p.KnownFormats, - } + return newBasicSliceValidator( + p.name, + "response", + p.header.Default, + p.header.MaxItems, + p.header.MinItems, + p.header.UniqueItems, + p.header.Items, + p.header, + p.KnownFormats, + p.Options, + ) } func (p *HeaderValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - MultipleOf: p.header.MultipleOf, - Maximum: p.header.Maximum, - ExclusiveMaximum: p.header.ExclusiveMaximum, - Minimum: 
p.header.Minimum, - ExclusiveMinimum: p.header.ExclusiveMinimum, - Type: p.header.Type, - Format: p.header.Format, - } + return newNumberValidator( + p.name, + "response", + p.header.Default, + p.header.MultipleOf, + p.header.Maximum, + p.header.ExclusiveMaximum, + p.header.Minimum, + p.header.ExclusiveMinimum, + p.header.Type, + p.header.Format, + p.Options, + ) } func (p *HeaderValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - Required: true, - MaxLength: p.header.MaxLength, - MinLength: p.header.MinLength, - Pattern: p.header.Pattern, - AllowEmptyValue: false, - } + return newStringValidator( + p.name, + "response", + p.header.Default, + true, + false, + p.header.MaxLength, + p.header.MinLength, + p.header.Pattern, + p.Options, + ) } func (p *HeaderValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: p.name, - In: "response", - //Default: p.header.Default, - Format: p.header.Format, - KnownFormats: p.KnownFormats, + return newFormatValidator( + p.name, + "response", + p.header.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *HeaderValidator) redeem() { + poolOfHeaderValidators.RedeemValidator(p) +} + +func (p *HeaderValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool } } // A ParamValidator has very limited subset of validations to apply type ParamValidator struct { param *spec.Parameter - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } // NewParamValidator creates a new param validator object -func NewParamValidator(param *spec.Parameter, formats strfmt.Registry) *ParamValidator { - p := &ParamValidator{param: param, KnownFormats: formats} - p.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{param.Type}), - Nullable: param.Nullable, - Format: param.Format, - In: param.In, - Path: param.Name, - }, +func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newParamValidator(param, formats, opts) +} + +func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *ParamValidator + if opts.recycleValidators { + p = poolOfParamValidators.BorrowValidator() + } else { + p = new(ParamValidator) + } + + p.param = param + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + param.Name, + param.In, + spec.StringOrArray([]string{param.Type}), + param.Nullable, + param.Format, + p.Options, + ), p.stringValidator(), p.formatValidator(), p.numberValidator(), p.sliceValidator(), p.commonValidator(), } + return p } // Validate the data against the description of the parameter func (p *ParamValidator) Validate(data interface{}) *Result { - result := new(Result) + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = poolOfResults.BorrowResult() + } else { + result = new(Result) 
+ } + tpe := reflect.TypeOf(data) kind := tpe.Kind() + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + // TODO: validate type - for _, validator := range p.validators { - if validator.Applies(p.param, kind) { - if err := validator.Validate(data); err != nil { - result.Merge(err) - if err.HasErrors() { - return result + for idx, validator := range p.validators { + if !validator.Applies(p.param, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() } + p.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + if err := validator.Validate(data); err != nil { + result.Merge(err) + if err.HasErrors() { + break } } } - return nil + + return result } func (p *ParamValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - Enum: p.param.Enum, - } + return newBasicCommonValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Enum, + p.Options, + ) } func (p *ParamValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - MaxItems: p.param.MaxItems, - MinItems: p.param.MinItems, - UniqueItems: p.param.UniqueItems, - Items: p.param.Items, - Source: p.param, - KnownFormats: p.KnownFormats, - } + return newBasicSliceValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MaxItems, + p.param.MinItems, + p.param.UniqueItems, + p.param.Items, + p.param, + p.KnownFormats, + p.Options, + ) } func (p *ParamValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - MultipleOf: p.param.MultipleOf, - Maximum: p.param.Maximum, - ExclusiveMaximum: p.param.ExclusiveMaximum, - Minimum: p.param.Minimum, - ExclusiveMinimum: p.param.ExclusiveMinimum, - Type: p.param.Type, - Format: p.param.Format, - } + return newNumberValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MultipleOf, + p.param.Maximum, + p.param.ExclusiveMaximum, + p.param.Minimum, + p.param.ExclusiveMinimum, + p.param.Type, + p.param.Format, + p.Options, + ) } func (p *ParamValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - AllowEmptyValue: p.param.AllowEmptyValue, - Required: p.param.Required, - MaxLength: p.param.MaxLength, - MinLength: p.param.MinLength, - Pattern: p.param.Pattern, - } + return newStringValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Required, + p.param.AllowEmptyValue, + p.param.MaxLength, + p.param.MinLength, + p.param.Pattern, + p.Options, + ) } func (p *ParamValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: p.param.Name, - In: p.param.In, - //Default: p.param.Default, - Format: p.param.Format, - KnownFormats: p.KnownFormats, + return newFormatValidator( + p.param.Name, + p.param.In, + p.param.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *ParamValidator) redeem() { + poolOfParamValidators.RedeemValidator(p) +} + +func (p *ParamValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if 
redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool } } type basicSliceValidator struct { - Path string - In string - Default interface{} - MaxItems *int64 - MinItems *int64 - UniqueItems bool - Items *spec.Items - Source interface{} - itemsValidator *itemsValidator - KnownFormats strfmt.Registry + Path string + In string + Default interface{} + MaxItems *int64 + MinItems *int64 + UniqueItems bool + Items *spec.Items + Source interface{} + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newBasicSliceValidator( + path, in string, + def interface{}, maxItems, minItems *int64, uniqueItems bool, items *spec.Items, + source interface{}, formats strfmt.Registry, + opts *SchemaValidatorOptions) *basicSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *basicSliceValidator + if opts.recycleValidators { + s = poolOfBasicSliceValidators.BorrowValidator() + } else { + s = new(basicSliceValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.MaxItems = maxItems + s.MinItems = minItems + s.UniqueItems = uniqueItems + s.Items = items + s.Source = source + s.KnownFormats = formats + s.Options = opts + + return s } func (s *basicSliceValidator) SetPath(path string) { @@ -411,60 +714,56 @@ func (s *basicSliceValidator) Applies(source interface{}, kind reflect.Kind) boo switch source.(type) { case *spec.Parameter, *spec.Items, *spec.Header: return kind == reflect.Slice + default: + return false } - return false } func (s *basicSliceValidator) Validate(data interface{}) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } val := reflect.ValueOf(data) size := int64(val.Len()) if s.MinItems != nil { if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MaxItems != nil { if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.UniqueItems { if err := UniqueItems(s.Path, s.In, data); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } - if s.itemsValidator == nil && s.Items != nil { - s.itemsValidator = newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats) + if s.Items == nil { + return nil } - if s.itemsValidator != nil { - for i := 0; i < int(size); i++ { - ele := val.Index(i) - if err := s.itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() { - return err - } + for i := 0; i < int(size); i++ { + itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options) + ele := val.Index(i) + if err := itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() { + return err } } + return nil } -/* unused -func (s *basicSliceValidator) hasDuplicates(value reflect.Value, size int) bool { - dict := make(map[interface{}]struct{}) - for i := 0; i < size; i++ { - ele := value.Index(i) - if _, ok := dict[ele.Interface()]; ok { - return true - } - dict[ele.Interface()] = struct{}{} - } - return false +func (s *basicSliceValidator) redeem() { + poolOfBasicSliceValidators.RedeemValidator(s) } -*/ type numberValidator struct { Path string @@ -476,8 +775,40 @@ type 
numberValidator struct { Minimum *float64 ExclusiveMinimum bool // Allows for more accurate behavior regarding integers - Type string - Format string + Type string + Format string + Options *SchemaValidatorOptions +} + +func newNumberValidator( + path, in string, def interface{}, + multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool, + typ, format string, + opts *SchemaValidatorOptions) *numberValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var n *numberValidator + if opts.recycleValidators { + n = poolOfNumberValidators.BorrowValidator() + } else { + n = new(numberValidator) + } + + n.Path = path + n.In = in + n.Default = def + n.MultipleOf = multipleOf + n.Maximum = maximum + n.ExclusiveMaximum = exclusiveMaximum + n.Minimum = minimum + n.ExclusiveMinimum = exclusiveMinimum + n.Type = typ + n.Format = format + n.Options = opts + + return n } func (n *numberValidator) SetPath(path string) { @@ -489,12 +820,10 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool { case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: isInt := kind >= reflect.Int && kind <= reflect.Uint64 isFloat := kind == reflect.Float32 || kind == reflect.Float64 - r := isInt || isFloat - debugLog("schema props validator for %q applies %t for %T (kind: %v) isInt=%t, isFloat=%t\n", n.Path, r, source, kind, isInt, isFloat) - return r + return isInt || isFloat + default: + return false } - debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", n.Path, false, source, kind) - return false } // Validate provides a validator for generic JSON numbers, @@ -519,11 +848,22 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool { // // TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) 
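Aside (not part of the vendored diff): every constructor in these hunks follows the same borrow-or-allocate pattern, and each Validate defers a matching redeem when recycleValidators is set. Below is a minimal sketch of what such a pool could look like, assuming it wraps sync.Pool and resets values on redeem; the names Result, poolOfResults and RedeemResult mirror the calls above, but the concrete pool implementations live elsewhere in the go-openapi/validate package and may differ.

package main

import (
	"fmt"
	"sync"
)

// Result loosely mirrors the validate.Result accumulator used in the hunks above.
type Result struct {
	Errors   []error
	Warnings []error
}

// resultPool is a hypothetical pool: BorrowResult replaces new(Result) on hot
// paths, and RedeemResult clears the value before handing it back to the pool.
type resultPool struct{ p sync.Pool }

func (r *resultPool) BorrowResult() *Result {
	return r.p.Get().(*Result)
}

func (r *resultPool) RedeemResult(res *Result) {
	res.Errors = res.Errors[:0]
	res.Warnings = res.Warnings[:0]
	r.p.Put(res)
}

var poolOfResults = &resultPool{p: sync.Pool{New: func() interface{} { return new(Result) }}}

func main() {
	res := poolOfResults.BorrowResult() // instead of new(Result)
	res.Errors = append(res.Errors, fmt.Errorf("example error"))
	fmt.Println(len(res.Errors))
	poolOfResults.RedeemResult(res) // deferred in the real validators once the result has been merged
}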
func (n *numberValidator) Validate(val interface{}) *Result { - res := new(Result) + if n.Options.recycleValidators { + defer func() { + n.redeem() + }() + } - resMultiple := new(Result) - resMinimum := new(Result) - resMaximum := new(Result) + var res *Result + if n.Options.recycleResult { + res = poolOfResults.BorrowResult() + } else { + res = new(Result) + } + + resMultiple := poolOfResults.BorrowResult() + resMinimum := poolOfResults.BorrowResult() + resMaximum := poolOfResults.BorrowResult() // Used only to attempt to validate constraint on value, // even though value or constraint specified do not match type and format @@ -538,12 +878,12 @@ func (n *numberValidator) Validate(val interface{}) *Result { if resMultiple.IsValid() { // Constraint validated with compatible types if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil { - resMultiple.Merge(errorHelp.sErr(err)) + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil { - resMultiple.Merge(errorHelp.sErr(err)) + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } @@ -554,12 +894,12 @@ func (n *numberValidator) Validate(val interface{}) *Result { if resMaximum.IsValid() { // Constraint validated with compatible types if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil { - resMaximum.Merge(errorHelp.sErr(err)) + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil { - resMaximum.Merge(errorHelp.sErr(err)) + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } @@ -570,12 +910,12 @@ func (n *numberValidator) Validate(val interface{}) *Result { if resMinimum.IsValid() { // Constraint validated with compatible types if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil { - resMinimum.Merge(errorHelp.sErr(err)) + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil { - resMinimum.Merge(errorHelp.sErr(err)) + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } @@ -584,15 +924,48 @@ func (n *numberValidator) Validate(val interface{}) *Result { return res } +func (n *numberValidator) redeem() { + poolOfNumberValidators.RedeemValidator(n) +} + type stringValidator struct { + Path string + In string Default interface{} Required bool AllowEmptyValue bool MaxLength *int64 MinLength *int64 Pattern string - Path string - In string + Options *SchemaValidatorOptions +} + +func newStringValidator( + path, in string, + def interface{}, required, allowEmpty bool, maxLength, minLength *int64, pattern string, + opts *SchemaValidatorOptions) *stringValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *stringValidator + if opts.recycleValidators { + s = poolOfStringValidators.BorrowValidator() + } else { + s = new(stringValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.Required = required + s.AllowEmptyValue = allowEmpty + s.MaxLength = maxLength + s.MinLength = minLength + s.Pattern = pattern + s.Options = opts + + return s } func (s *stringValidator) SetPath(path string) { @@ 
-602,42 +975,50 @@ func (s *stringValidator) SetPath(path string) { func (s *stringValidator) Applies(source interface{}, kind reflect.Kind) bool { switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: - r := kind == reflect.String - debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) - return r + return kind == reflect.String + default: + return false } - debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, false, source, kind) - return false } func (s *stringValidator) Validate(val interface{}) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + data, ok := val.(string) if !ok { - return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val)) + return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult) } if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") { if err := RequiredString(s.Path, s.In, data); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MaxLength != nil { if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MinLength != nil { if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.Pattern != "" { if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } return nil } + +func (s *stringValidator) redeem() { + poolOfStringValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go deleted file mode 100644 index c6f66f1039..0000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONUnmarshalV2 = false - -// UnmarshalNext unmarshals the next JSON object from d into m. -func UnmarshalNext(d *json.Decoder, m proto.Message) error { - return new(Unmarshaler).UnmarshalNext(d, m) -} - -// Unmarshal unmarshals a JSON object from r into m. -func Unmarshal(r io.Reader, m proto.Message) error { - return new(Unmarshaler).Unmarshal(r, m) -} - -// UnmarshalString unmarshals a JSON object from s into m. -func UnmarshalString(s string, m proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // AllowUnknownFields specifies whether to allow messages to contain - // unknown JSON fields, as opposed to failing to unmarshal. - AllowUnknownFields bool - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. 
- AnyResolver AnyResolver -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize the way -// they are unmarshaled from JSON. Messages that implement this should also -// implement JSONPBMarshaler so that the custom format can be produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Unmarshal unmarshals a JSON object from r into m. -func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { - return u.UnmarshalNext(json.NewDecoder(r), m) -} - -// UnmarshalNext unmarshals the next JSON object from d into m. -func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { - if m == nil { - return errors.New("invalid nil message") - } - - // Parse the next JSON object from the stream. - raw := json.RawMessage{} - if err := d.Decode(&raw); err != nil { - return err - } - - // Check for custom unmarshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsu, ok := m.(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, raw) - } - - mr := proto.MessageReflect(m) - - // NOTE: For historical reasons, a top-level null is treated as a noop. - // This is incorrect, but kept for compatibility. - if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { - return nil - } - - if wrapJSONUnmarshalV2 { - // NOTE: If input message is non-empty, we need to preserve merge semantics - // of the old jsonpb implementation. These semantics are not supported by - // the protobuf JSON specification. - isEmpty := true - mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { - isEmpty = false // at least one iteration implies non-empty - return false - }) - if !isEmpty { - // Perform unmarshaling into a newly allocated, empty message. - mr = mr.New() - - // Use a defer to copy all unmarshaled fields into the original message. - dst := proto.MessageReflect(m) - defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - dst.Set(fd, v) - return true - }) - } - - // Unmarshal using the v2 JSON unmarshaler. 
- opts := protojson.UnmarshalOptions{ - DiscardUnknown: u.AllowUnknownFields, - } - if u.AnyResolver != nil { - opts.Resolver = anyResolver{u.AnyResolver} - } - return opts.Unmarshal(raw, mr.Interface()) - } else { - if err := u.unmarshalMessage(mr, raw); err != nil { - return err - } - return protoV2.CheckInitialized(mr.Interface()) - } -} - -func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { - md := m.Descriptor() - fds := md.Fields() - - if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, in) - } - - if string(in) == "null" && md.FullName() != "google.protobuf.Value" { - return nil - } - - switch wellKnownType(md.FullName()) { - case "Any": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - rawTypeURL, ok := jsonObject["@type"] - if !ok { - return errors.New("Any JSON doesn't have '@type'") - } - typeURL, err := unquoteString(string(rawTypeURL)) - if err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) - } - m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) - - var m2 protoreflect.Message - if u.AnyResolver != nil { - mi, err := u.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - if err == protoregistry.NotFound { - return fmt.Errorf("could not resolve Any message type: %v", typeURL) - } - return err - } - m2 = mt.New() - } - - if wellKnownType(m2.Descriptor().FullName()) != "" { - rawValue, ok := jsonObject["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - if err := u.unmarshalMessage(m2, rawValue); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } else { - delete(jsonObject, "@type") - rawJSON, err := json.Marshal(jsonObject) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - if err = u.unmarshalMessage(m2, rawJSON); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } - - rawWire, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) - return nil - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - v, err := u.unmarshalValue(m.NewField(fd), in, fd) - if err != nil { - return err - } - m.Set(fd, v) - return nil - case "Duration": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - d, err := time.ParseDuration(v) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - sec := d.Nanoseconds() / 1e9 - nsec := d.Nanoseconds() % 1e9 - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Timestamp": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - t, err := time.Parse(time.RFC3339Nano, v) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - sec := t.Unix() - nsec := t.Nanosecond() - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Value": - switch { - 
case string(in) == "null": - m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) - case string(in) == "true": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) - case string(in) == "false": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) - case hasPrefixAndSuffix('"', in, '"'): - s, err := unquoteString(string(in)) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) - case hasPrefixAndSuffix('[', in, ']'): - v := m.Mutable(fds.ByNumber(6)) - return u.unmarshalMessage(v.Message(), in) - case hasPrefixAndSuffix('{', in, '}'): - v := m.Mutable(fds.ByNumber(5)) - return u.unmarshalMessage(v.Message(), in) - default: - f, err := strconv.ParseFloat(string(in), 0) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) - } - return nil - case "ListValue": - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - lv := m.Mutable(fds.ByNumber(1)).List() - for _, raw := range jsonArray { - ve := lv.NewElement() - if err := u.unmarshalMessage(ve.Message(), raw); err != nil { - return err - } - lv.Append(ve) - } - return nil - case "Struct": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - mv := m.Mutable(fds.ByNumber(1)).Map() - for key, raw := range jsonObject { - kv := protoreflect.ValueOf(key).MapKey() - vv := mv.NewValue() - if err := u.unmarshalMessage(vv.Message(), raw); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) - } - mv.Set(kv, vv) - } - return nil - } - - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - // Handle known fields. - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if fd.IsWeak() && fd.Message().IsPlaceholder() { - continue // weak reference is not linked in - } - - // Search for any raw JSON value associated with this field. - var raw json.RawMessage - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - name = string(fd.JSONName()) - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - // Handle extension fields. - for name, raw := range jsonObject { - if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { - continue - } - - // Resolve the extension field by name. 
- xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(md) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - continue - } - delete(jsonObject, name) - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - if !u.AllowUnknownFields && len(jsonObject) > 0 { - for name := range jsonObject { - return fmt.Errorf("unknown field %q in %v", name, md.FullName()) - } - } - return nil -} - -func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { - if fd.Cardinality() == protoreflect.Repeated { - return false - } - if md := fd.Message(); md != nil { - return md.FullName() == "google.protobuf.Value" - } - if ed := fd.Enum(); ed != nil { - return ed.FullName() == "google.protobuf.NullValue" - } - return false -} - -func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { - if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { - _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) - return ok - } - return false -} - -func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch { - case fd.IsList(): - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return v, err - } - lv := v.List() - for _, raw := range jsonArray { - ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) - if err != nil { - return v, err - } - lv.Append(ve) - } - return v, nil - case fd.IsMap(): - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return v, err - } - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - for key, raw := range jsonObject { - var kv protoreflect.MapKey - if kfd.Kind() == protoreflect.StringKind { - kv = protoreflect.ValueOf(key).MapKey() - } else { - v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) - if err != nil { - return v, err - } - kv = v.MapKey() - } - - vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) - if err != nil { - return v, err - } - mv.Set(kv, vv) - } - return v, nil - default: - return u.unmarshalSingularValue(v, in, fd) - } -} - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(+1), - `"-Infinity"`: math.Inf(-1), -} - -func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch fd.Kind() { - case protoreflect.BoolKind: - return unmarshalValue(in, new(bool)) - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - return unmarshalValue(trimQuote(in), new(int32)) - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return unmarshalValue(trimQuote(in), new(int64)) - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - return unmarshalValue(trimQuote(in), new(uint32)) - case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: - return unmarshalValue(trimQuote(in), new(uint64)) - case protoreflect.FloatKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat32(float32(f)), nil - } - return unmarshalValue(trimQuote(in), new(float32)) - case protoreflect.DoubleKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat64(float64(f)), nil - } - return unmarshalValue(trimQuote(in), new(float64)) - case protoreflect.StringKind: - return unmarshalValue(in, new(string)) - case protoreflect.BytesKind: - return unmarshalValue(in, new([]byte)) - case protoreflect.EnumKind: - if hasPrefixAndSuffix('"', in, '"') { - vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) - if vd == nil { - return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) - } - return protoreflect.ValueOfEnum(vd.Number()), nil - } - return unmarshalValue(in, new(protoreflect.EnumNumber)) - case protoreflect.MessageKind, protoreflect.GroupKind: - err := u.unmarshalMessage(v.Message(), in) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } -} - -func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { - err := json.Unmarshal(in, v) - return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err -} - -func unquoteString(in string) (out string, err error) { - err = json.Unmarshal([]byte(in), &out) - return out, err -} - -func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { - if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { - return true - } - return false -} - -// trimQuote is like unquoteString but simply strips surrounding quotes. -// This is incorrect, but is behavior done by the legacy implementation. -func trimQuote(in []byte) []byte { - if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { - in = in[1 : len(in)-1] - } - return in -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go deleted file mode 100644 index e9438a93f3..0000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONMarshalV2 = false - -// Marshaler is a configurable object for marshaling protocol buffer messages -// to the specified JSON representation. -type Marshaler struct { - // OrigName specifies whether to use the original protobuf name for fields. - OrigName bool - - // EnumsAsInts specifies whether to render enum values as integers, - // as opposed to string values. - EnumsAsInts bool - - // EmitDefaults specifies whether to render fields with zero values. - EmitDefaults bool - - // Indent controls whether the output is compact or not. - // If empty, the output is compact JSON. Otherwise, every JSON object - // entry and JSON array value will be on its own line. - // Each line will be preceded by repeated copies of Indent, where the - // number of copies is the current indentation depth. 
- Indent string - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should also -// implement JSONPBUnmarshaler so that the custom format can be parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// Marshal serializes a protobuf message as JSON into w. -func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { - b, err := jm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// MarshalToString serializes a protobuf message as JSON in string form. -func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { - b, err := jm.marshal(m) - if err != nil { - return "", err - } - return string(b), nil -} - -func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { - v := reflect.ValueOf(m) - if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, errors.New("Marshal called with nil") - } - - // Check for custom marshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsm, ok := m.(JSONPBMarshaler); ok { - return jsm.MarshalJSONPB(jm) - } - - if wrapJSONMarshalV2 { - opts := protojson.MarshalOptions{ - UseProtoNames: jm.OrigName, - UseEnumNumbers: jm.EnumsAsInts, - EmitUnpopulated: jm.EmitDefaults, - Indent: jm.Indent, - } - if jm.AnyResolver != nil { - opts.Resolver = anyResolver{jm.AnyResolver} - } - return opts.Marshal(proto.MessageReflect(m).Interface()) - } else { - // Check for unpopulated required fields first. - m2 := proto.MessageReflect(m) - if err := protoV2.CheckInitialized(m2.Interface()); err != nil { - return nil, err - } - - w := jsonWriter{Marshaler: jm} - err := w.marshalMessage(m2, "", "") - return w.buf, err - } -} - -type jsonWriter struct { - *Marshaler - buf []byte -} - -func (w *jsonWriter) write(s string) { - w.buf = append(w.buf, s...) -} - -func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { - if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(w.Marshaler) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - w.write(string(b)) - return nil - } - - md := m.Descriptor() - fds := md.Fields() - - // Handle well-known types. 
- const secondInNanos = int64(time.Second / time.Nanosecond) - switch wellKnownType(md.FullName()) { - case "Any": - return w.marshalAny(m, indent) - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - case "Duration": - const maxSecondsInDuration = 315576000000 - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if s < -maxSecondsInDuration || s > maxSecondsInDuration { - return fmt.Errorf("seconds out of range %v", s) - } - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - var sign string - if s < 0 || ns < 0 { - sign, s, ns = "-", -1*s, -1*ns - } - x := fmt.Sprintf("%s%d.%09d", sign, s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vs"`, x)) - return nil - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vZ"`, x)) - return nil - case "Value": - // JSON value; which is a null, number, string, bool, object, or array. - od := md.Oneofs().Get(0) - fd := m.WhichOneof(od) - if fd == nil { - return errors.New("nil Value") - } - return w.marshalValue(fd, m.Get(fd), indent) - case "Struct", "ListValue": - // JSON object or array. - fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - - firstField := true - if typeURL != "" { - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - if fd == nil { - continue - } - } else { - i++ - } - - v := m.Get(fd) - - if !m.Has(fd) { - if !w.EmitDefaults || fd.ContainingOneof() != nil { - continue - } - if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { - v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars - } - } - - if !firstField { - w.writeComma() - } - if err := w.marshalField(fd, v, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if md.ExtensionRanges().Len() > 0 { - // Collect a sorted list of all extension descriptor and values. 
- type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - if !firstField { - w.writeComma() - } - if err := w.marshalField(ext.desc, ext.val, indent); err != nil { - return err - } - firstField = false - } - } - - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) writeComma() { - if w.Indent != "" { - w.write(",\n") - } else { - w.write(",") - } -} - -func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." - md := m.Descriptor() - typeURL := m.Get(md.Fields().ByNumber(1)).String() - rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() - - var m2 protoreflect.Message - if w.AnyResolver != nil { - mi, err := w.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - return err - } - m2 = mt.New() - } - - if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { - return err - } - - if wellKnownType(m2.Descriptor().FullName()) == "" { - return w.marshalMessage(m2, indent, typeURL) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - w.writeComma() - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - w.write(`"value": `) - } else { - w.write(`"value":`) - } - if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { - return err - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"@type":`) - if w.Indent != "" { - w.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - w.write(string(b)) - return nil -} - -// marshalField writes field description and value to the Writer. -func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"`) - switch { - case fd.IsExtension(): - // For message set, use the fname of the message as the extension name. 
- name := string(fd.FullName()) - if isMessageSet(fd.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - w.write("[" + name + "]") - case w.OrigName: - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - w.write(name) - default: - w.write(string(fd.JSONName())) - } - w.write(`":`) - if w.Indent != "" { - w.write(" ") - } - return w.marshalValue(fd, v, indent) -} - -func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case fd.IsList(): - w.write("[") - comma := "" - lv := v.List() - for i := 0; i < lv.Len(); i++ { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write("]") - return nil - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - - // Collect a sorted list of all map keys and values. - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - - w.write(`{`) - comma := "" - for _, entry := range entries { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - - s := fmt.Sprint(entry.key.Interface()) - b, err := json.Marshal(s) - if err != nil { - return err - } - w.write(string(b)) - - w.write(`:`) - if w.Indent != "" { - w.write(` `) - } - - if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write(`}`) - return nil - default: - return w.marshalSingularValue(fd, v, indent) - } -} - -func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case !v.IsValid(): - w.write("null") - return nil - case fd.Message() != nil: - return w.marshalMessage(v.Message(), indent+w.Indent, "") - case fd.Enum() != nil: - if fd.Enum().FullName() == "google.protobuf.NullValue" { - w.write("null") - return nil - } - - vd := fd.Enum().Values().ByNumber(v.Enum()) - if vd == nil || w.EnumsAsInts { - w.write(strconv.Itoa(int(v.Enum()))) - } else { - w.write(`"` + string(vd.Name()) + `"`) - } - return nil - default: - switch v.Interface().(type) { - case float32, float64: - switch { - case math.IsInf(v.Float(), +1): - w.write(`"Infinity"`) - return nil - case math.IsInf(v.Float(), -1): - w.write(`"-Infinity"`) - return nil - case math.IsNaN(v.Float()): - w.write(`"NaN"`) - 
return nil - } - case int64, uint64: - w.write(fmt.Sprintf(`"%d"`, v.Interface())) - return nil - } - - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - w.write(string(b)) - return nil - } -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go deleted file mode 100644 index 480e2448de..0000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/json.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jsonpb provides functionality to marshal and unmarshal between a -// protocol buffer message and JSON. It follows the specification at -// https://developers.google.com/protocol-buffers/docs/proto3#json. -// -// Do not rely on the default behavior of the standard encoding/json package -// when called on generated message types as it does not operate correctly. -// -// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" -// package instead. -package jsonpb - -import ( - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// AnyResolver takes a type URL, present in an Any message, -// and resolves it into an instance of the associated message. -type AnyResolver interface { - Resolve(typeURL string) (proto.Message, error) -} - -type anyResolver struct{ AnyResolver } - -func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - return r.FindMessageByURL(string(message)) -} - -func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { - m, err := r.Resolve(url) - if err != nil { - return nil, err - } - return protoimpl.X.MessageTypeOf(m), nil -} - -func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -func wellKnownType(s protoreflect.FullName) string { - if s.Parent() == "google.protobuf" { - switch s.Name() { - case "Empty", "Any", - "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue", - "Duration", "Timestamp", - "NullValue", "Struct", "Value", "ListValue": - return string(s.Name()) - } - } - return "" -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go index c6d09dae40..720f3cdf57 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go @@ -14,22 +14,29 @@ import ( ) // SortSlices returns a [cmp.Transformer] option that sorts all []V. -// The less function must be of the form "func(T, T) bool" which is used to -// sort any slice with element type V that is assignable to T. 
+// The lessOrCompareFunc function must be either +// a less function of the form "func(T, T) bool" or +// a compare function of the format "func(T, T) int" +// which is used to sort any slice with element type V that is assignable to T. // -// The less function must be: +// A less function must be: // - Deterministic: less(x, y) == less(x, y) // - Irreflexive: !less(x, x) // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) // -// The less function does not have to be "total". That is, if !less(x, y) and -// !less(y, x) for two elements x and y, their relative order is maintained. +// A compare function must be: +// - Deterministic: compare(x, y) == compare(x, y) +// - Irreflexive: compare(x, x) == 0 +// - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) +// +// The function does not have to be "total". That is, if x != y, but +// less or compare report inequality, their relative order is maintained. // // SortSlices can be used in conjunction with [EquateEmpty]. -func SortSlices(lessFunc interface{}) cmp.Option { - vf := reflect.ValueOf(lessFunc) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", lessFunc)) +func SortSlices(lessOrCompareFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessOrCompareFunc) + if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() { + panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc)) } ss := sliceSorter{vf.Type().In(0), vf} return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort)) @@ -79,28 +86,40 @@ func (ss sliceSorter) checkSort(v reflect.Value) { } func (ss sliceSorter) less(v reflect.Value, i, j int) bool { vx, vy := v.Index(i), v.Index(j) - return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() + vo := ss.fnc.Call([]reflect.Value{vx, vy})[0] + if vo.Kind() == reflect.Bool { + return vo.Bool() + } else { + return vo.Int() < 0 + } } -// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be a -// sorted []struct{K, V}. The less function must be of the form -// "func(T, T) bool" which is used to sort any map with key K that is -// assignable to T. +// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be +// a sorted []struct{K, V}. The lessOrCompareFunc function must be either +// a less function of the form "func(T, T) bool" or +// a compare function of the format "func(T, T) int" +// which is used to sort any map with key K that is assignable to T. // // Flattening the map into a slice has the property that [cmp.Equal] is able to // use [cmp.Comparer] options on K or the K.Equal method if it exists. // -// The less function must be: +// A less function must be: // - Deterministic: less(x, y) == less(x, y) // - Irreflexive: !less(x, x) // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) // - Total: if x != y, then either less(x, y) or less(y, x) // +// A compare function must be: +// - Deterministic: compare(x, y) == compare(x, y) +// - Irreflexive: compare(x, x) == 0 +// - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0 +// - Total: if x != y, then compare(x, y) != 0 +// // SortMaps can be used in conjunction with [EquateEmpty]. 
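The rewritten SortSlices/SortMaps contract above means callers can now hand cmpopts either the classic less function or a three-way compare function (the SortMaps signature change itself continues directly below). A minimal sketch of the new form, assuming a go-cmp version that contains this change; the sample slice and the compare function are illustrative only and are not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	got := []int{3, 1, 2}
	want := []int{1, 2, 3}

	// A three-way compare function (func(T, T) int) is now accepted in
	// addition to the original less function (func(T, T) bool).
	byValue := cmpopts.SortSlices(func(a, b int) int {
		switch {
		case a < b:
			return -1
		case a > b:
			return +1
		default:
			return 0
		}
	})

	// With the sort option applied, element order is ignored, so the diff
	// between want and got comes out empty.
	fmt.Println(cmp.Diff(want, got, byValue))
}
```

The hunk's sliceSorter.less and mapSorter.less helpers dispatch on the kind of the returned value (bool versus int), which is why both function forms produce the same ordering.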
-func SortMaps(lessFunc interface{}) cmp.Option { - vf := reflect.ValueOf(lessFunc) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", lessFunc)) +func SortMaps(lessOrCompareFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessOrCompareFunc) + if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() { + panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc)) } ms := mapSorter{vf.Type().In(0), vf} return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort)) @@ -143,5 +162,10 @@ func (ms mapSorter) checkSort(v reflect.Value) { } func (ms mapSorter) less(v reflect.Value, i, j int) bool { vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) - return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() + vo := ms.fnc.Call([]reflect.Value{vx, vy})[0] + if vo.Kind() == reflect.Bool { + return vo.Bool() + } else { + return vo.Int() < 0 + } } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index d127d43623..def01a6be3 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -19,6 +19,7 @@ const ( tbFunc // func(T) bool ttbFunc // func(T, T) bool + ttiFunc // func(T, T) int trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R @@ -28,11 +29,13 @@ const ( Transformer = trFunc // func(T) R ValueFilter = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool + Compare = ttiFunc // func(T, T) int ValuePredicate = tbFunc // func(T) bool KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) +var intType = reflect.TypeOf(0) // IsType reports whether the reflect.Type is of the specified function type. func IsType(t reflect.Type, ft funcType) bool { @@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool { if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } + case ttiFunc: // func(T, T) int + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { + return true + } case trbFunc: // func(T, R) bool if ni == 2 && no == 1 && t.Out(0) == boolType { return true diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 754496f3b3..ba3fce81ff 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) { if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. 
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - if _, ok := reflect.New(t).Interface().(error); ok { + isProtoMessage := func(t reflect.Type) bool { + m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") + return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && + m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && + m.Type.Out(0).Name() == "Message" + } + if isProtoMessage(t) { + help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` + } else if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" } else if t.Comparable() { help = "consider using cmpopts.EquateComparable to compare comparable Go types" diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go index 16278a1d99..fcd049de92 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/common/common.proto @@ -145,8 +145,8 @@ type Identity struct { // *Identity_SpiffeId // *Identity_Hostname // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId + // *Identity_Username + // *Identity_GcpId IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` // Additional identity-specific attributes. Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -212,16 +212,16 @@ func (x *Identity) GetUid() string { return "" } -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username } return "" } -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId } return "" } @@ -252,14 +252,14 @@ type Identity_Uid struct { Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` } -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` } -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. - GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
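The go-cmp options.go change earlier in this hunk teaches the validator to suggest protocmp.Transform whenever it runs into a generated proto.Message with unexported fields (the s2a-go Identity diff resumes below). A minimal sketch of the comparison that hint points to; durationpb is used here purely as a convenient well-known message type and is not part of this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/testing/protocmp"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a := durationpb.New(90 * time.Second)
	b := durationpb.New(90 * time.Second)

	// Without protocmp.Transform, cmp.Diff panics on the unexported fields
	// of generated messages; the updated validator now names this option
	// in its help text instead of only suggesting error/comparable helpers.
	fmt.Println(cmp.Diff(a, b, protocmp.Transform()))
}
```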
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` } func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} @@ -268,9 +268,9 @@ func (*Identity_Hostname) isIdentity_IdentityOneof() {} func (*Identity_Uid) isIdentity_IdentityOneof() {} -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} +func (*Identity_Username) isIdentity_IdentityOneof() {} -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} +func (*Identity_GcpId) isIdentity_IdentityOneof() {} var File_internal_proto_common_common_proto protoreflect.FileDescriptor @@ -278,38 +278,37 @@ var file_internal_proto_common_common_proto_rawDesc = []byte{ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0xa8, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, + 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, + 0x63, 0x70, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, + 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, + 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -326,7 +325,7 @@ func file_internal_proto_common_common_proto_rawDescGZIP() []byte { var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite (TLSVersion)(0), // 1: 
s2a.proto.TLSVersion (*Identity)(nil), // 2: s2a.proto.Identity @@ -347,7 +346,7 @@ func file_internal_proto_common_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -360,12 +359,12 @@ func file_internal_proto_common_common_proto_init() { } } } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []any{ (*Identity_SpiffeId)(nil), (*Identity_Hostname)(nil), (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go index f4f763ae10..2af3ee3dc1 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a_context/s2a_context.proto @@ -209,7 +209,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.S2AContext (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite @@ -233,7 +233,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go index 0a86ebee59..8919232fd8 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -1171,7 +1171,7 @@ func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_s2a_proto_goTypes = []any{ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq @@ -1226,7 +1226,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -1238,7 +1238,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientSessionStartReq); i { case 0: return &v.state @@ -1250,7 +1250,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerSessionStartReq); i { case 0: return &v.state @@ -1262,7 +1262,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SessionNextReq); i { case 0: return &v.state @@ -1274,7 +1274,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ResumptionTicketReq); i { case 0: return &v.state @@ -1286,7 +1286,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -1298,7 +1298,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*SessionState); i { case 0: return &v.state @@ -1310,7 +1310,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*SessionResult); i { case 0: return &v.state @@ -1322,7 +1322,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = 
func(v any, i int) any { switch v := v.(*SessionStatus); i { case 0: return &v.state @@ -1334,7 +1334,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -1347,10 +1347,10 @@ func file_internal_proto_s2a_s2a_proto_init() { } } } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*SessionReq_ClientStart)(nil), (*SessionReq_ServerStart)(nil), (*SessionReq_Next)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go index 0fa582fc87..8fac3841be 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" @@ -61,11 +61,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -129,7 +130,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go index c84bed9774..e9aa5d14c0 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/common/common.proto @@ -256,62 +256,218 @@ func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} } +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_Username + // *Identity_GcpId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username + } + return "" +} + +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` +} + +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_Username) isIdentity_IdentityOneof() {} + +func (*Identity_GcpId) isIdentity_IdentityOneof() {} + var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x22, 0xab, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, 0x63, 0x70, 0x49, 0x64, 0x12, 0x46, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x73, 
0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, + 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, + 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, + 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, + 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, + 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, 0x33, 0x43, + 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, + 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, + 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x13, + 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, + 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 
0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x79, + 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1d, + 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, + 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x12, 0x19, + 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, + 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -327,18 +483,22 @@ func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_v2_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_v2_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol + (*Identity)(nil), // 4: s2a.proto.v2.Identity + nil, // 5: s2a.proto.v2.Identity.AttributesEntry } var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 5, // 0: s2a.proto.v2.Identity.attributes:type_name -> s2a.proto.v2.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_internal_proto_v2_common_common_proto_init() } @@ -346,19 +506,41 @@ func file_internal_proto_v2_common_common_proto_init() { if File_internal_proto_v2_common_common_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + file_internal_proto_v2_common_common_proto_msgTypes[0].OneofWrappers = []any{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, NumEnums: 4, - NumMessages: 0, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_internal_proto_v2_common_common_proto_goTypes, DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_v2_common_common_proto_msgTypes, }.Build() File_internal_proto_v2_common_common_proto = out.File file_internal_proto_v2_common_common_proto_rawDesc = nil diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go index b7fd871c7a..418331a4bd 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -14,14 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a_context/s2a_context.proto package s2a_context_go_proto import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -64,7 +64,7 @@ type S2AContext struct { // certificate chain was NOT validated successfully. PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,9,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The SHA256 hash of the DER-encoding of the local leaf certificate used in // the handshake. 
LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` @@ -151,35 +151,36 @@ var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, + 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, + 0x44, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -195,12 +196,12 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 1: s2a.proto.v2.Identity } var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.v2.Identity 1, // [1:1] is the sub-list for method 
output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -214,7 +215,7 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index e843450c7e..548f31da2d 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto package s2a_go_proto import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -292,6 +291,12 @@ const ( // The connect-to-Google verification mode uses the trust bundle for // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 ValidatePeerCertificateChainReq_VerificationMode = 3 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. @@ -300,11 +305,17 @@ var ( 0: "UNSPECIFIED", 1: "SPIFFE", 2: "CONNECT_TO_GOOGLE", + 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", + 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", + 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, + "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, + "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, } ) @@ -454,7 +465,7 @@ type AuthenticationMechanism struct { // mechanism. Otherwise, S2A assumes that the authentication mechanism is // associated with the default identity. If the default identity cannot be // determined, the request is rejected. 
- Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Identity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` // Types that are assignable to MechanismOneof: // // *AuthenticationMechanism_Token @@ -493,7 +504,7 @@ func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} } -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { if x != nil { return x.Identity } @@ -1185,7 +1196,7 @@ type SessionReq struct { // identity is not populated, S2A will try to deduce the managed identity to // use from the SNI extension. If that also fails, S2A uses the default // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,7,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The authentication mechanisms that the application wishes to use to // authenticate to S2A, ordered by preference. S2A will always use the first // authentication mechanism that matches the managed identity. @@ -1231,7 +1242,7 @@ func (*SessionReq) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} } -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { +func (x *SessionReq) GetLocalIdentity() *common_go_proto.Identity { if x != nil { return x.LocalIdentity } @@ -1790,358 +1801,365 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 
0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x22, 0x7e, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x32, 0x0a, 0x08, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, + 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, + 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 
0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x6f, 0x6e, 0x12, 0x78, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, + 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, + 0x06, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, + 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x93, 0x01, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, + 0x72, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x61, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, + 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, + 0x65, 0x61, 0x64, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, + 0x02, 0x0a, 0x18, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, + 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x2e, 0x0a, 0x2a, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, + 0x02, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xb0, 0x03, 0x0a, 0x1d, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x5d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x51, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, + 0x61, 0x33, 0x38, 0x34, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, + 0x31, 0x32, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x3d, 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 
0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, + 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 
0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 
0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, + 0x1a, 0x39, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x51, 0x0a, 0x25, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, + 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, + 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, + 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x27, 0x0a, 0x23, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x33, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 
0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, + 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, + 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, + 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, + 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 
0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x68, 0x61, 0x69, 
0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, + 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, + 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, + 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, + 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, + 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, + 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, + 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, + 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, + 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 
0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, + 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2158,7 +2176,7 @@ func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []any{ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm 
(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation @@ -2183,7 +2201,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol - (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 24: s2a.proto.v2.Identity (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion @@ -2191,7 +2209,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ } var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol - 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.v2.Identity 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration @@ -2203,7 +2221,7 @@ var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext - 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.v2.Identity 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq @@ -2238,7 +2256,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AlpnPolicy); i { case 0: return &v.state @@ -2250,7 +2268,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -2262,7 +2280,7 @@ func 
file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Status); i { case 0: return &v.state @@ -2274,7 +2292,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationReq); i { case 0: return &v.state @@ -2286,7 +2304,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp); i { case 0: return &v.state @@ -2298,7 +2316,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationReq); i { case 0: return &v.state @@ -2310,7 +2328,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationResp); i { case 0: return &v.state @@ -2322,7 +2340,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationReq); i { case 0: return &v.state @@ -2334,7 +2352,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationResp); i { case 0: return &v.state @@ -2346,7 +2364,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq); i { case 0: return &v.state @@ -2358,7 +2376,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainResp); i { case 0: return &v.state @@ -2370,7 +2388,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -2382,7 +2400,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return 
nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -2394,7 +2412,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { case 0: return &v.state @@ -2406,7 +2424,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { case 0: return &v.state @@ -2418,7 +2436,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { case 0: return &v.state @@ -2430,7 +2448,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { case 0: return &v.state @@ -2443,30 +2461,30 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { } } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []any{ (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*OffloadPrivateKeyOperationReq_RawBytes)(nil), (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []any{ (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []any{ (*SessionReq_GetTlsConfigurationReq)(nil), (*SessionReq_OffloadPrivateKeyOperationReq)(nil), (*SessionReq_OffloadResumptionKeyOperationReq)(nil), (*SessionReq_ValidatePeerCertificateChainReq)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []any{ (*SessionResp_GetTlsConfigurationResp)(nil), (*SessionResp_OffloadPrivateKeyOperationResp)(nil), 
(*SessionResp_OffloadResumptionKeyOperationResp)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go index 2566df6c30..c93f75a78b 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" @@ -54,11 +54,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -115,7 +116,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go index c60515510a..e76509ef01 100644 --- a/vendor/github.com/google/s2a-go/internal/record/record.go +++ b/vendor/github.com/google/s2a-go/internal/record/record.go @@ -378,11 +378,6 @@ func (p *conn) Read(b []byte) (n int, err error) { if len(p.handshakeBuf) > 0 { return 0, errors.New("application data received while processing fragmented handshake messages") } - if p.ticketState == receivingTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } case alert: return 0, p.handleAlertMessage() case handshake: @@ -500,17 +495,7 @@ func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex i } func (p *conn) Close() error { - p.readMutex.Lock() - defer p.readMutex.Unlock() - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - // If p.ticketState is equal to notReceivingTickets, then S2A has - // been sent a flight of session tickets, and we must wait for the - // call to S2A to complete before closing the record protocol. 
- if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } + // Close the connection immediately. return p.Conn.Close() } @@ -663,7 +648,7 @@ func (p *conn) handleHandshakeMessage() error { // Several handshake messages may be coalesced into a single record. // Continue reading them until the handshake buffer is empty. for len(p.handshakeBuf) > 0 { - handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + handshakeMsgType, msgLen, msg, _, ok := p.parseHandshakeMsg() if !ok { // The handshake could not be fully parsed, so read in another // record and try again later. @@ -681,20 +666,7 @@ func (p *conn) handleHandshakeMessage() error { return err } case tlsHandshakeNewSessionTicketType: - // Ignore tickets that are received after a batch of tickets has - // been sent to S2A. - if p.ticketState == notReceivingTickets { - continue - } - if p.ticketState == ticketsNotYetReceived { - p.ticketState = receivingTickets - } - p.sessionTickets = append(p.sessionTickets, rawMsg) - if len(p.sessionTickets) == maxAllowedTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } + // Do nothing for session ticket. default: return errors.New("unknown handshake message type") } diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go index ec96ba3b6a..4057e70c8a 100644 --- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -23,7 +23,8 @@ import ( "fmt" "os" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) const ( @@ -37,7 +38,7 @@ type AccessTokenManager interface { DefaultToken() (token string, err error) // Token returns a token that an application with local identity equal to // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) + Token(identity interface{}) (token string, err error) } type singleTokenAccessTokenManager struct { @@ -65,6 +66,14 @@ func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { } // Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { +func (m *singleTokenAccessTokenManager) Token(identity interface{}) (string, error) { + switch v := identity.(type) { + case *commonpbv1.Identity: + // valid type. + case *commonpb.Identity: + // valid type. 
+ default: + return "", fmt.Errorf("Incorrect identity type: %v", v) + } return m.token, nil } diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index 85a8379d83..a6402ee48c 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -28,7 +28,6 @@ import ( "os" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" @@ -38,8 +37,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -59,9 +59,9 @@ type s2av2TransportCreds struct { transportCreds credentials.TransportCredentials tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. - localIdentity *commonpbv1.Identity + localIdentity *commonpb.Identity // localIdentities should only be used by the server. - localIdentities []*commonpbv1.Identity + localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) @@ -70,7 +70,7 @@ type s2av2TransportCreds struct { // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. 
-func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -183,13 +183,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(timeoutCtx, - func() error { - conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) - return err - }) + conn, authInfo, err := creds.ClientHandshake(timeoutCtx, serverName, rawConn) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -197,7 +191,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } return nil, nil, err } - grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) + grpclog.Infof("client-side handshake is done using S2Av2 to: %s", serverName) return conn, authInfo, err } @@ -247,13 +241,7 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(ctx, - func() error { - conn, authInfo, err = creds.ServerHandshake(rawConn) - return err - }) + conn, authInfo, err := creds.ServerHandshake(rawConn) if err != nil { grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) return nil, nil, err @@ -280,15 +268,15 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { tokenManager = *c.tokenManager } verificationMode := c.verificationMode - var localIdentity *commonpbv1.Identity + var localIdentity *commonpb.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) } - var localIdentities []*commonpbv1.Identity + var localIdentities []*commonpb.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) } } creds := &s2av2TransportCreds{ diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index 4d91913229..fa0002e36b 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - commonpbv1 
"github.com/google/s2a-go/internal/proto/common_go_proto" commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -44,8 +43,8 @@ const ( ) // GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. -func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { - authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) +func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { + authMechanisms := getAuthMechanisms(tokenManager, []*commonpb.Identity{localIdentity}) if grpclog.V(1) { grpclog.Infof("Sending request to S2Av2 for client TLS config.") @@ -126,7 +125,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr } // GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. -func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { +func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { return &tls.Config{ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), }, nil @@ -136,7 +135,7 @@ func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager token // connection with a client, based on SNI communicated during ClientHello. // Ensures that server presents the correct certificate to establish a TLS // connection. 
-func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) if err != nil { @@ -219,9 +218,9 @@ func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { } } -func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { authMechanisms := getAuthMechanisms(tokenManager, localIdentities) - var locID *commonpbv1.Identity + var locID *commonpb.Identity if localIdentities != nil { locID = localIdentities[0] } @@ -283,7 +282,7 @@ func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsCo return clientAuth } -func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity) []*s2av2pb.AuthenticationMechanism { if tokenManager == nil { return nil } diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 5ecb06f930..cc79bd09a6 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -29,7 +29,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker" "github.com/google/s2a-go/internal/handshaker/service" @@ -38,8 +37,10 @@ import ( "github.com/google/s2a-go/retry" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -54,17 +55,17 @@ const ( // credentials.TransportCredentials interface. type s2aTransportCreds struct { info *credentials.ProtocolInfo - minTLSVersion commonpb.TLSVersion - maxTLSVersion commonpb.TLSVersion + minTLSVersion commonpbv1.TLSVersion + maxTLSVersion commonpbv1.TLSVersion // tlsCiphersuites contains the ciphersuites used in the S2A connection. // Note that these are currently unconfigurable. - tlsCiphersuites []commonpb.Ciphersuite + tlsCiphersuites []commonpbv1.Ciphersuite // localIdentity should only be used by the client. - localIdentity *commonpb.Identity + localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. 
- localIdentities []*commonpb.Identity + localIdentities []*commonpbv1.Identity // targetIdentities should only be used by the client. - targetIdentities []*commonpb.Identity + targetIdentities []*commonpbv1.Identity isClient bool s2aAddr string ensureProcessSessionTickets *sync.WaitGroup @@ -76,7 +77,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil client options") } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity for _, targetIdentity := range opts.TargetIdentities { protoTargetIdentity, err := toProtoIdentity(targetIdentity) if err != nil { @@ -93,12 +94,12 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentity: localIdentity, targetIdentities: targetIdentities, @@ -112,7 +113,11 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + v2LocalIdentity, err := toV2ProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -121,7 +126,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil server options") } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity for _, localIdentity := range opts.LocalIdentities { protoLocalIdentity, err := toProtoIdentity(localIdentity) if err != nil { @@ -134,12 +139,12 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentities: localIdentities, isClient: false, @@ -147,7 +152,15 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := 
getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) + var v2LocalIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toV2ProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + v2LocalIdentities = append(v2LocalIdentities, protoLocalIdentity) + } + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -248,22 +261,22 @@ func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { info := *c.info - var localIdentity *commonpb.Identity + var localIdentity *commonpbv1.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) } } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity if c.targetIdentities != nil { - targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) + targetIdentities = make([]*commonpbv1.Identity, len(c.targetIdentities)) for i, targetIdentity := range c.targetIdentities { - targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) + targetIdentities[i] = proto.Clone(targetIdentity).(*commonpbv1.Identity) } } return &s2aTransportCreds{ @@ -351,6 +364,12 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE case Spiffe: return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + case ReservedCustomVerificationMode3: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 + case ReservedCustomVerificationMode4: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 + case ReservedCustomVerificationMode5: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } @@ -396,24 +415,20 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net defer cancel() var s2aTLSConfig *tls.Config + var c net.Conn retry.Run(timeoutCtx, func() error { s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ ServerName: serverName, }) - return err - }) - if err != nil { - grpclog.Infof("error building S2A TLS config: %v", err) - return fallback(err) - } + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return err + } - s2aDialer := &tls.Dialer{ - Config: s2aTLSConfig, - } - var c net.Conn - retry.Run(timeoutCtx, - func() error { + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } c, err = s2aDialer.DialContext(timeoutCtx, network, addr) return err }) diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go 
index fcdbc1621b..5bbf31bf41 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -28,7 +28,8 @@ import ( "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) // Identity is the interface for S2A identities. @@ -76,9 +77,12 @@ type VerificationModeType int // Three types of verification modes. const ( - Unspecified = iota - ConnectToGoogle + Unspecified VerificationModeType = iota Spiffe + ConnectToGoogle + ReservedCustomVerificationMode3 + ReservedCustomVerificationMode4 + ReservedCustomVerificationMode5 ) // ClientOptions contains the client-side options used to establish a secure @@ -198,7 +202,23 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { +func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} + +func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { if identity == nil { return nil, nil } diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index ef508417b3..29a5900c7d 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.0" + "v2": "2.14.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index ae71149470..9fb9035908 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,54 @@ # Changelog +## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13) + + +### Features + +* **internallog:** add a logging support package ([#380](https://github.com/googleapis/gax-go/issues/380)) ([c877470](https://github.com/googleapis/gax-go/commit/c87747098135631a3de5865ed03aaf2c79fd9319)) + +## [2.13.0](https://github.com/googleapis/gax-go/compare/v2.12.5...v2.13.0) (2024-07-22) + + +### Features + +* **iterator:** add package to help work with new iter.Seq types ([#358](https://github.com/googleapis/gax-go/issues/358)) ([6bccdaa](https://github.com/googleapis/gax-go/commit/6bccdaac011fe6fd147e4eb533a8e6520b7d4acc)) + +## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18) + + +### Bug Fixes + +* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350) + +## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) + + +### Bug Fixes + +* provide unmarshal 
options for streams ([#343](https://github.com/googleapis/gax-go/issues/343)) ([ddf9a90](https://github.com/googleapis/gax-go/commit/ddf9a90bf180295d49875e15cb80b2136a49dbaf)) + +## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) + + +### Bug Fixes + +* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2)) + +## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23) + + +### Bug Fixes + +* **v2/callctx:** fix SetHeader race by cloning header map ([#326](https://github.com/googleapis/gax-go/issues/326)) ([534311f](https://github.com/googleapis/gax-go/commit/534311f0f163d101f30657736c0e6f860e9c39dc)) + +## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13) + + +### Bug Fixes + +* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe)) + ## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index d785a065ca..7de60773d6 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -206,8 +206,10 @@ func (a *APIError) Error() string { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { + } else if a.status != nil && a.err != nil { msg = a.err.Error() + } else if a.status != nil { + msg = a.status.Message() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go index af15fb5827..f5af5c990f 100644 --- a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -38,6 +38,14 @@ import ( ) const ( + // XGoogFieldMaskHeader is the canonical header key for the [System Parameter] + // that specifies the response read mask. The value(s) for this header + // must adhere to format described in [fieldmaskpb]. + // + // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters + // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb + XGoogFieldMaskHeader = "x-goog-fieldmask" + headerKey = contextKey("header") ) @@ -66,9 +74,27 @@ func SetHeaders(ctx context.Context, keyvals ...string) context.Context { h, ok := ctx.Value(headerKey).(map[string][]string) if !ok { h = make(map[string][]string) + } else { + h = cloneHeaders(h) } + for i := 0; i < len(keyvals); i = i + 2 { h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) } return context.WithValue(ctx, headerKey, h) } + +// cloneHeaders makes a new key-value map while reusing the value slices. +// As such, new values should be appended to the value slice, and modifying +// indexed values is not thread safe. +// +// TODO: Replace this with maps.Clone when Go 1.21 is the minimum version. 
+func cloneHeaders(h map[string][]string) map[string][]string { + c := make(map[string][]string, len(h)) + for k, v := range h { + vc := make([]string, len(v)) + copy(vc, v) + c[k] = vc + } + return c +} diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 453fab7ecc..f5273985af 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -103,7 +103,9 @@ func goVersion() string { return "UNKNOWN" } -// XGoogHeader is for use by the Google Cloud Libraries only. +// XGoogHeader is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. // // XGoogHeader formats key-value pairs. // The resulting string is suitable for x-goog-api-client header. @@ -125,7 +127,8 @@ func XGoogHeader(keyval ...string) string { } // InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries -// only. +// only. See package [github.com/googleapis/gax-go/v2/callctx] for help +// setting/retrieving request/response headers. // // InsertMetadataIntoOutgoingContext returns a new context that merges the // provided keyvals metadata pairs with any existing metadata/headers in the @@ -137,7 +140,9 @@ func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) c return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) } -// BuildHeaders is for use by the Google Cloud Libraries only. +// BuildHeaders is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. // // BuildHeaders returns a new http.Header that merges the provided // keyvals header pairs with any existing metadata/headers in the provided @@ -158,11 +163,38 @@ func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { out = metadata.MD(make(map[string][]string)) } headers := callctx.HeadersFromContext(ctx) - for k, v := range headers { - out[k] = append(out[k], v...) + + // x-goog-api-client is a special case that we want to make sure gets merged + // into a single header. + const xGoogHeader = "x-goog-api-client" + var mergedXgoogHeader strings.Builder + + for k, vals := range headers { + if k == xGoogHeader { + // Merge all values for the x-goog-api-client header set on the ctx. + for _, v := range vals { + mergedXgoogHeader.WriteString(v) + mergedXgoogHeader.WriteRune(' ') + } + continue + } + out[k] = append(out[k], vals...) } for i := 0; i < len(keyvals); i = i + 2 { out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + + if keyvals[i] == xGoogHeader { + // Merge the x-goog-api-client header values set on the ctx with any + // values passed in for it from the client. + mergedXgoogHeader.WriteString(keyvals[i+1]) + mergedXgoogHeader.WriteRune(' ') + } + } + + // Add the x goog header back in, replacing the separate values that were set. + if mergedXgoogHeader.Len() > 0 { + out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]} } + return out } diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 7425b5ffbb..8828893454 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "2.12.0" +const Version = "2.14.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go new file mode 100644 index 0000000000..19f4be35c2 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go @@ -0,0 +1,134 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package internal provides some common logic and types to other logging +// sub-packages. +package internal + +import ( + "context" + "io" + "log/slog" + "os" + "strings" + "time" +) + +const ( + // LoggingLevelEnvVar is the environment variable used to enable logging + // at a particular level. + LoggingLevelEnvVar = "GOOGLE_SDK_GO_LOGGING_LEVEL" + + googLvlKey = "severity" + googMsgKey = "message" + googSourceKey = "sourceLocation" + googTimeKey = "timestamp" +) + +// NewLoggerWithWriter is exposed for testing. +func NewLoggerWithWriter(w io.Writer) *slog.Logger { + lvl, loggingEnabled := checkLoggingLevel() + if !loggingEnabled { + return slog.New(noOpHandler{}) + } + return slog.New(newGCPSlogHandler(lvl, w)) +} + +// checkLoggingLevel returned the configured logging level and whether or not +// logging is enabled. +func checkLoggingLevel() (slog.Leveler, bool) { + sLevel := strings.ToLower(os.Getenv(LoggingLevelEnvVar)) + var level slog.Level + switch sLevel { + case "debug": + level = slog.LevelDebug + case "info": + level = slog.LevelInfo + case "warn": + level = slog.LevelWarn + case "error": + level = slog.LevelError + default: + return nil, false + } + return level, true +} + +// newGCPSlogHandler returns a Handler that is configured to output in a JSON +// format with well-known keys. For more information on this format see +// https://cloud.google.com/logging/docs/agent/logging/configuration#special-fields. 
+func newGCPSlogHandler(lvl slog.Leveler, w io.Writer) slog.Handler { + return slog.NewJSONHandler(w, &slog.HandlerOptions{ + Level: lvl, + ReplaceAttr: replaceAttr, + }) +} + +// replaceAttr remaps default Go logging keys to match what is expected in +// cloud logging. +func replaceAttr(groups []string, a slog.Attr) slog.Attr { + if groups == nil { + if a.Key == slog.LevelKey { + a.Key = googLvlKey + return a + } else if a.Key == slog.MessageKey { + a.Key = googMsgKey + return a + } else if a.Key == slog.SourceKey { + a.Key = googSourceKey + return a + } else if a.Key == slog.TimeKey { + a.Key = googTimeKey + if a.Value.Kind() == slog.KindTime { + a.Value = slog.StringValue(a.Value.Time().Format(time.RFC3339)) + } + return a + } + } + return a +} + +// The handler returned if logging is not enabled. +type noOpHandler struct{} + +func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return h +} + +func (h noOpHandler) WithGroup(_ string) slog.Handler { + return h +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go new file mode 100644 index 0000000000..91b648a6a4 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go @@ -0,0 +1,154 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package internallog in intended for internal use by generated clients only. +package internallog + +import ( + "bytes" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "os" + "strings" + + "github.com/googleapis/gax-go/v2/internallog/internal" +) + +// New returns a new [slog.Logger] default logger, or the provided logger if +// non-nil. The returned logger will be a no-op logger unless the environment +// variable GOOGLE_SDK_DEBUG_LOGGING is set. 
+func New(l *slog.Logger) *slog.Logger { + if l != nil { + return l + } + return internal.NewLoggerWithWriter(os.Stderr) +} + +// HTTPRequest returns a lazily evaluated [slog.LogValuer] for a +// [http.Request] and the associated body. +func HTTPRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// HTTPResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func HTTPResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +func processPayload(payload []byte) (slog.Attr, bool) { + peekChar := payload[0] + if peekChar == '{' { + // JSON object + var m map[string]any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else if peekChar == '[' { + // JSON array + var m []any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else { + // Everything else + buf := &bytes.Buffer{} + if err := json.Compact(buf, payload); err != nil { + // Write raw payload incase of error + buf.Write(payload) + } + return slog.String("payload", buf.String()), true + } + return slog.Attr{}, false +} diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go index cc4486eb9e..9b690d40c4 100644 --- a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -111,7 +111,8 @@ func (s *ProtoJSONStream) Recv() (proto.Message, error) { // Initialize a new instance of the protobuf message to unmarshal the // raw data into. 
m := s.typ.New().Interface() - err := protojson.Unmarshal(raw, m) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + err := unm.Unmarshal(raw, m) return m, err } diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go index 0739f5ff58..7c37c66a80 100644 --- a/vendor/github.com/grafana/regexp/backtrack.go +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -91,9 +91,7 @@ func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) { b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits) } else { b.visited = b.visited[:visitedSize] - for i := range b.visited { - b.visited[i] = 0 - } + clear(b.visited) // set to 0 } if cap(b.cap) < ncap { diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go index bc47f4c4a8..53cbd95839 100644 --- a/vendor/github.com/grafana/regexp/onepass.go +++ b/vendor/github.com/grafana/regexp/onepass.go @@ -6,7 +6,7 @@ package regexp import ( "regexp/syntax" - "sort" + "slices" "strings" "unicode" "unicode/utf8" @@ -33,11 +33,11 @@ type onePassInst struct { Next []uint32 } -// OnePassPrefix returns a literal string that all matches for the +// onePassPrefix returns a literal string that all matches for the // regexp must start with. Complete is true if the prefix // is the entire match. Pc is the index of the last rune instruction -// in the string. The OnePassPrefix skips over the mandatory -// EmptyBeginText +// in the string. The onePassPrefix skips over the mandatory +// EmptyBeginText. func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { i := &p.Inst[p.Start] if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { @@ -68,7 +68,7 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { return buf.String(), complete, pc } -// OnePassNext selects the next actionable state of the prog, based on the input character. +// onePassNext selects the next actionable state of the prog, based on the input character. // It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine. // One of the alternates may ultimately lead without input to end of line. If the instruction // is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next. @@ -218,7 +218,7 @@ func cleanupOnePass(prog *onePassProg, original *syntax.Prog) { } } -// onePassCopy creates a copy of the original Prog, as we'll be modifying it +// onePassCopy creates a copy of the original Prog, as we'll be modifying it. func onePassCopy(prog *syntax.Prog) *onePassProg { p := &onePassProg{ Start: prog.Start, @@ -282,13 +282,6 @@ func onePassCopy(prog *syntax.Prog) *onePassProg { return p } -// runeSlice exists to permit sorting the case-folded rune sets. -type runeSlice []rune - -func (p runeSlice) Len() int { return len(p) } -func (p runeSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p runeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} var anyRune = []rune{0, unicode.MaxRune} @@ -383,7 +376,7 @@ func makeOnePass(p *onePassProg) *onePassProg { for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { runes = append(runes, r1, r1) } - sort.Sort(runeSlice(runes)) + slices.Sort(runes) } else { runes = append(runes, inst.Rune...) 
} @@ -407,7 +400,7 @@ func makeOnePass(p *onePassProg) *onePassProg { for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { runes = append(runes, r1, r1) } - sort.Sort(runeSlice(runes)) + slices.Sort(runes) } else { runes = append(runes, inst.Rune[0], inst.Rune[0]) } diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go index 7958a39728..d1218ad0e8 100644 --- a/vendor/github.com/grafana/regexp/regexp.go +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -8,9 +8,7 @@ // general syntax used by Perl, Python, and other languages. // More precisely, it is the syntax accepted by RE2 and described at // https://golang.org/s/re2syntax, except for \C. -// For an overview of the syntax, run -// -// go doc regexp/syntax +// For an overview of the syntax, see the [regexp/syntax] package. // // The regexp implementation provided by this package is // guaranteed to run in time linear in the size of the input. @@ -23,10 +21,10 @@ // or any book about automata theory. // // All characters are UTF-8-encoded code points. -// Following utf8.DecodeRune, each byte of an invalid UTF-8 sequence +// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence // is treated as if it encoded utf8.RuneError (U+FFFD). // -// There are 16 methods of Regexp that match a regular expression and identify +// There are 16 methods of [Regexp] that match a regular expression and identify // the matched text. Their names are matched by this regular expression: // // Find(All)?(String)?(Submatch)?(Index)? @@ -82,7 +80,7 @@ import ( // Regexp is the representation of a compiled regular expression. // A Regexp is safe for concurrent use by multiple goroutines, -// except for configuration methods, such as Longest. +// except for configuration methods, such as [Regexp.Longest]. type Regexp struct { expr string // as passed to Compile prog *syntax.Prog // compiled program @@ -110,21 +108,21 @@ func (re *Regexp) String() string { return re.expr } -// Copy returns a new Regexp object copied from re. -// Calling Longest on one copy does not affect another. +// Copy returns a new [Regexp] object copied from re. +// Calling [Regexp.Longest] on one copy does not affect another. // -// Deprecated: In earlier releases, when using a Regexp in multiple goroutines, +// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines, // giving each goroutine its own copy helped to avoid lock contention. // As of Go 1.12, using Copy is no longer necessary to avoid lock contention. // Copy may still be appropriate if the reason for its use is to make -// two copies with different Longest settings. +// two copies with different [Regexp.Longest] settings. func (re *Regexp) Copy() *Regexp { re2 := *re return &re2 } // Compile parses a regular expression and returns, if successful, -// a Regexp object that can be used to match against text. +// a [Regexp] object that can be used to match against text. // // When matching against text, the regexp returns a match that // begins as early as possible in the input (leftmost), and among those @@ -132,12 +130,12 @@ func (re *Regexp) Copy() *Regexp { // This so-called leftmost-first matching is the same semantics // that Perl, Python, and other implementations use, although this // package implements it without the expense of backtracking. -// For POSIX leftmost-longest matching, see CompilePOSIX. +// For POSIX leftmost-longest matching, see [CompilePOSIX]. 
func Compile(expr string) (*Regexp, error) { return compile(expr, syntax.Perl, false) } -// CompilePOSIX is like Compile but restricts the regular expression +// CompilePOSIX is like [Compile] but restricts the regular expression // to POSIX ERE (egrep) syntax and changes the match semantics to // leftmost-longest. // @@ -164,7 +162,7 @@ func CompilePOSIX(expr string) (*Regexp, error) { // That is, when matching against text, the regexp returns a match that // begins as early as possible in the input (leftmost), and among those // it chooses a match that is as long as possible. -// This method modifies the Regexp and may not be called concurrently +// This method modifies the [Regexp] and may not be called concurrently // with any other methods. func (re *Regexp) Longest() { re.longest = true @@ -270,7 +268,7 @@ func (re *Regexp) put(m *machine) { matchPool[re.mpool].Put(m) } -// minInputLen walks the regexp to find the minimum length of any matchable input +// minInputLen walks the regexp to find the minimum length of any matchable input. func minInputLen(re *syntax.Regexp) int { switch re.Op { default: @@ -310,7 +308,7 @@ func minInputLen(re *syntax.Regexp) int { } } -// MustCompile is like Compile but panics if the expression cannot be parsed. +// MustCompile is like [Compile] but panics if the expression cannot be parsed. // It simplifies safe initialization of global variables holding compiled regular // expressions. func MustCompile(str string) *Regexp { @@ -321,7 +319,7 @@ func MustCompile(str string) *Regexp { return regexp } -// MustCompilePOSIX is like CompilePOSIX but panics if the expression cannot be parsed. +// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed. // It simplifies safe initialization of global variables holding compiled regular // expressions. func MustCompilePOSIX(str string) *Regexp { @@ -339,13 +337,13 @@ func quote(s string) string { return strconv.Quote(s) } -// NumSubexp returns the number of parenthesized subexpressions in this Regexp. +// NumSubexp returns the number of parenthesized subexpressions in this [Regexp]. func (re *Regexp) NumSubexp() int { return re.numSubexp } // SubexpNames returns the names of the parenthesized subexpressions -// in this Regexp. The name for the first sub-expression is names[1], +// in this [Regexp]. The name for the first sub-expression is names[1], // so that if m is a match slice, the name for m[i] is SubexpNames()[i]. // Since the Regexp as a whole cannot be named, names[0] is always // the empty string. The slice should not be modified. @@ -521,7 +519,7 @@ func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { return re.prefix, re.prefixComplete } -// MatchReader reports whether the text returned by the RuneReader +// MatchReader reports whether the text returned by the [io.RuneReader] // contains any match of the regular expression re. func (re *Regexp) MatchReader(r io.RuneReader) bool { return re.doMatch(r, nil, "") @@ -541,7 +539,7 @@ func (re *Regexp) Match(b []byte) bool { // MatchReader reports whether the text returned by the RuneReader // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. 
func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -552,7 +550,7 @@ func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { // MatchString reports whether the string s // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. func MatchString(pattern string, s string) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -563,7 +561,7 @@ func MatchString(pattern string, s string) (matched bool, err error) { // Match reports whether the byte slice b // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. func Match(pattern string, b []byte) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -572,9 +570,9 @@ func Match(pattern string, b []byte) (matched bool, err error) { return re.Match(b), nil } -// ReplaceAllString returns a copy of src, replacing matches of the Regexp -// with the replacement string repl. Inside repl, $ signs are interpreted as -// in Expand, so for instance $1 represents the text of the first submatch. +// ReplaceAllString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. func (re *Regexp) ReplaceAllString(src, repl string) string { n := 2 if strings.Contains(repl, "$") { @@ -586,9 +584,9 @@ func (re *Regexp) ReplaceAllString(src, repl string) string { return string(b) } -// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp +// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp] // with the replacement string repl. The replacement repl is substituted directly, -// without using Expand. +// without using [Regexp.Expand]. func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { return append(dst, repl...) @@ -596,9 +594,9 @@ func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { } // ReplaceAllStringFunc returns a copy of src in which all matches of the -// Regexp have been replaced by the return value of function repl applied +// [Regexp] have been replaced by the return value of function repl applied // to the matched substring. The replacement returned by repl is substituted -// directly, without using Expand. +// directly, without using [Regexp.Expand]. func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { return append(dst, repl(src[match[0]:match[1]])...) @@ -671,9 +669,9 @@ func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst return buf } -// ReplaceAll returns a copy of src, replacing matches of the Regexp -// with the replacement text repl. Inside repl, $ signs are interpreted as -// in Expand, so for instance $1 represents the text of the first submatch. +// ReplaceAll returns a copy of src, replacing matches of the [Regexp] +// with the replacement text repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. 
func (re *Regexp) ReplaceAll(src, repl []byte) []byte { n := 2 if bytes.IndexByte(repl, '$') >= 0 { @@ -689,9 +687,9 @@ func (re *Regexp) ReplaceAll(src, repl []byte) []byte { return b } -// ReplaceAllLiteral returns a copy of src, replacing matches of the Regexp +// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp] // with the replacement bytes repl. The replacement repl is substituted directly, -// without using Expand. +// without using [Regexp.Expand]. func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { return append(dst, repl...) @@ -699,9 +697,9 @@ func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { } // ReplaceAllFunc returns a copy of src in which all matches of the -// Regexp have been replaced by the return value of function repl applied +// [Regexp] have been replaced by the return value of function repl applied // to the matched byte slice. The replacement returned by repl is substituted -// directly, without using Expand. +// directly, without using [Regexp.Expand]. func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { return append(dst, repl(src[match[0]:match[1]])...) @@ -845,7 +843,7 @@ func (re *Regexp) FindIndex(b []byte) (loc []int) { // FindString returns a string holding the text of the leftmost match in s of the regular // expression. If there is no match, the return value is an empty string, // but it will also be empty if the regular expression successfully matches -// an empty string. Use FindStringIndex or FindStringSubmatch if it is +// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is // necessary to distinguish these cases. func (re *Regexp) FindString(s string) string { var dstCap [2]int @@ -870,7 +868,7 @@ func (re *Regexp) FindStringIndex(s string) (loc []int) { // FindReaderIndex returns a two-element slice of integers defining the // location of the leftmost match of the regular expression in text read from -// the RuneReader. The match text was found in the input stream at +// the [io.RuneReader]. The match text was found in the input stream at // byte offset loc[0] through loc[1]-1. // A return value of nil indicates no match. func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { @@ -904,7 +902,7 @@ func (re *Regexp) FindSubmatch(b []byte) [][]byte { // Expand appends template to dst and returns the result; during the // append, Expand replaces variables in the template with corresponding // matches drawn from src. The match slice should have been returned by -// FindSubmatchIndex. +// [Regexp.FindSubmatchIndex]. // // In the template, a variable is denoted by a substring of the form // $name or ${name}, where name is a non-empty sequence of letters, @@ -922,7 +920,7 @@ func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) [ return re.expand(dst, string(template), src, "", match) } -// ExpandString is like Expand but the template and source are strings. +// ExpandString is like [Regexp.Expand] but the template and source are strings. // It appends to and returns a byte slice in order to give the calling // code control over allocation. 
func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { @@ -1067,7 +1065,7 @@ func (re *Regexp) FindStringSubmatchIndex(s string) []int { // FindReaderSubmatchIndex returns a slice holding the index pairs // identifying the leftmost match of the regular expression of text read by -// the RuneReader, and the matches, if any, of its subexpressions, as defined +// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined // by the 'Submatch' and 'Index' descriptions in the package comment. A // return value of nil indicates no match. func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { @@ -1076,7 +1074,7 @@ func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { const startSize = 10 // The size at which to start a slice in the 'All' routines. -// FindAll is the 'All' version of Find; it returns a slice of all successive +// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive // matches of the expression, as defined by the 'All' description in the // package comment. // A return value of nil indicates no match. @@ -1094,7 +1092,7 @@ func (re *Regexp) FindAll(b []byte, n int) [][]byte { return result } -// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all +// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all // successive matches of the expression, as defined by the 'All' description // in the package comment. // A return value of nil indicates no match. @@ -1112,7 +1110,7 @@ func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { return result } -// FindAllString is the 'All' version of FindString; it returns a slice of all +// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all // successive matches of the expression, as defined by the 'All' description // in the package comment. // A return value of nil indicates no match. @@ -1130,7 +1128,7 @@ func (re *Regexp) FindAllString(s string, n int) []string { return result } -// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a +// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a // slice of all successive matches of the expression, as defined by the 'All' // description in the package comment. // A return value of nil indicates no match. @@ -1148,7 +1146,7 @@ func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { return result } -// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice +// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice // of all successive matches of the expression, as defined by the 'All' // description in the package comment. // A return value of nil indicates no match. @@ -1172,7 +1170,7 @@ func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { return result } -// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns +// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns // a slice of all successive matches of the expression, as defined by the // 'All' description in the package comment. // A return value of nil indicates no match. 
@@ -1190,7 +1188,7 @@ func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { return result } -// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it +// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it // returns a slice of all successive matches of the expression, as defined by // the 'All' description in the package comment. // A return value of nil indicates no match. @@ -1215,7 +1213,7 @@ func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { } // FindAllStringSubmatchIndex is the 'All' version of -// FindStringSubmatchIndex; it returns a slice of all successive matches of +// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of // the expression, as defined by the 'All' description in the package // comment. // A return value of nil indicates no match. @@ -1237,8 +1235,8 @@ func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { // the substrings between those expression matches. // // The slice returned by this method consists of all the substrings of s -// not contained in the slice returned by FindAllString. When called on an expression -// that contains no metacharacters, it is equivalent to strings.SplitN. +// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression +// that contains no metacharacters, it is equivalent to [strings.SplitN]. // // Example: // @@ -1283,3 +1281,24 @@ func (re *Regexp) Split(s string, n int) []string { return strings } + +// MarshalText implements [encoding.TextMarshaler]. The output +// matches that of calling the [Regexp.String] method. +// +// Note that the output is lossy in some cases: This method does not indicate +// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or +// those for which the [Regexp.Longest] method has been called. +func (re *Regexp) MarshalText() ([]byte, error) { + return []byte(re.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler] by calling +// [Compile] on the encoded value. +func (re *Regexp) UnmarshalText(text []byte) error { + newRE, err := Compile(string(text)) + if err != nil { + return err + } + *re = *newRE + return nil +} diff --git a/vendor/github.com/grafana/regexp/syntax/doc.go b/vendor/github.com/grafana/regexp/syntax/doc.go index f6a4b43f7a..877f1043dd 100644 --- a/vendor/github.com/grafana/regexp/syntax/doc.go +++ b/vendor/github.com/grafana/regexp/syntax/doc.go @@ -2,17 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// DO NOT EDIT. This file is generated by mksyntaxgo from the RE2 distribution. +// Code generated by mksyntaxgo from the RE2 distribution. DO NOT EDIT. /* Package syntax parses regular expressions into parse trees and compiles parse trees into programs. Most clients of regular expressions will use the -facilities of package regexp (such as Compile and Match) instead of this package. +facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. # Syntax -The regular expression syntax understood by this package when parsing with the Perl flag is as follows. -Parts of the syntax can be disabled by passing alternate flags to Parse. +The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. +Parts of the syntax can be disabled by passing alternate flags to [Parse]. 
Single characters: @@ -56,6 +56,7 @@ Grouping: (re) numbered capturing group (submatch) (?Pre) named & numbered capturing group (submatch) + (?re) named & numbered capturing group (submatch) (?:re) non-capturing group (?flags) set flags within current group; non-capturing (?flags:re) set flags during re; non-capturing @@ -136,6 +137,6 @@ ASCII character classes: [[:word:]] word characters (== [0-9A-Za-z_]) [[:xdigit:]] hex digit (== [0-9A-Fa-f]) -Unicode character classes are those in unicode.Categories and unicode.Scripts. +Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. */ package syntax diff --git a/vendor/github.com/grafana/regexp/syntax/op_string.go b/vendor/github.com/grafana/regexp/syntax/op_string.go index 3952b2bdd5..1368f5b7ea 100644 --- a/vendor/github.com/grafana/regexp/syntax/op_string.go +++ b/vendor/github.com/grafana/regexp/syntax/op_string.go @@ -4,6 +4,32 @@ package syntax import "strconv" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[OpNoMatch-1] + _ = x[OpEmptyMatch-2] + _ = x[OpLiteral-3] + _ = x[OpCharClass-4] + _ = x[OpAnyCharNotNL-5] + _ = x[OpAnyChar-6] + _ = x[OpBeginLine-7] + _ = x[OpEndLine-8] + _ = x[OpBeginText-9] + _ = x[OpEndText-10] + _ = x[OpWordBoundary-11] + _ = x[OpNoWordBoundary-12] + _ = x[OpCapture-13] + _ = x[OpStar-14] + _ = x[OpPlus-15] + _ = x[OpQuest-16] + _ = x[OpRepeat-17] + _ = x[OpConcat-18] + _ = x[OpAlternate-19] + _ = x[opPseudo-128] +} + const ( _Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate" _Op_name_1 = "opPseudo" diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go index b6d348d00c..6ed6491c80 100644 --- a/vendor/github.com/grafana/regexp/syntax/parse.go +++ b/vendor/github.com/grafana/regexp/syntax/parse.go @@ -44,6 +44,7 @@ const ( ErrTrailingBackslash ErrorCode = "trailing backslash at end of expression" ErrUnexpectedParen ErrorCode = "unexpected )" ErrNestingDepth ErrorCode = "expression nests too deeply" + ErrLarge ErrorCode = "expression too large" ) func (e ErrorCode) String() string { @@ -159,7 +160,7 @@ func (p *parser) reuse(re *Regexp) { func (p *parser) checkLimits(re *Regexp) { if p.numRunes > maxRunes { - panic(ErrInternalError) + panic(ErrLarge) } p.checkSize(re) p.checkHeight(re) @@ -203,7 +204,7 @@ func (p *parser) checkSize(re *Regexp) { } if p.calcSize(re, true) > maxSize { - panic(ErrInternalError) + panic(ErrLarge) } } @@ -248,9 +249,7 @@ func (p *parser) calcSize(re *Regexp, force bool) int64 { size = int64(re.Max)*sub + int64(re.Max-re.Min) } - if size < 1 { - size = 1 - } + size = max(1, size) p.size[re] = size return size } @@ -381,14 +380,12 @@ func minFoldRune(r rune) rune { if r < minFold || r > maxFold { return r } - min := r + m := r r0 := r for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) { - if min > r { - min = r - } + m = min(m, r) } - return min + return m } // op pushes a regexp with the given op onto the stack @@ -897,8 +894,8 @@ func parse(s string, flags Flags) (_ *Regexp, err error) { panic(r) case nil: // ok - case ErrInternalError: // too big - err = &Error{Code: ErrInternalError, Expr: s} + case ErrLarge: // too big + err = &Error{Code: ErrLarge, Expr: s} case ErrNestingDepth: err = &Error{Code: ErrNestingDepth, Expr: s} } 
@@ -1158,9 +1155,18 @@ func (p *parser) parsePerlFlags(s string) (rest string, err error) { // support all three as well. EcmaScript 4 uses only the Python form. // // In both the open source world (via Code Search) and the - // Google source tree, (?Pname) is the dominant form, - // so that's the one we implement. One is enough. - if len(t) > 4 && t[2] == 'P' && t[3] == '<' { + // Google source tree, (?Pname) and (?name) are the + // dominant forms of named captures and both are supported. + startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<' + startsWithName := len(t) > 3 && t[2] == '<' + + if startsWithP || startsWithName { + // position of expr start + exprStartPos := 4 + if startsWithName { + exprStartPos = 3 + } + // Pull out name. end := strings.IndexRune(t, '>') if end < 0 { @@ -1170,8 +1176,8 @@ func (p *parser) parsePerlFlags(s string) (rest string, err error) { return "", &Error{ErrInvalidNamedCapture, s} } - capture := t[:end+1] // "(?P" - name := t[4:end] // "name" + capture := t[:end+1] // "(?P" or "(?" + name := t[exprStartPos:end] // "name" if err = checkUTF8(name); err != nil { return "", err } @@ -1853,6 +1859,22 @@ func cleanClass(rp *[]rune) []rune { return r[:w] } +// inCharClass reports whether r is in the class. +// It assumes the class has been cleaned by cleanClass. +func inCharClass(r rune, class []rune) bool { + _, ok := sort.Find(len(class)/2, func(i int) int { + lo, hi := class[2*i], class[2*i+1] + if r > hi { + return +1 + } + if r < lo { + return -1 + } + return 0 + }) + return ok +} + // appendLiteral returns the result of appending the literal x to the class r. func appendLiteral(r []rune, x rune, flags Flags) []rune { if flags&FoldCase != 0 { @@ -1937,7 +1959,7 @@ func appendClass(r []rune, x []rune) []rune { return r } -// appendFolded returns the result of appending the case folding of the class x to the class r. +// appendFoldedClass returns the result of appending the case folding of the class x to the class r. func appendFoldedClass(r []rune, x []rune) []rune { for i := 0; i < len(x); i += 2 { r = appendFoldedRange(r, x[i], x[i+1]) diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go index 896cdc42c2..6a3705ec8f 100644 --- a/vendor/github.com/grafana/regexp/syntax/prog.go +++ b/vendor/github.com/grafana/regexp/syntax/prog.go @@ -106,7 +106,9 @@ func EmptyOpContext(r1, r2 rune) EmptyOp { // during the evaluation of the \b and \B zero-width assertions. // These assertions are ASCII-only: the word characters are [A-Za-z0-9_]. func IsWordChar(r rune) bool { - return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_' + // Test for lowercase letters first, as these occur more + // frequently than uppercase letters in common cases. + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_' } // An Inst is a single instruction in a regular expression program. @@ -189,7 +191,7 @@ Loop: const noMatch = -1 // MatchRune reports whether the instruction matches (and consumes) r. -// It should only be called when i.Op == InstRune. +// It should only be called when i.Op == [InstRune]. func (i *Inst) MatchRune(r rune) bool { return i.MatchRunePos(r) != noMatch } @@ -198,7 +200,7 @@ func (i *Inst) MatchRune(r rune) bool { // If so, MatchRunePos returns the index of the matching rune pair // (or, when len(i.Rune) == 1, rune singleton). // If not, MatchRunePos returns -1. -// MatchRunePos should only be called when i.Op == InstRune. 
+// MatchRunePos should only be called when i.Op == [InstRune]. func (i *Inst) MatchRunePos(r rune) int { rune := i.Rune @@ -245,7 +247,7 @@ func (i *Inst) MatchRunePos(r rune) int { lo := 0 hi := len(rune) / 2 for lo < hi { - m := lo + (hi-lo)/2 + m := int(uint(lo+hi) >> 1) if c := rune[2*m]; c <= r { if r <= rune[2*m+1] { return m @@ -260,7 +262,7 @@ func (i *Inst) MatchRunePos(r rune) int { // MatchEmptyWidth reports whether the instruction matches // an empty string between the runes before and after. -// It should only be called when i.Op == InstEmptyWidth. +// It should only be called when i.Op == [InstEmptyWidth]. func (i *Inst) MatchEmptyWidth(before rune, after rune) bool { switch EmptyOp(i.Arg) { case EmptyBeginLine: diff --git a/vendor/github.com/grafana/regexp/syntax/regexp.go b/vendor/github.com/grafana/regexp/syntax/regexp.go index 3a4d2d201c..8ad3653abb 100644 --- a/vendor/github.com/grafana/regexp/syntax/regexp.go +++ b/vendor/github.com/grafana/regexp/syntax/regexp.go @@ -8,6 +8,7 @@ package syntax // In this package, re is always a *Regexp and r is always a rune. import ( + "slices" "strconv" "strings" "unicode" @@ -75,24 +76,10 @@ func (x *Regexp) Equal(y *Regexp) bool { } case OpLiteral, OpCharClass: - if len(x.Rune) != len(y.Rune) { - return false - } - for i, r := range x.Rune { - if r != y.Rune[i] { - return false - } - } + return slices.Equal(x.Rune, y.Rune) case OpAlternate, OpConcat: - if len(x.Sub) != len(y.Sub) { - return false - } - for i, sub := range x.Sub { - if !sub.Equal(y.Sub[i]) { - return false - } - } + return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) }) case OpStar, OpPlus, OpQuest: if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) { @@ -112,8 +99,165 @@ func (x *Regexp) Equal(y *Regexp) bool { return true } +// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp. 
+type printFlags uint8 + +const ( + flagI printFlags = 1 << iota // (?i: + flagM // (?m: + flagS // (?s: + flagOff // ) + flagPrec // (?: ) + negShift = 5 // flagI<") @@ -122,15 +266,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) { case OpEmptyMatch: b.WriteString(`(?:)`) case OpLiteral: - if re.Flags&FoldCase != 0 { - b.WriteString(`(?i:`) - } for _, r := range re.Rune { escape(b, r, false) } - if re.Flags&FoldCase != 0 { - b.WriteString(`)`) - } case OpCharClass: if len(re.Rune)%2 != 0 { b.WriteString(`[invalid char class]`) @@ -147,7 +285,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) { lo, hi := re.Rune[i]+1, re.Rune[i+1]-1 escape(b, lo, lo == '-') if lo != hi { - b.WriteRune('-') + if hi != lo+1 { + b.WriteRune('-') + } escape(b, hi, hi == '-') } } @@ -156,25 +296,25 @@ func writeRegexp(b *strings.Builder, re *Regexp) { lo, hi := re.Rune[i], re.Rune[i+1] escape(b, lo, lo == '-') if lo != hi { - b.WriteRune('-') + if hi != lo+1 { + b.WriteRune('-') + } escape(b, hi, hi == '-') } } } b.WriteRune(']') - case OpAnyCharNotNL: - b.WriteString(`(?-s:.)`) - case OpAnyChar: - b.WriteString(`(?s:.)`) + case OpAnyCharNotNL, OpAnyChar: + b.WriteString(`.`) case OpBeginLine: - b.WriteString(`(?m:^)`) + b.WriteString(`^`) case OpEndLine: - b.WriteString(`(?m:$)`) + b.WriteString(`$`) case OpBeginText: b.WriteString(`\A`) case OpEndText: if re.Flags&WasDollar != 0 { - b.WriteString(`(?-m:$)`) + b.WriteString(`$`) } else { b.WriteString(`\z`) } @@ -191,17 +331,17 @@ func writeRegexp(b *strings.Builder, re *Regexp) { b.WriteRune('(') } if re.Sub[0].Op != OpEmptyMatch { - writeRegexp(b, re.Sub[0]) + writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags) } b.WriteRune(')') case OpStar, OpPlus, OpQuest, OpRepeat: - if sub := re.Sub[0]; sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { - b.WriteString(`(?:`) - writeRegexp(b, sub) - b.WriteString(`)`) - } else { - writeRegexp(b, sub) + p := printFlags(0) + sub := re.Sub[0] + if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { + p = flagPrec } + writeRegexp(b, sub, p, flags) + switch re.Op { case OpStar: b.WriteRune('*') @@ -225,27 +365,31 @@ func writeRegexp(b *strings.Builder, re *Regexp) { } case OpConcat: for _, sub := range re.Sub { + p := printFlags(0) if sub.Op == OpAlternate { - b.WriteString(`(?:`) - writeRegexp(b, sub) - b.WriteString(`)`) - } else { - writeRegexp(b, sub) + p = flagPrec } + writeRegexp(b, sub, p, flags) } case OpAlternate: for i, sub := range re.Sub { if i > 0 { b.WriteRune('|') } - writeRegexp(b, sub) + writeRegexp(b, sub, 0, flags) } } } func (re *Regexp) String() string { var b strings.Builder - writeRegexp(&b, re) + var flags map[*Regexp]printFlags + must, cant := calcFlags(re, &flags) + must |= (cant &^ flagI) << negShift + if must != 0 { + must |= flagOff + } + writeRegexp(&b, re, must, flags) return b.String() } diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 0000000000..402433593c --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 0000000000..d31b378152 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific 
extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 0000000000..4528059ca6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +version: 2 + +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + version_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 0000000000..87d5574777 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 0000000000..de264c85a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,721 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
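For orientation, a minimal sketch of a zstd round trip through this module follows; it assumes only the exported `zstd.NewWriter` and `zstd.NewReader` constructors and is meant as an illustration, not canonical usage.

```go
// Minimal zstd round trip through the vendored klauspost/compress package.
// This is a sketch for orientation only; error handling is abbreviated.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Compress.
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello, zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // Close flushes the final frame.
		panic(err)
	}

	// Decompress.
	dec, err := zstd.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, zstd
}
```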
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 
+ * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
    + See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 +
    + See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
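As a rough sketch of the synchronous mode described above (assuming the `zstd.WithEncoderConcurrency` and `zstd.WithDecoderConcurrency` options), setting concurrency to 1 keeps all work in the calling goroutine, which is what makes pooling encoders and decoders straightforward:

```go
// Sketch of "synchronous" stream operation: with a concurrency of 1 the
// encoder and decoder run in the calling goroutine instead of spawning
// worker goroutines. Illustrative only.
package main

import (
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func roundTrip(payload []byte) ([]byte, error) {
	var buf bytes.Buffer

	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return nil, err
	}
	if _, err := enc.Write(payload); err != nil {
		return nil, err
	}
	if err := enc.Close(); err != nil {
		return nil, err
	}

	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return io.ReadAll(dec)
}

func main() {
	if _, err := roundTrip([]byte("example payload")); err != nil {
		log.Fatal(err)
	}
}
```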
    + See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
    + See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
    + See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
    + See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
    + See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
    + See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
    + See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
    + +# deflate usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation +|--------------------|-----------------------------------------|--------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +To disable all assembly add `-tags=noasm`. This works across all packages. + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +```go + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. 
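+
+The paragraph above mentions `NewStatelessWriter` for direct deflate use; as a minimal sketch (not part of the upstream documentation), it could be used like this, assuming `dst` is an `io.Writer` and `payload` is a `[]byte` you provide:
+
+```go
+	// import "github.com/klauspost/compress/flate"
+	// Stateless: each Write is compressed independently, so prefer
+	// batched writes (e.g. via bufio.Writer as above) for a better ratio.
+	w := flate.NewStatelessWriter(dst)
+	if _, err := w.Write(payload); err != nil {
+		return err
+	}
+	return w.Close()
+```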
+ +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. + +# Performance Update 2018 + +It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. + +The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. + +The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. + +The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). + + +## Overall differences. + +There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. + +The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. + +This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. + +There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. + +## Web Content + +This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. + +Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. + +Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. + +## Object files + +This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. 
+ +The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. + +The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. + +## Highly Compressible File + +This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. + +It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. + +So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". + +## Medium-High Compressible + +This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. + +We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. + +## Medium Compressible + +I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. + +The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. + + +## Un-compressible Content + +This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. + + +## Huffman only compression + +This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. + +This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). + +Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. + +The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). + +The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. 
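+
+As a hedged illustration (not from the original text), enabling Huffman-only mode through this package's `flate` writer could look like the sketch below, where `dst` is any `io.Writer` and `data` is the input:
+
+```go
+	// import "github.com/klauspost/compress/flate"
+	// Huffman-only: no match searching, near-linear time encoding.
+	fw, err := flate.NewWriter(dst, flate.HuffmanOnly)
+	if err != nil {
+		return err
+	}
+	defer fw.Close()
+	_, err = fw.Write(data)
+	return err
+```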
For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. +* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. +* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 0000000000..ca6685e2b7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. + +Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. + +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. 
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 0000000000..ea5a692d51 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 0000000000..ea7324da67 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. 
+This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. 
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 0000000000..f65eb3909c --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 0000000000..e82fa3bb7b --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 0000000000..abade2d605 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 0000000000..074018d8f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 0000000000..535cbadfde --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
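Taken together, `Scratch`, its `Out`/`DecompressLimit` fields and `prepare` drive both directions of the FSE codec. A minimal round-trip sketch, assuming the package's exported `Compress`/`Decompress` entry points (only `Decompress` is referenced elsewhere in this diff) and a made-up input; `ErrIncompressible`/`ErrUseRLE` are signals to store the block raw or as RLE rather than hard failures:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"log"

	"github.com/klauspost/compress/fse"
)

func main() {
	// Input with a skewed byte distribution compresses well with FSE.
	in := bytes.Repeat([]byte("aaaaabbbc"), 100)

	var comp fse.Scratch
	compressed, err := fse.Compress(in, &comp)
	switch {
	case errors.Is(err, fse.ErrIncompressible):
		log.Fatal("input would not shrink; store it uncompressed")
	case errors.Is(err, fse.ErrUseRLE):
		log.Fatal("input is a single repeated byte; store it as RLE")
	case err != nil:
		log.Fatal(err)
	}

	// Use a separate Scratch (or set Out to nil) so the compressed
	// buffer is not overwritten by the decompression output.
	var decomp fse.Scratch
	decomp.DecompressLimit = 2 * len(in) // generous cap; left at 0 it defaults to ~2GB in prepare
	out, err := fse.Decompress(compressed, &decomp)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(in, out), len(in), "->", len(compressed))
}
```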
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 0000000000..aff942205f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 0000000000..b3d262958f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 0000000000..8b6e5c6638
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `nil`               | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
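A minimal single-stream round trip looks like the sketch below (assuming the exported `Compress1X`, `ReadTable` and `Scratch.Decompress1X` defined later in this diff; the input and the explicit `MaxDecodedSize` bound are illustrative only):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("entropy coding works on skewed data "), 64)

	// Compress a single stream. The returned block starts with the
	// Huffman table definition unless a previous table was re-used.
	var c huff0.Scratch
	compressed, reused, err := huff0.Compress1X(in, &c)
	if errors.Is(err, huff0.ErrIncompressible) || errors.Is(err, huff0.ErrUseRLE) {
		log.Fatal("store the block uncompressed / as RLE instead")
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Println("table re-used:", reused)

	// Decompress: first strip and load the table, then decode the rest.
	var d huff0.Scratch
	d.MaxDecodedSize = len(in) // upper bound for the decoded output
	dec, data, err := huff0.ReadTable(compressed, &d)
	if err != nil {
		log.Fatal(err)
	}
	out, err := dec.Decompress1X(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(in, out), len(in), "->", len(compressed))
}
```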
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+This will initialize the decoding tables.
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested, which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 0000000000..e36d9742f9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
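The "last set bit" convention used by `init` above can be shown in isolation: before any payload bits are consumed, the reader skips the zero padding of the final byte plus the single end-of-stream marker bit, which is what the expression `advance(8 - uint8(highBit32(uint32(v))))` computes (`highBit32` is defined elsewhere in the package). A standalone sketch using `math/bits` directly, with arbitrary example bytes:

```go
package main

import (
	"fmt"
	"math/bits"
)

// markerSkip returns how many high bits of the last stream byte are consumed
// before real payload bits start: the zero padding plus the single
// end-of-stream marker bit (the highest set bit of that byte).
func markerSkip(lastByte byte) int {
	if lastByte == 0 {
		return -1 // invalid: a stream must end with its marker bit set
	}
	// bits.Len8 is the position of the highest set bit plus one.
	return 8 - (bits.Len8(lastByte) - 1)
}

func main() {
	for _, b := range []byte{0x01, 0x20, 0x80} {
		fmt.Printf("last byte %#02x -> skip %d bits\n", b, markerSkip(b))
	}
	// last byte 0x01 -> skip 8 bits (7 padding bits + marker, no payload in this byte)
	// last byte 0x20 -> skip 3 bits
	// last byte 0x80 -> skip 1 bit  (marker only)
}
```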
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 0000000000..0ebc9aaac7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 0000000000..84aa3d12f0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
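`compress4X` above fixes the layout of a 4X block body: a 6-byte jump table holding the little-endian lengths of the first three streams, followed by the four streams back to back, with the fourth length implied by the remaining bytes. A sketch of locating the streams, mirroring the parsing done by the 4X decompressors later in this diff (the example block bytes are fabricated and not a real Huffman payload):

```go
package main

import (
	"errors"
	"fmt"
)

// splitStreams locates the four independent streams in a 4X block body:
// a 6-byte jump table with the little-endian lengths of streams 0-2,
// then the streams back to back (stream 3's length is implied).
func splitStreams(data []byte) ([4][]byte, error) {
	var streams [4][]byte
	if len(data) < 6 {
		return streams, errors.New("block too small for 4X jump table")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(data[i*2]) | int(data[i*2+1])<<8
		// ">=" because the fourth stream must not be empty.
		if start+length >= len(data) {
			return streams, errors.New("truncated input (or invalid offset)")
		}
		streams[i] = data[start : start+length]
		start += length
	}
	streams[3] = data[start:]
	return streams, nil
}

func main() {
	// A fabricated block: three 1-byte streams and a 2-byte fourth stream.
	block := []byte{1, 0, 1, 0, 1, 0, 0xA0, 0xB0, 0xC0, 0xD0, 0xE0}
	s, err := splitStreams(block)
	fmt.Println(s, err)
}
```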
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. + huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol()].nBits = v.nbBits() + } + + // 
assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 0000000000..0f56b02d74 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
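`ReadTable` below parses the table header that precedes the compressed data: a first byte of 128 or more means that many minus 127 Huffman weights follow directly, packed two per byte as 4-bit nibbles (high nibble first); a smaller first byte is instead the length of an FSE-compressed weight stream, and the final symbol's weight is implied afterwards so the weight total becomes a power of two. A sketch of just the direct-weights case (hypothetical helper and example bytes):

```go
package main

import (
	"errors"
	"fmt"
)

// directWeights handles only the uncompressed header case: first byte >= 128
// announces n = first-127 weights packed as nibbles in the next (n+1)/2 bytes.
// It returns the weights and the remaining (compressed) payload.
func directWeights(block []byte) (weights []uint8, remain []byte, err error) {
	if len(block) < 2 {
		return nil, nil, errors.New("input too small for table")
	}
	iSize := block[0]
	if iSize < 128 {
		return nil, nil, errors.New("weights are FSE compressed; not handled in this sketch")
	}
	oSize := iSize - 127
	packed := (oSize + 1) / 2
	if int(packed) > len(block)-1 {
		return nil, nil, errors.New("input too small for table")
	}
	in := block[1:]
	weights = make([]uint8, oSize)
	for n := uint8(0); n < oSize; n += 2 {
		v := in[n/2]
		weights[n] = v >> 4
		if n+1 < oSize {
			weights[n+1] = v & 15
		}
	}
	return weights, in[packed:], nil
}

func main() {
	// 130 => 3 direct weights follow, packed into 2 bytes: weights 2, 1, 1.
	hdr := []byte{130, 0x21, 0x10, 0xEE /* illustrative first payload byte */}
	w, rest, err := directWeights(hdr)
	fmt.Println(w, rest, err)
}
```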
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// matches will compare a decoding table to a coding table. +// Errors are written to the writer. +// Nothing will be written if table is ok. +func (s *Scratch) matches(ct cTable, w io.Writer) { + if s == nil || len(s.dt.single) == 0 { + return + } + dt := s.dt.single[:1<>8) == byte(sym) { + fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) + errs++ + break + } + } + if errs == 0 { + broken-- + } + continue + } + // Unused bits in input + ub := tablelog - enc.nBits + top := enc.val << ub + // decoder looks at top bits. + dec := dt[top] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 0 { + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) + continue + } + // Ensure that all combinations are covered. + for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errors, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 0000000000..ba7e8e6b02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. 
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 0000000000..c4c7ab2d1f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 0000000000..908c17de63 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 0000000000..77ecd68e0a --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
+package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + srcLen int + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. 
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.srcLen = len(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 0000000000..3954c51219 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 0000000000..e802579c4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 0000000000..4465fbe9e9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 0000000000..6050c10f4c --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 0000000000..40796a49d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 0000000000..77395a6b8b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 0000000000..13c6040a5d --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 0000000000..2754bac6f1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. 
+func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 0000000000..34d01f4aa6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. 
+ +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 0000000000..5a4412f907 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.19 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 0000000000..92e2347bbc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. 
+ +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + +You can specify your desired compression level using `WithEncoderLevel()` option. 
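For example, a minimal sketch that combines both options when creating a writer; the level chosen here is only illustrative, and any of the pre-defined levels can be used the same way:

```Go
import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// CompressBetter compresses 'in' to 'out' at the "better" level and with
// synchronous (single goroutine) encoding. Both options are passed to NewWriter.
func CompressBetter(in io.Reader, out io.Writer) error {
	enc, err := zstd.NewWriter(out,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithEncoderConcurrency(1),
	)
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, in); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}
```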
Currently only pre-defined +compression settings can be specified. + +#### Future Compatibility Guarantees + +This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. + +The goal will be to keep the default efficiency at the default zstd (level 3). +However the encoding should never be assumed to remain the same, +and you should not use hashes of compressed output for similarity checks. + +The Encoder can be assumed to produce the same output from the exact same code version. +However, the may be modes in the future that break this, +although they will not be enabled without an explicit option. + +This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. + +Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), +[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) +and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). + +#### Blocks + +For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. + +`EncodeAll` will encode all input in src and append it to dst. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. + +Encoded blocks can be concatenated and the result will be the combined input stream. +Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. + +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. +// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. 
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +as it is being requested only. + +For decoding buffers, it could look something like this: + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a reader that caches decompressors. +// For this operation type we supply a nil Reader. +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) + +// Decompress a buffer. We don't supply a destination buffer, +// so it will be allocated by the decoder. +func Decompress(src []byte) ([]byte, error) { + return decoder.DecodeAll(src, nil) +} +``` + +Both of these cases should provide the functionality needed. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + +It will only allow a certain number of concurrent operations to run. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. + +### Dictionaries + +Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. + +Dictionaries are added individually to Decoders. +Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. +To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. +Several dictionaries can be added at once. + +The dictionary will be used automatically for the data that specifies them. +A re-used Decoder will still contain the dictionaries registered. + +When registering multiple dictionaries with the same ID, the last one will be used. + +It is possible to use dictionaries when compressing data. + +To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used +and it will likely be used even if it doesn't improve compression. + +The used dictionary must be used to decompress the content. + +For any real gains, the dictionary should be built with similar data. +If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. +Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. +For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). + +For now there is a fixed startup performance penalty for compressing content with dictionaries. +This will likely be improved over time. Just be aware to test performance when implementing. + +### Allocation-less operation + +The decoder has been designed to operate without allocations after a warmup. + +This means that you should *store* the decoder for best performance. +To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. +A decoder can safely be re-used even if the previous stream failed. + +To release the resources, you must call the `Close()` function on a decoder. +After this it can *no longer be reused*, but all running goroutines will be stopped. +So you *must* use this if you will no longer need the Reader. + +For decompressing smaller buffers a single decoder can be used. 
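For example (a minimal sketch; the stored decoder mirrors the buffer example above, and the size hint parameter is purely illustrative):

```Go
import "github.com/klauspost/compress/zstd"

// A stored decoder, reused across calls so no allocations are needed
// after warmup.
var decoder, _ = zstd.NewReader(nil)

// DecompressWithHint decodes src into a freshly made destination that
// already has the expected capacity. sizeHint is not part of the package
// API; it stands in for whatever output size you expect.
func DecompressWithHint(src []byte, sizeHint int) ([]byte, error) {
	return decoder.DecodeAll(src, make([]byte, 0, sizeHint))
}
```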
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. 
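A minimal sketch of per-archive registration on the writing side, using the package's zip helpers described in the example linked below (the entry name is illustrative; the reading side registers `zstd.ZipDecompressor()` on the `zip.Reader` the same way):

```Go
import (
	"archive/zip"
	"io"

	"github.com/klauspost/compress/zstd"
)

// writeZstdZip writes a single zstd-compressed entry to out, registering
// the compressor on this zip.Writer only instead of globally.
func writeZstdZip(out io.Writer, data []byte) error {
	zw := zip.NewWriter(out)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{
		Name:   "payload.bin", // illustrative entry name
		Method: zstd.ZipMethodWinZip,
	})
	if err != nil {
		return err
	}
	if _, err := w.Write(data); err != nil {
		return err
	}
	return zw.Close()
}
```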
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 0000000000..25ca983941 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return len(b.in) == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 0000000000..1952f175b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. 
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 0000000000..9c28840c3b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,731 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. 
+ maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. 
+ if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + if debugDecoder { + println("Reading table for", tableIndex(i)) + } + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil 
+ } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 0000000000..32a7f401d5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,909 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
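+// blockenc.go builds a single zstd block: literals compressed with huff0 followed by FSE-coded sequences.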
+ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. 
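+// The low bits hold the encoded Literals_Section_Header, while bits 60+ record how many bytes appendTo will emit (1-5).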
+type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. 
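+// Returned values 1-3 select a repeat offset (with the shifted meaning the format defines when the sequence carries no literals); larger values encode the real offset plus 3.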
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. 
+ var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. 
+ bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
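+ // The penalty below works out to roughly nSize/16 plus 16 bits (2 bytes).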
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
+ ml.flush(mlEnc.actualTableLog)
+ of.flush(ofEnc.actualTableLog)
+ ll.flush(llEnc.actualTableLog)
+ wr.close()
+ b.output = wr.out
+
+ // Maybe even add a bigger margin.
+ if len(b.output)-3-bhOffset >= b.size {
+ // Discard and encode as raw block.
+ b.output = b.encodeRawTo(b.output[:bhOffset], org)
+ b.popOffsets()
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ return nil
+ }
+
+ // Size is output minus block header.
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+ if debugEncoder {
+ println("Rewriting block header", bh)
+ }
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
+ b.coders.setPrev(llEnc, mlEnc, ofEnc)
+ return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+ if len(b.sequences) == 0 {
+ // nothing to do
+ return
+ }
+ if len(b.sequences) > math.MaxUint16 {
+ panic("can only encode up to 64K sequences")
+ }
+ // No bounds checks after here:
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
+ for i := range llH {
+ llH[i] = 0
+ }
+ for i := range ofH {
+ ofH[i] = 0
+ }
+ for i := range mlH {
+ mlH[i] = 0
+ }
+
+ var llMax, ofMax, mlMax uint8
+ for i := range b.sequences {
+ seq := &b.sequences[i]
+ v := llCode(seq.litLen)
+ seq.llCode = v
+ llH[v]++
+ if v > llMax {
+ llMax = v
+ }
+
+ v = ofCode(seq.offset)
+ seq.ofCode = v
+ ofH[v]++
+ if v > ofMax {
+ ofMax = v
+ }
+
+ v = mlCode(seq.matchLen)
+ seq.mlCode = v
+ mlH[v]++
+ if v > mlMax {
+ mlMax = v
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+ }
+ }
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+ }
+ if debugAsserts && ofMax > maxOffsetBits {
+ panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+ }
+ if debugAsserts && llMax > maxLiteralLengthSymbol {
+ panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+ }
+
+ b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+ b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+ b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 0000000000..01a01e486e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 0000000000..55a388553d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. 
+ readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 0000000000..0e59a242d8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
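+// The value receiver means the read does not advance the offset.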
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 0000000000..6a5a2988b6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. 
+ // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
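+// The returned remain slice aliases the input and starts at the first block header, or at the skippable payload for skippable frames.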
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 0000000000..bbca17234a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. 
It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... 
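// Illustrative sketch (not part of the vendored sources): the streaming mode
// described in the NewReader/Reset comments above, with one decoder created
// once and Reset per input. The file names are assumptions for the example.
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil) // nil reader: each stream is started with Reset
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	for _, name := range []string{"a.zst", "b.zst"} { // placeholder inputs
		f, err := os.Open(name)
		if err != nil {
			log.Fatal(err)
		}
		if err := dec.Reset(f); err != nil {
			log.Fatal(err)
		}
		// WriteTo drains the whole stream; Read or io.Copy would work equally well.
		if _, err := dec.WriteTo(os.Stdout); err != nil {
			log.Fatal(err)
		}
		f.Close()
	}
}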
+ d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
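// Illustrative sketch (not part of the vendored sources): stateless decoding
// with DecodeAll as implemented above. The memory limit, buffer sizes and the
// `frames` input are assumptions; DecodeAll appends to dst, so pre-sizing the
// buffer avoids reallocations when the frame content size is unknown.
package example

import "github.com/klauspost/compress/zstd"

func decodeFrames(frames [][]byte) ([][]byte, error) {
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(64<<20))
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	out := make([][]byte, 0, len(frames))
	for _, f := range frames {
		// DecodeAll may also be called concurrently; here it is used serially.
		b, err := dec.DecodeAll(f, make([]byte, 0, 1<<20))
		if err != nil {
			return nil, err // e.g. zstd.ErrDecoderSizeExceeded or zstd.ErrWindowSizeExceeded
		}
		out = append(out, b)
	}
	return out, nil
}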
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
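// Illustrative sketch (not part of the vendored sources): the per-frame
// checksum verification in nextBlock/nextBlockSync above surfaces corruption
// as ErrCRCMismatch. The file name and discard-only copy are assumptions.
package example

import (
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func verifyArchive(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	dec, err := zstd.NewReader(f)
	if err != nil {
		return err
	}
	defer dec.Close()

	// Decompress and discard; we only care whether the stored checksums match.
	if _, err := io.Copy(io.Discard, dec); err != nil {
		if errors.Is(err, zstd.ErrCRCMismatch) {
			return fmt.Errorf("%s is corrupted: %w", path, err)
		}
		return err
	}
	return nil
}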
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
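// Illustrative sketch (not part of the vendored sources): IOReadCloser, added
// above, adapts the decoder to io.ReadCloser so it can stand in for e.g. an
// HTTP response body. The URL handling is an assumption for the example.
package example

import (
	"io"
	"net/http"

	"github.com/klauspost/compress/zstd"
)

func fetchDecompressed(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	dec, err := zstd.NewReader(resp.Body)
	if err != nil {
		return nil, err
	}
	// The wrapper shares the decoder; closing it also closes the decoder.
	rc := dec.IOReadCloser()
	defer rc.Close()

	return io.ReadAll(rc)
}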
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 0000000000..774c5f00fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. 
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 0000000000..b7b83164bc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,565 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
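// Illustrative sketch (not part of the vendored sources): combining the
// decoder options defined above to bound resource usage for untrusted input.
// The concrete limits are example values, not library recommendations;
// WithDecodeAllCapLimit makes the spare capacity of dst the hard output
// budget for DecodeAll.
package example

import "github.com/klauspost/compress/zstd"

func decodeUntrusted(compressed []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderConcurrency(1),    // synchronous decoding, fewest goroutines
		zstd.WithDecoderMaxMemory(64<<20), // cap decoded size at 64 MiB
		zstd.WithDecoderMaxWindow(8<<20),  // reject frames demanding huge windows
		zstd.WithDecoderLowmem(true),      // prefer smaller allocations
		zstd.WithDecodeAllCapLimit(true),  // never grow beyond cap(dst)
	)
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	// With the cap limit enabled, output exceeding cap(dst)-len(dst) returns
	// ErrDecoderSizeExceeded instead of allocating a larger buffer.
	return dec.DecodeAll(compressed, make([]byte, 0, 1<<20))
}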
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
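// Illustrative sketch (not part of the vendored sources): registering a
// trained dictionary with the decoder and inspecting it via InspectDictionary.
// The dictionary path is an assumption; the bytes must be in the
// "zstd --train" format that loadDict above parses.
package example

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func newDictDecoder(dictPath string) (*zstd.Decoder, error) {
	dictBytes, err := os.ReadFile(dictPath)
	if err != nil {
		return nil, err
	}
	info, err := zstd.InspectDictionary(dictBytes)
	if err != nil {
		return nil, err // zstd.ErrMagicMismatch for a malformed dictionary
	}
	fmt.Printf("dict id=%d content=%dB offsets=%v\n",
		info.ID(), info.ContentSize(), info.Offsets())

	// Frames whose Dictionary_ID matches are decoded with this dictionary;
	// frames referencing an unregistered, non-zero id fail with ErrUnknownDictionary.
	return zstd.NewReader(nil, zstd.WithDecoderDicts(dictBytes))
}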
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if int(offset) >= len(o.History) { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. + nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + + // Ensure we aren't trying to represent RLE. 
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 0000000000..5ca46038ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. 
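// Illustrative sketch (not part of the vendored sources): building a
// dictionary with BuildDict as added above. Sample handling, the dictionary
// id and the output path are assumptions; History must be at least 8 bytes,
// and the initial repeat offsets below (the zstd defaults) are replaced by the
// most frequently observed offsets when enough samples are provided.
package example

import (
	"os"

	"github.com/klauspost/compress/zstd"
)

func trainDict(samples [][]byte, history []byte) error {
	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,            // arbitrary non-zero dictionary id
		Contents: samples,         // payloads used to build the tables
		History:  history,         // shared history, at least 8 bytes
		Offsets:  [3]int{1, 4, 8}, // initial repeat offsets
	})
	if err != nil {
		return err
	}
	return os.WriteFile("example.dict", dict, 0o644)
}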
+func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + e.blk.dictLitEnc = nil + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 0000000000..4613724e9d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,560 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 0000000000..84a79fde76 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1252 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 0000000000..d36be7bd8c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
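+	// Heuristic: after that much input most shards have been touched anyway, so
+	// wholesale invalidation is cheaper than per-shard tracking.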
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 0000000000..f45a3da7da --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 0000000000..8f8223cd3a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,642 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
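+//
+// A minimal block-mode round trip (illustrative sketch; payload is caller supplied):
+//
+//	enc, _ := NewWriter(nil) // a nil writer is fine when only EncodeAll is used
+//	compressed := enc.EncodeAll(payload, nil)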
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
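+		// add was capped above so s.filling never grows past e.o.blockSize; a full
+		// buffer is flushed by nextBlock further down.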
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + if final { + s.eofWritten = true + } + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
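+		// The previous block has finished writing (wWg was waited on above), so its
+		// buffers can be reused by the encoder while this block is queued for writing.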
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. 
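+		// The frame content checksum is 4 bytes (the low bits of the xxhash64 digest)
+		// appended after the last block.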
+ var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) 
+ blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 0000000000..20671dcb91 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,339 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: false, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. 
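+// The *Dict variants returned here additionally track per-shard table dirtiness
+// so dictionary state can be restored cheaply on Reset.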
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level and max 8MB. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. 
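+// For example, WithEncoderPadding(1024) pads the output up to the next multiple of 1 KiB.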
+func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + n = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. 
+// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. 
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 0000000000..e47af66e7c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,415 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. 
+ break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. 
+ d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. +func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. 
+ crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 0000000000..667ca06794 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are not stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 0000000000..2f8860a722 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+	fatalErr := func(err error) {
+		if err != nil {
+			panic(err)
+		}
+	}
+	// dt [maxTablesize]decSymbol // Decompression table.
+	// symbolLen uint16 // Length of active part of the symbol table.
+	// actualTableLog uint8 // Selected tablelog.
+	// maxBits uint8 // Maximum number of additional bits
+	// // used for table creation to avoid allocations.
+	// stateTable [256]uint16
+	// norm [maxSymbolValue + 1]int16
+	// preDefined bool
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder til RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 0000000000..d04a829b0a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 0000000000..bcde398695 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 0000000000..8adfebb029 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + v = 1 + } + symbolNext[i] = uint16(v) + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + for { + // lowprob area + position = (position + step) & tableMask + if position <= highThreshold { + break + } + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 0000000000..ab26326a8f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 0000000000..474cb77d2b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 0000000000..5d73c21ebd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 0000000000..09164856d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 0000000000..24b53065f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 0000000000..777290d44c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
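+For illustration, here is a minimal usage sketch of the one-shot and streaming APIs listed above. It assumes the upstream module path `github.com/cespare/xxhash/v2`, since this vendored copy sits under an `internal` directory and is not importable directly:
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+func main() {
+	// One-shot hashing of a byte slice or a string.
+	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))
+	fmt.Printf("%016x\n", xxhash.Sum64String("hello, world"))
+
+	// Streaming hashing via Digest, which implements hash.Hash64.
+	d := xxhash.New()
+	d.WriteString("hello, ")
+	d.Write([]byte("world"))
+	fmt.Printf("%016x\n", d.Sum64())
+}
+```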
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 0000000000..fc40c82001 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
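+		// Copy from b until mem holds a full 32-byte block, process that block,
+		// then continue with the remainder of b.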
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 0000000000..ddb63aa91b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
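+	// v1 = prime1 + prime2, v2 = prime2, v3 = 0, v4 = -prime1 (mirrors Digest.Reset).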
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 0000000000..ae7d4d3295 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 0000000000..d4221edf4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 0000000000..0be16cefc7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 0000000000..6f3b0cb102 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 0000000000..f41932b7a4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 0000000000..0782b86e3d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 0000000000..57b9c31c02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 0000000000..d7fe6d82d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,503 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
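+	// Reading backwards means the offset extra bits come first,
+	// followed by the match length and then the literal length extra bits.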
+ br.fill() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fill() + } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 0000000000..c59f17e07a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// error reported when bits are overread. +const errorOverread = 6 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
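+		// The append above only grows capacity; the next line restores the original length.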
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 0000000000..f5591fa1e8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
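+	// Fast path: with at least 8 input bytes remaining, refill the 64-bit value with a single 8-byte load.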
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 
152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
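	// (When at least 8 input bytes remain, the bit buffer is refilled with a
	// single 8-byte load; otherwise the byte-by-byte loop below is used, which
	// also performs the overread check.)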
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 0000000000..2fb35b788c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
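				// (When literals_length != 0 there is no shift: offset_value 1, 2 and 3
				// refer to Repeated_Offset1, Repeated_Offset2 and Repeated_Offset3.)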
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
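			// (The match starts in history but is longer than what history can
			// supply: the first v bytes come from hist, and the rest is copied
			// from the output produced so far, further below.)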
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 0000000000..8014174a77 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
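// Lengths 0-15 map directly to codes 0-15 with no extra bits; above 63 the
// code is highBit(litLength)+19, e.g. a literal length of 1000 (highBit 9)
// yields code 28, which per llBitsTable carries 9 extra bits (lengths 512-1023).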
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 0000000000..ec13594e89 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
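			// (Chunk layout: a 4-byte masked CRC-32C of the uncompressed data,
			// followed by the snappy-compressed payload; chunkLen, decoded above
			// from the 3-byte little-endian length field, covers both.)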
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 0000000000..29c15c8c4e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 0000000000..066bef2a4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,125 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) 
+ } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index accd7abaf9..21508edbdb 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -9,10 +9,7 @@ You can access the CPU information by accessing the shared CPU variable of the c Package home: https://github.com/klauspost/cpuid [![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2) -[![Build Status][3]][4] - -[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master -[4]: https://travis-ci.org/klauspost/cpuid +[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml) ## installing @@ -285,7 +282,12 @@ Exit Code 1 | AMXINT8 | Tile computational operations on 8-bit integers | | AMXFP16 | Tile computational operations on FP16 numbers | | AMXTILE | Tile architecture | +| APX_F | Intel APX | | AVX | AVX functions | +| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported | +| AVX10_128 | If set indicates that AVX10 128-bit vector support is present | +| AVX10_256 | If set indicates that AVX10 256-bit vector support is present | +| AVX10_512 | If set indicates that AVX10 512-bit vector support is present | | AVX2 | AVX2 functions | | AVX512BF16 | AVX-512 BFLOAT16 Instructions | | AVX512BITALG | AVX-512 Bit Algorithms | @@ -308,6 +310,7 @@ Exit Code 1 | AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | | AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | | AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| AVXVNNIINT16 | AVX-VNNI-INT16 instructions | | BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | | BMI1 | Bit Manipulation Instruction Set 1 | | BMI2 | Bit Manipulation Instruction Set 2 | @@ -365,6 +368,8 @@ Exit Code 1 | IDPRED_CTRL | IPRED_DIS | | INT_WBINVD | WBINVD/WBNOINVD are interruptible. | | INVLPGB | NVLPGB and TLBSYNC instruction supported | +| KEYLOCKER | Key locker | +| KEYLOCKERW | Key locker wide | | LAHF | LAHF/SAHF in long mode | | LAM | If set, CPU supports Linear Address Masking | | LBRVIRT | LBR virtualization | @@ -380,7 +385,7 @@ Exit Code 1 | MOVDIRI | Move Doubleword as Direct Store | | MOVSB_ZL | Fast Zero-Length MOVSB | | MPX | Intel MPX (Memory Protection Extensions) | -| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD | +| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. 
MOVUPD is more efficient than MOVLPD/MOVHPD | | MSRIRC | Instruction Retired Counter MSR available | | MSRLIST | Read/Write List of Model Specific Registers | | MSR_PAGEFLUSH | Page Flush MSR available | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index d015c744e8..53bc18ca71 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -67,188 +67,201 @@ const ( // Keep index -1 as unknown UNKNOWN = -1 - // Add features - ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - AESNI // Advanced Encryption Standard New Instructions - AMD3DNOW // AMD 3DNOW - AMD3DNOWEXT // AMD 3DNowExt - AMXBF16 // Tile computational operations on BFLOAT16 numbers - AMXFP16 // Tile computational operations on FP16 numbers - AMXINT8 // Tile computational operations on 8-bit integers - AMXTILE // Tile architecture - AVX // AVX functions - AVX2 // AVX2 functions - AVX512BF16 // AVX-512 BFLOAT16 Instructions - AVX512BITALG // AVX-512 Bit Algorithms - AVX512BW // AVX-512 Byte and Word Instructions - AVX512CD // AVX-512 Conflict Detection Instructions - AVX512DQ // AVX-512 Doubleword and Quadword Instructions - AVX512ER // AVX-512 Exponential and Reciprocal Instructions - AVX512F // AVX-512 Foundation - AVX512FP16 // AVX-512 FP16 Instructions - AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF // AVX-512 Prefetch Instructions - AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions - AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 - AVX512VL // AVX-512 Vector Length Extensions - AVX512VNNI // AVX-512 Vector Neural Network Instructions - AVX512VP2INTERSECT // AVX-512 Intersect for D/Q - AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword - AVXIFMA // AVX-IFMA instructions - AVXNECONVERT // AVX-NE-CONVERT instructions - AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one - AVXVNNI // AVX (VEX encoded) VNNI neural network instructions - AVXVNNIINT8 // AVX-VNNI-INT8 instructions - BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 - BMI1 // Bit Manipulation Instruction Set 1 - BMI2 // Bit Manipulation Instruction Set 2 - CETIBT // Intel CET Indirect Branch Tracking - CETSS // Intel CET Shadow Stack - CLDEMOTE // Cache Line Demote - CLMUL // Carry-less Multiplication - CLZERO // CLZERO instruction supported - CMOV // i686 CMOV - CMPCCXADD // CMPCCXADD instructions - CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB - CMPXCHG8 // CMPXCHG8 instruction - CPBOOST // Core Performance Boost - CPPC // AMD: Collaborative Processor Performance Control - CX16 // CMPXCHG16B Instruction - EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ - ENQCMD // Enqueue Command - ERMS // Enhanced REP MOVSB/STOSB - F16C // Half-precision floating-point conversion - FLUSH_L1D // Flush L1D cache - FMA3 // Intel FMA 3. Does not imply AVX. - FMA4 // Bulldozer FMA4 functions - FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide - FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide - FSRM // Fast Short Rep Mov - FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 - FXSROPT // FXSAVE/FXRSTOR optimizations - GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. 
- HLE // Hardware Lock Elision - HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR - HTT // Hyperthreading (enabled) - HWA // Hardware assert supported. Indicates support for MSRC001_10 - HYBRID_CPU // This part has CPUs of more than one type. - HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors - IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) - IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR - IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) - IBRS // AMD: Indirect Branch Restricted Speculation - IBRS_PREFERRED // AMD: IBRS is preferred over software solution - IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection - IBS // Instruction Based Sampling (AMD) - IBSBRNTRGT // Instruction Based Sampling Feature (AMD) - IBSFETCHSAM // Instruction Based Sampling Feature (AMD) - IBSFFV // Instruction Based Sampling Feature (AMD) - IBSOPCNT // Instruction Based Sampling Feature (AMD) - IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) - IBSOPSAM // Instruction Based Sampling Feature (AMD) - IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) - IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) - IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported - IBS_OPDATA4 // AMD: IBS op data 4 MSR supported - IBS_OPFUSE // AMD: Indicates support for IbsOpFuse - IBS_PREVENTHOST // Disallowing IBS use by the host supported - IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 - IDPRED_CTRL // IPRED_DIS - INT_WBINVD // WBINVD/WBNOINVD are interruptible. - INVLPGB // NVLPGB and TLBSYNC instruction supported - LAHF // LAHF/SAHF in long mode - LAM // If set, CPU supports Linear Address Masking - LBRVIRT // LBR virtualization - LZCNT // LZCNT instruction - MCAOVERFLOW // MCA overflow recovery support. - MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. - MCOMMIT // MCOMMIT instruction supported - MD_CLEAR // VERW clears CPU buffers - MMX // standard MMX - MMXEXT // SSE integer functions or AMD MMX ext - MOVBE // MOVBE instruction (big-endian) - MOVDIR64B // Move 64 Bytes as Direct Store - MOVDIRI // Move Doubleword as Direct Store - MOVSB_ZL // Fast Zero-Length MOVSB - MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD - MPX // Intel MPX (Memory Protection Extensions) - MSRIRC // Instruction Retired Counter MSR available - MSRLIST // Read/Write List of Model Specific Registers - MSR_PAGEFLUSH // Page Flush MSR available - NRIPS // Indicates support for NRIP save on VMEXIT - NX // NX (No-Execute) bit - OSXSAVE // XSAVE enabled by OS - PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption - POPCNT // POPCNT instruction - PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled - PREFETCHI // PREFETCHIT0/1 instructions - PSFD // Predictive Store Forward Disable - RDPRU // RDPRU instruction supported - RDRAND // RDRAND instruction is available - RDSEED // RDSEED instruction is available - RDTSCP // RDTSCP Instruction - RRSBA_CTRL // Restricted RSB Alternate - RTM // Restricted Transactional Memory - RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. 
- SERIALIZE // Serialize Instruction Execution - SEV // AMD Secure Encrypted Virtualization supported - SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host - SEV_ALTERNATIVE // AMD SEV Alternate Injection supported - SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests - SEV_ES // AMD SEV Encrypted State supported - SEV_RESTRICTED // AMD SEV Restricted Injection supported - SEV_SNP // AMD SEV Secure Nested Paging supported - SGX // Software Guard Extensions - SGXLC // Software Guard Extensions Launch Control - SHA // Intel SHA Extensions - SME // AMD Secure Memory Encryption supported - SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced - SPEC_CTRL_SSBD // Speculative Store Bypass Disable - SRBDS_CTRL // SRBDS mitigation MSR available - SSE // SSE functions - SSE2 // P4 SSE functions - SSE3 // Prescott SSE3 functions - SSE4 // Penryn SSE4.1 functions - SSE42 // Nehalem SSE4.2 functions - SSE4A // AMD Barcelona microarchitecture SSE4a instructions - SSSE3 // Conroe SSSE3 functions - STIBP // Single Thread Indirect Branch Predictors - STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On - STOSB_SHORT // Fast short STOSB - SUCCOR // Software uncorrectable error containment and recovery capability. - SVM // AMD Secure Virtual Machine - SVMDA // Indicates support for the SVM decode assists. - SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control - SVML // AMD SVM lock. Indicates support for SVM-Lock. - SVMNP // AMD SVM nested paging - SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter - SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold - SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. - SYSEE // SYSENTER and SYSEXIT instructions - TBM // AMD Trailing Bit Manipulation - TDX_GUEST // Intel Trust Domain Extensions Guest - TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations - TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. - TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. - TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 - TSXLDTRK // Intel TSX Suspend Load Address Tracking - VAES // Vector AES. AVX(512) versions requires additional checks. - VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. - VMPL // AMD VM Permission Levels supported - VMSA_REGPROT // AMD VMSA Register Protection supported - VMX // Virtual Machine Extensions - VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. - VTE // AMD Virtual Transparent Encryption supported - WAITPKG // TPAUSE, UMONITOR, UMWAIT - WBNOINVD // Write Back and Do Not Invalidate Cache - WRMSRNS // Non-Serializing Write to Model Specific Register - X87 // FPU - XGETBV1 // Supports XGETBV with ECX = 1 - XOP // Bulldozer XOP functions - XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV - XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. 
- XSAVEOPT // XSAVEOPT available - XSAVES // Supports XSAVES/XRSTORS and IA32_XSS + // x86 features + ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + AESNI // Advanced Encryption Standard New Instructions + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + AMXBF16 // Tile computational operations on BFLOAT16 numbers + AMXFP16 // Tile computational operations on FP16 numbers + AMXINT8 // Tile computational operations on 8-bit integers + AMXTILE // Tile architecture + APX_F // Intel APX + AVX // AVX functions + AVX10 // If set the Intel AVX10 Converged Vector ISA is supported + AVX10_128 // If set indicates that AVX10 128-bit vector support is present + AVX10_256 // If set indicates that AVX10 256-bit vector support is present + AVX10_512 // If set indicates that AVX10 512-bit vector support is present + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512FP16 // AVX-512 FP16 Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + AVXIFMA // AVX-IFMA instructions + AVXNECONVERT // AVX-NE-CONVERT instructions + AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one + AVXVNNI // AVX (VEX encoded) VNNI neural network instructions + AVXVNNIINT8 // AVX-VNNI-INT8 instructions + AVXVNNIINT16 // AVX-VNNI-INT16 instructions + BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + CETIBT // Intel CET Indirect Branch Tracking + CETSS // Intel CET Shadow Stack + CLDEMOTE // Cache Line Demote + CLMUL // Carry-less Multiplication + CLZERO // CLZERO instruction supported + CMOV // i686 CMOV + CMPCCXADD // CMPCCXADD instructions + CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB + CMPXCHG8 // CMPXCHG8 instruction + CPBOOST // Core Performance Boost + CPPC // AMD: Collaborative Processor Performance Control + CX16 // CMPXCHG16B Instruction + EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ + ENQCMD // Enqueue Command + ERMS // Enhanced REP MOVSB/STOSB + F16C // Half-precision floating-point conversion + FLUSH_L1D // Flush L1D cache + FMA3 // Intel FMA 3. Does not imply AVX. + FMA4 // Bulldozer FMA4 functions + FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide + FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide + FSRM // Fast Short Rep Mov + FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 + FXSROPT // FXSAVE/FXRSTOR optimizations + GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. 
+ HLE // Hardware Lock Elision + HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR + HTT // Hyperthreading (enabled) + HWA // Hardware assert supported. Indicates support for MSRC001_10 + HYBRID_CPU // This part has CPUs of more than one type. + HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors + IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) + IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR + IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + IBPB_BRTYPE // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor + IBRS // AMD: Indirect Branch Restricted Speculation + IBRS_PREFERRED // AMD: IBRS is preferred over software solution + IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection + IBS // Instruction Based Sampling (AMD) + IBSBRNTRGT // Instruction Based Sampling Feature (AMD) + IBSFETCHSAM // Instruction Based Sampling Feature (AMD) + IBSFFV // Instruction Based Sampling Feature (AMD) + IBSOPCNT // Instruction Based Sampling Feature (AMD) + IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) + IBSOPSAM // Instruction Based Sampling Feature (AMD) + IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) + IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported + IBS_OPDATA4 // AMD: IBS op data 4 MSR supported + IBS_OPFUSE // AMD: Indicates support for IbsOpFuse + IBS_PREVENTHOST // Disallowing IBS use by the host supported + IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 + IDPRED_CTRL // IPRED_DIS + INT_WBINVD // WBINVD/WBNOINVD are interruptible. + INVLPGB // NVLPGB and TLBSYNC instruction supported + KEYLOCKER // Key locker + KEYLOCKERW // Key locker wide + LAHF // LAHF/SAHF in long mode + LAM // If set, CPU supports Linear Address Masking + LBRVIRT // LBR virtualization + LZCNT // LZCNT instruction + MCAOVERFLOW // MCA overflow recovery support. + MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. + MCOMMIT // MCOMMIT instruction supported + MD_CLEAR // VERW clears CPU buffers + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + MOVBE // MOVBE instruction (big-endian) + MOVDIR64B // Move 64 Bytes as Direct Store + MOVDIRI // Move Doubleword as Direct Store + MOVSB_ZL // Fast Zero-Length MOVSB + MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD + MPX // Intel MPX (Memory Protection Extensions) + MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers + MSR_PAGEFLUSH // Page Flush MSR available + NRIPS // Indicates support for NRIP save on VMEXIT + NX // NX (No-Execute) bit + OSXSAVE // XSAVE enabled by OS + PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption + POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. 
Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // Predictive Store Forward Disable + RDPRU // RDPRU instruction supported + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate + RTM // Restricted Transactional Memory + RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. + SBPB // Indicates support for the Selective Branch Predictor Barrier + SERIALIZE // Serialize Instruction Execution + SEV // AMD Secure Encrypted Virtualization supported + SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host + SEV_ALTERNATIVE // AMD SEV Alternate Injection supported + SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests + SEV_ES // AMD SEV Encrypted State supported + SEV_RESTRICTED // AMD SEV Restricted Injection supported + SEV_SNP // AMD SEV Secure Nested Paging supported + SGX // Software Guard Extensions + SGXLC // Software Guard Extensions Launch Control + SHA // Intel SHA Extensions + SME // AMD Secure Memory Encryption supported + SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available + SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO. + SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability + SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On + STOSB_SHORT // Fast short STOSB + SUCCOR // Software uncorrectable error containment and recovery capability. + SVM // AMD Secure Virtual Machine + SVMDA // Indicates support for the SVM decode assists. + SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control + SVML // AMD SVM lock. Indicates support for SVM-Lock. + SVMNP // AMD SVM nested paging + SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter + SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. + SYSEE // SYSENTER and SYSEXIT instructions + TBM // AMD Trailing Bit Manipulation + TDX_GUEST // Intel Trust Domain Extensions Guest + TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations + TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. + TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 + TSXLDTRK // Intel TSX Suspend Load Address Tracking + VAES // Vector AES. 
AVX(512) versions requires additional checks. + VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. + VMPL // AMD VM Permission Levels supported + VMSA_REGPROT // AMD VMSA Register Protection supported + VMX // Virtual Machine Extensions + VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. + VTE // AMD Virtual Transparent Encryption supported + WAITPKG // TPAUSE, UMONITOR, UMWAIT + WBNOINVD // Write Back and Do Not Invalidate Cache + WRMSRNS // Non-Serializing Write to Model Specific Register + X87 // FPU + XGETBV1 // Supports XGETBV with ECX = 1 + XOP // Bulldozer XOP functions + XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV + XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. + XSAVEOPT // XSAVEOPT available + XSAVES // Supports XSAVES/XRSTORS and IA32_XSS // ARM features: AESARM // AES instructions @@ -302,9 +315,11 @@ type CPUInfo struct { L2 int // L2 Cache (per core or shared). Will be -1 if undetected L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected } - SGX SGXSupport - maxFunc uint32 - maxExFunc uint32 + SGX SGXSupport + AMDMemEncryption AMDMemEncryptionSupport + AVX10Level uint8 + maxFunc uint32 + maxExFunc uint32 } var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) @@ -1071,6 +1086,32 @@ func hasSGX(available, lc bool) (rval SGXSupport) { return } +type AMDMemEncryptionSupport struct { + Available bool + CBitPossition uint32 + NumVMPL uint32 + PhysAddrReduction uint32 + NumEntryptedGuests uint32 + MinSevNoEsAsid uint32 +} + +func hasAMDMemEncryption(available bool) (rval AMDMemEncryptionSupport) { + rval.Available = available + if !available { + return + } + + _, b, c, d := cpuidex(0x8000001f, 0) + + rval.CBitPossition = b & 0x3f + rval.PhysAddrReduction = (b >> 6) & 0x3F + rval.NumVMPL = (b >> 12) & 0xf + rval.NumEntryptedGuests = c + rval.MinSevNoEsAsid = d + + return +} + func support() flagSet { var fs flagSet mfi := maxFunctionID() @@ -1165,6 +1206,7 @@ func support() flagSet { fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) fs.setIf(ecx&(1<<13) != 0, TME) fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) + fs.setIf(ecx&(1<<23) != 0, KEYLOCKER) fs.setIf(ecx&(1<<27) != 0, MOVDIRI) fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) fs.setIf(ecx&(1<<29) != 0, ENQCMD) @@ -1201,7 +1243,10 @@ func support() flagSet { // CPUID.(EAX=7, ECX=1).EDX fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) fs.setIf(edx1&(1<<14) != 0, PREFETCHI) + fs.setIf(edx1&(1<<19) != 0, AVX10) + fs.setIf(edx1&(1<<21) != 0, APX_F) // Only detect AVX-512 features if XGETBV is supported if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { @@ -1252,6 +1297,19 @@ func support() flagSet { fs.setIf(edx&(1<<4) != 0, BHI_CTRL) fs.setIf(edx&(1<<5) != 0, MCDT_NO) + // Add keylocker features. + if fs.inSet(KEYLOCKER) && mfi >= 0x19 { + _, ebx, _, _ := cpuidex(0x19, 0) + fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4) + } + + // Add AVX10 features. 
+ if fs.inSet(AVX10) && mfi >= 0x24 { + _, ebx, _, _ := cpuidex(0x24, 0) + fs.setIf(ebx&(1<<16) != 0, AVX10_128) + fs.setIf(ebx&(1<<17) != 0, AVX10_256) + fs.setIf(ebx&(1<<18) != 0, AVX10_512) + } } // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) @@ -1394,6 +1452,29 @@ func support() flagSet { fs.setIf((a>>24)&1 == 1, VMSA_REGPROT) } + if maxExtendedFunction() >= 0x80000021 && vend == AMD { + a, _, _, _ := cpuid(0x80000021) + fs.setIf((a>>31)&1 == 1, SRSO_MSR_FIX) + fs.setIf((a>>30)&1 == 1, SRSO_USER_KERNEL_NO) + fs.setIf((a>>29)&1 == 1, SRSO_NO) + fs.setIf((a>>28)&1 == 1, IBPB_BRTYPE) + fs.setIf((a>>27)&1 == 1, SBPB) + } + + if mfi >= 0x20 { + // Microsoft has decided to purposefully hide the information + // of the guest TEE when VMs are being created using Hyper-V. + // + // This leads us to check for the Hyper-V cpuid features + // (0x4000000C), and then for the `ebx` value set. + // + // For Intel TDX, `ebx` is set as `0xbe3`, being 3 the part + // we're mostly interested about,according to: + // https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174 + _, ebx, _, _ := cpuid(0x4000000C) + fs.setIf(ebx == 0xbe3, TDX_GUEST) + } + if mfi >= 0x21 { // Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21). _, ebx, ecx, edx := cpuid(0x21) @@ -1404,6 +1485,14 @@ func support() flagSet { return fs } +func (c *CPUInfo) supportAVX10() uint8 { + if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) { + _, ebx, _, _ := cpuidex(0x24, 0) + return uint8(ebx) + } + return 0 +} + func valAsString(values ...uint32) []byte { r := make([]byte, 4*len(values)) for i, v := range values { diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index c946824ec0..799b400c2e 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -27,10 +27,12 @@ func addInfo(c *CPUInfo, safe bool) { c.Family, c.Model, c.Stepping = familyModel() c.featureSet = support() c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV)) c.ThreadsPerCore = threadsPerCore() c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() c.VendorID, c.VendorString = vendorID() + c.AVX10Level = c.supportAVX10() c.cacheSize() c.frequencies() } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 024c706af5..3a25603103 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -16,210 +16,223 @@ func _() { _ = x[AMXFP16-6] _ = x[AMXINT8-7] _ = x[AMXTILE-8] - _ = x[AVX-9] - _ = x[AVX2-10] - _ = x[AVX512BF16-11] - _ = x[AVX512BITALG-12] - _ = x[AVX512BW-13] - _ = x[AVX512CD-14] - _ = x[AVX512DQ-15] - _ = x[AVX512ER-16] - _ = x[AVX512F-17] - _ = x[AVX512FP16-18] - _ = x[AVX512IFMA-19] - _ = x[AVX512PF-20] - _ = x[AVX512VBMI-21] - _ = x[AVX512VBMI2-22] - _ = x[AVX512VL-23] - _ = x[AVX512VNNI-24] - _ = x[AVX512VP2INTERSECT-25] - _ = x[AVX512VPOPCNTDQ-26] - _ = x[AVXIFMA-27] - _ = x[AVXNECONVERT-28] - _ = x[AVXSLOW-29] - _ = x[AVXVNNI-30] - _ = x[AVXVNNIINT8-31] - _ = x[BHI_CTRL-32] - _ = x[BMI1-33] - _ = x[BMI2-34] - _ = x[CETIBT-35] - _ = x[CETSS-36] - _ = x[CLDEMOTE-37] - _ = x[CLMUL-38] - _ = x[CLZERO-39] - _ = x[CMOV-40] - _ = x[CMPCCXADD-41] - _ = 
x[CMPSB_SCADBS_SHORT-42] - _ = x[CMPXCHG8-43] - _ = x[CPBOOST-44] - _ = x[CPPC-45] - _ = x[CX16-46] - _ = x[EFER_LMSLE_UNS-47] - _ = x[ENQCMD-48] - _ = x[ERMS-49] - _ = x[F16C-50] - _ = x[FLUSH_L1D-51] - _ = x[FMA3-52] - _ = x[FMA4-53] - _ = x[FP128-54] - _ = x[FP256-55] - _ = x[FSRM-56] - _ = x[FXSR-57] - _ = x[FXSROPT-58] - _ = x[GFNI-59] - _ = x[HLE-60] - _ = x[HRESET-61] - _ = x[HTT-62] - _ = x[HWA-63] - _ = x[HYBRID_CPU-64] - _ = x[HYPERVISOR-65] - _ = x[IA32_ARCH_CAP-66] - _ = x[IA32_CORE_CAP-67] - _ = x[IBPB-68] - _ = x[IBRS-69] - _ = x[IBRS_PREFERRED-70] - _ = x[IBRS_PROVIDES_SMP-71] - _ = x[IBS-72] - _ = x[IBSBRNTRGT-73] - _ = x[IBSFETCHSAM-74] - _ = x[IBSFFV-75] - _ = x[IBSOPCNT-76] - _ = x[IBSOPCNTEXT-77] - _ = x[IBSOPSAM-78] - _ = x[IBSRDWROPCNT-79] - _ = x[IBSRIPINVALIDCHK-80] - _ = x[IBS_FETCH_CTLX-81] - _ = x[IBS_OPDATA4-82] - _ = x[IBS_OPFUSE-83] - _ = x[IBS_PREVENTHOST-84] - _ = x[IBS_ZEN4-85] - _ = x[IDPRED_CTRL-86] - _ = x[INT_WBINVD-87] - _ = x[INVLPGB-88] - _ = x[LAHF-89] - _ = x[LAM-90] - _ = x[LBRVIRT-91] - _ = x[LZCNT-92] - _ = x[MCAOVERFLOW-93] - _ = x[MCDT_NO-94] - _ = x[MCOMMIT-95] - _ = x[MD_CLEAR-96] - _ = x[MMX-97] - _ = x[MMXEXT-98] - _ = x[MOVBE-99] - _ = x[MOVDIR64B-100] - _ = x[MOVDIRI-101] - _ = x[MOVSB_ZL-102] - _ = x[MOVU-103] - _ = x[MPX-104] - _ = x[MSRIRC-105] - _ = x[MSRLIST-106] - _ = x[MSR_PAGEFLUSH-107] - _ = x[NRIPS-108] - _ = x[NX-109] - _ = x[OSXSAVE-110] - _ = x[PCONFIG-111] - _ = x[POPCNT-112] - _ = x[PPIN-113] - _ = x[PREFETCHI-114] - _ = x[PSFD-115] - _ = x[RDPRU-116] - _ = x[RDRAND-117] - _ = x[RDSEED-118] - _ = x[RDTSCP-119] - _ = x[RRSBA_CTRL-120] - _ = x[RTM-121] - _ = x[RTM_ALWAYS_ABORT-122] - _ = x[SERIALIZE-123] - _ = x[SEV-124] - _ = x[SEV_64BIT-125] - _ = x[SEV_ALTERNATIVE-126] - _ = x[SEV_DEBUGSWAP-127] - _ = x[SEV_ES-128] - _ = x[SEV_RESTRICTED-129] - _ = x[SEV_SNP-130] - _ = x[SGX-131] - _ = x[SGXLC-132] - _ = x[SHA-133] - _ = x[SME-134] - _ = x[SME_COHERENT-135] - _ = x[SPEC_CTRL_SSBD-136] - _ = x[SRBDS_CTRL-137] - _ = x[SSE-138] - _ = x[SSE2-139] - _ = x[SSE3-140] - _ = x[SSE4-141] - _ = x[SSE42-142] - _ = x[SSE4A-143] - _ = x[SSSE3-144] - _ = x[STIBP-145] - _ = x[STIBP_ALWAYSON-146] - _ = x[STOSB_SHORT-147] - _ = x[SUCCOR-148] - _ = x[SVM-149] - _ = x[SVMDA-150] - _ = x[SVMFBASID-151] - _ = x[SVML-152] - _ = x[SVMNP-153] - _ = x[SVMPF-154] - _ = x[SVMPFT-155] - _ = x[SYSCALL-156] - _ = x[SYSEE-157] - _ = x[TBM-158] - _ = x[TDX_GUEST-159] - _ = x[TLB_FLUSH_NESTED-160] - _ = x[TME-161] - _ = x[TOPEXT-162] - _ = x[TSCRATEMSR-163] - _ = x[TSXLDTRK-164] - _ = x[VAES-165] - _ = x[VMCBCLEAN-166] - _ = x[VMPL-167] - _ = x[VMSA_REGPROT-168] - _ = x[VMX-169] - _ = x[VPCLMULQDQ-170] - _ = x[VTE-171] - _ = x[WAITPKG-172] - _ = x[WBNOINVD-173] - _ = x[WRMSRNS-174] - _ = x[X87-175] - _ = x[XGETBV1-176] - _ = x[XOP-177] - _ = x[XSAVE-178] - _ = x[XSAVEC-179] - _ = x[XSAVEOPT-180] - _ = x[XSAVES-181] - _ = x[AESARM-182] - _ = x[ARMCPUID-183] - _ = x[ASIMD-184] - _ = x[ASIMDDP-185] - _ = x[ASIMDHP-186] - _ = x[ASIMDRDM-187] - _ = x[ATOMICS-188] - _ = x[CRC32-189] - _ = x[DCPOP-190] - _ = x[EVTSTRM-191] - _ = x[FCMA-192] - _ = x[FP-193] - _ = x[FPHP-194] - _ = x[GPA-195] - _ = x[JSCVT-196] - _ = x[LRCPC-197] - _ = x[PMULL-198] - _ = x[SHA1-199] - _ = x[SHA2-200] - _ = x[SHA3-201] - _ = x[SHA512-202] - _ = x[SM3-203] - _ = x[SM4-204] - _ = x[SVE-205] - _ = x[lastID-206] + _ = x[APX_F-9] + _ = x[AVX-10] + _ = x[AVX10-11] + _ = x[AVX10_128-12] + _ = x[AVX10_256-13] + _ = x[AVX10_512-14] + _ = x[AVX2-15] + _ = x[AVX512BF16-16] + _ = 
x[AVX512BITALG-17] + _ = x[AVX512BW-18] + _ = x[AVX512CD-19] + _ = x[AVX512DQ-20] + _ = x[AVX512ER-21] + _ = x[AVX512F-22] + _ = x[AVX512FP16-23] + _ = x[AVX512IFMA-24] + _ = x[AVX512PF-25] + _ = x[AVX512VBMI-26] + _ = x[AVX512VBMI2-27] + _ = x[AVX512VL-28] + _ = x[AVX512VNNI-29] + _ = x[AVX512VP2INTERSECT-30] + _ = x[AVX512VPOPCNTDQ-31] + _ = x[AVXIFMA-32] + _ = x[AVXNECONVERT-33] + _ = x[AVXSLOW-34] + _ = x[AVXVNNI-35] + _ = x[AVXVNNIINT8-36] + _ = x[AVXVNNIINT16-37] + _ = x[BHI_CTRL-38] + _ = x[BMI1-39] + _ = x[BMI2-40] + _ = x[CETIBT-41] + _ = x[CETSS-42] + _ = x[CLDEMOTE-43] + _ = x[CLMUL-44] + _ = x[CLZERO-45] + _ = x[CMOV-46] + _ = x[CMPCCXADD-47] + _ = x[CMPSB_SCADBS_SHORT-48] + _ = x[CMPXCHG8-49] + _ = x[CPBOOST-50] + _ = x[CPPC-51] + _ = x[CX16-52] + _ = x[EFER_LMSLE_UNS-53] + _ = x[ENQCMD-54] + _ = x[ERMS-55] + _ = x[F16C-56] + _ = x[FLUSH_L1D-57] + _ = x[FMA3-58] + _ = x[FMA4-59] + _ = x[FP128-60] + _ = x[FP256-61] + _ = x[FSRM-62] + _ = x[FXSR-63] + _ = x[FXSROPT-64] + _ = x[GFNI-65] + _ = x[HLE-66] + _ = x[HRESET-67] + _ = x[HTT-68] + _ = x[HWA-69] + _ = x[HYBRID_CPU-70] + _ = x[HYPERVISOR-71] + _ = x[IA32_ARCH_CAP-72] + _ = x[IA32_CORE_CAP-73] + _ = x[IBPB-74] + _ = x[IBPB_BRTYPE-75] + _ = x[IBRS-76] + _ = x[IBRS_PREFERRED-77] + _ = x[IBRS_PROVIDES_SMP-78] + _ = x[IBS-79] + _ = x[IBSBRNTRGT-80] + _ = x[IBSFETCHSAM-81] + _ = x[IBSFFV-82] + _ = x[IBSOPCNT-83] + _ = x[IBSOPCNTEXT-84] + _ = x[IBSOPSAM-85] + _ = x[IBSRDWROPCNT-86] + _ = x[IBSRIPINVALIDCHK-87] + _ = x[IBS_FETCH_CTLX-88] + _ = x[IBS_OPDATA4-89] + _ = x[IBS_OPFUSE-90] + _ = x[IBS_PREVENTHOST-91] + _ = x[IBS_ZEN4-92] + _ = x[IDPRED_CTRL-93] + _ = x[INT_WBINVD-94] + _ = x[INVLPGB-95] + _ = x[KEYLOCKER-96] + _ = x[KEYLOCKERW-97] + _ = x[LAHF-98] + _ = x[LAM-99] + _ = x[LBRVIRT-100] + _ = x[LZCNT-101] + _ = x[MCAOVERFLOW-102] + _ = x[MCDT_NO-103] + _ = x[MCOMMIT-104] + _ = x[MD_CLEAR-105] + _ = x[MMX-106] + _ = x[MMXEXT-107] + _ = x[MOVBE-108] + _ = x[MOVDIR64B-109] + _ = x[MOVDIRI-110] + _ = x[MOVSB_ZL-111] + _ = x[MOVU-112] + _ = x[MPX-113] + _ = x[MSRIRC-114] + _ = x[MSRLIST-115] + _ = x[MSR_PAGEFLUSH-116] + _ = x[NRIPS-117] + _ = x[NX-118] + _ = x[OSXSAVE-119] + _ = x[PCONFIG-120] + _ = x[POPCNT-121] + _ = x[PPIN-122] + _ = x[PREFETCHI-123] + _ = x[PSFD-124] + _ = x[RDPRU-125] + _ = x[RDRAND-126] + _ = x[RDSEED-127] + _ = x[RDTSCP-128] + _ = x[RRSBA_CTRL-129] + _ = x[RTM-130] + _ = x[RTM_ALWAYS_ABORT-131] + _ = x[SBPB-132] + _ = x[SERIALIZE-133] + _ = x[SEV-134] + _ = x[SEV_64BIT-135] + _ = x[SEV_ALTERNATIVE-136] + _ = x[SEV_DEBUGSWAP-137] + _ = x[SEV_ES-138] + _ = x[SEV_RESTRICTED-139] + _ = x[SEV_SNP-140] + _ = x[SGX-141] + _ = x[SGXLC-142] + _ = x[SHA-143] + _ = x[SME-144] + _ = x[SME_COHERENT-145] + _ = x[SPEC_CTRL_SSBD-146] + _ = x[SRBDS_CTRL-147] + _ = x[SRSO_MSR_FIX-148] + _ = x[SRSO_NO-149] + _ = x[SRSO_USER_KERNEL_NO-150] + _ = x[SSE-151] + _ = x[SSE2-152] + _ = x[SSE3-153] + _ = x[SSE4-154] + _ = x[SSE42-155] + _ = x[SSE4A-156] + _ = x[SSSE3-157] + _ = x[STIBP-158] + _ = x[STIBP_ALWAYSON-159] + _ = x[STOSB_SHORT-160] + _ = x[SUCCOR-161] + _ = x[SVM-162] + _ = x[SVMDA-163] + _ = x[SVMFBASID-164] + _ = x[SVML-165] + _ = x[SVMNP-166] + _ = x[SVMPF-167] + _ = x[SVMPFT-168] + _ = x[SYSCALL-169] + _ = x[SYSEE-170] + _ = x[TBM-171] + _ = x[TDX_GUEST-172] + _ = x[TLB_FLUSH_NESTED-173] + _ = x[TME-174] + _ = x[TOPEXT-175] + _ = x[TSCRATEMSR-176] + _ = x[TSXLDTRK-177] + _ = x[VAES-178] + _ = x[VMCBCLEAN-179] + _ = x[VMPL-180] + _ = x[VMSA_REGPROT-181] + _ = x[VMX-182] + _ = x[VPCLMULQDQ-183] + _ = x[VTE-184] + _ = 
x[WAITPKG-185] + _ = x[WBNOINVD-186] + _ = x[WRMSRNS-187] + _ = x[X87-188] + _ = x[XGETBV1-189] + _ = x[XOP-190] + _ = x[XSAVE-191] + _ = x[XSAVEC-192] + _ = x[XSAVEOPT-193] + _ = x[XSAVES-194] + _ = x[AESARM-195] + _ = x[ARMCPUID-196] + _ = x[ASIMD-197] + _ = x[ASIMDDP-198] + _ = x[ASIMDHP-199] + _ = x[ASIMDRDM-200] + _ = x[ATOMICS-201] + _ = x[CRC32-202] + _ = x[DCPOP-203] + _ = x[EVTSTRM-204] + _ = x[FCMA-205] + _ = x[FP-206] + _ = x[FPHP-207] + _ = x[GPA-208] + _ = x[JSCVT-209] + _ = x[LRCPC-210] + _ = x[PMULL-211] + _ = x[SHA1-212] + _ = x[SHA2-213] + _ = x[SHA3-214] + _ = x[SHA512-215] + _ = x[SM3-216] + _ = x[SM4-217] + _ = x[SVE-218] + _ = x[lastID-219] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1191, 1207, 1210, 1216, 1226, 1234, 1238, 1247, 1251, 1263, 1266, 1276, 1279, 1286, 1294, 1301, 1304, 1311, 1314, 1319, 1325, 1333, 1339, 1345, 1353, 1358, 1365, 1372, 1380, 1387, 1392, 1397, 1404, 1408, 1410, 1414, 1417, 1422, 1427, 1432, 1436, 1440, 1444, 1450, 1453, 1456, 1459, 1465} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 
1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/moby/spdystream/connection.go b/vendor/github.com/moby/spdystream/connection.go index d906bb05ce..d649eccc85 100644 --- a/vendor/github.com/moby/spdystream/connection.go +++ b/vendor/github.com/moby/spdystream/connection.go @@ -208,9 +208,10 @@ type Connection struct { nextStreamId spdy.StreamId receivedStreamId spdy.StreamId - pingIdLock sync.Mutex - pingId uint32 - pingChans map[uint32]chan error + // pingLock protects pingChans and pingId + pingLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error shutdownLock sync.Mutex shutdownChan chan error @@ -274,16 +275,20 @@ func NewConnection(conn net.Conn, server bool) (*Connection, error) { // returns the response time func (s *Connection) Ping() (time.Duration, error) { pid := s.pingId - s.pingIdLock.Lock() + s.pingLock.Lock() if s.pingId > 0x7ffffffe { s.pingId = s.pingId - 0x7ffffffe } else { s.pingId = s.pingId + 2 } - s.pingIdLock.Unlock() pingChan := make(chan error) s.pingChans[pid] = pingChan - defer delete(s.pingChans, pid) + s.pingLock.Unlock() + defer func() { + s.pingLock.Lock() + delete(s.pingChans, pid) + s.pingLock.Unlock() + }() frame := &spdy.PingFrame{Id: pid} startTime := time.Now() @@ -612,10 +617,14 @@ func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { } func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { - if s.pingId&0x01 != frame.Id&0x01 { + s.pingLock.Lock() + pingId := s.pingId + pingChan, pingOk := s.pingChans[frame.Id] + s.pingLock.Unlock() + + if pingId&0x01 != frame.Id&0x01 { return s.framer.WriteFrame(frame) } - pingChan, pingOk := s.pingChans[frame.Id] if pingOk { close(pingChan) } @@ -731,16 +740,14 @@ func (s *Connection) shutdown(closeTimeout time.Duration) { if err != nil { duration := 10 * time.Minute - time.AfterFunc(duration, func() { - select { - case err, ok := <-s.shutdownChan: - if ok { - debugMessage("Unhandled close error after %s: %s", duration, err) - } - default: - } - }) - s.shutdownChan <- err + timer := time.NewTimer(duration) + defer timer.Stop() + select { + case s.shutdownChan <- err: + // error was handled + case <-timer.C: + debugMessage("Unhandled close error after %s: %s", duration, err) + } } close(s.shutdownChan) } diff --git a/vendor/github.com/moby/spdystream/stream.go b/vendor/github.com/moby/spdystream/stream.go index 404e3c02df..171c1e9e33 100644 --- a/vendor/github.com/moby/spdystream/stream.go +++ b/vendor/github.com/moby/spdystream/stream.go @@ -305,6 +305,8 @@ func (s *Stream) Identifier() uint32 { // IsFinished returns whether the stream has finished // sending data func (s *Stream) IsFinished() bool { + s.finishLock.Lock() + defer s.finishLock.Unlock() return s.finished } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go index c943e68330..fbb5e283b1 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go @@ -163,6 
+163,11 @@ func (m *AlertGroup) contextValidateAlerts(ctx context.Context, formats strfmt.R for i := 0; i < len(m.Alerts); i++ { if m.Alerts[i] != nil { + + if swag.IsZero(m.Alerts[i]) { // not required + return nil + } + if err := m.Alerts[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("alerts" + "." + strconv.Itoa(i)) @@ -195,6 +200,7 @@ func (m *AlertGroup) contextValidateLabels(ctx context.Context, formats strfmt.R func (m *AlertGroup) contextValidateReceiver(ctx context.Context, formats strfmt.Registry) error { if m.Receiver != nil { + if err := m.Receiver.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("receiver") diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go index 31ccb2172b..338b22127a 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go @@ -68,6 +68,11 @@ func (m AlertGroups) ContextValidate(ctx context.Context, formats strfmt.Registr for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go index 0d5370edfb..2ab11ec461 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go @@ -175,6 +175,7 @@ func (m *AlertmanagerStatus) ContextValidate(ctx context.Context, formats strfmt func (m *AlertmanagerStatus) contextValidateCluster(ctx context.Context, formats strfmt.Registry) error { if m.Cluster != nil { + if err := m.Cluster.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cluster") @@ -191,6 +192,7 @@ func (m *AlertmanagerStatus) contextValidateCluster(ctx context.Context, formats func (m *AlertmanagerStatus) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { if m.Config != nil { + if err := m.Config.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("config") @@ -207,6 +209,7 @@ func (m *AlertmanagerStatus) contextValidateConfig(ctx context.Context, formats func (m *AlertmanagerStatus) contextValidateVersionInfo(ctx context.Context, formats strfmt.Registry) error { if m.VersionInfo != nil { + if err := m.VersionInfo.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("versionInfo") diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go index 0078320f15..f470bc010f 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go @@ -156,6 +156,11 @@ func (m *ClusterStatus) contextValidatePeers(ctx context.Context, formats strfmt for i := 0; i < len(m.Peers); i++ { if m.Peers[i] != nil { + + if swag.IsZero(m.Peers[i]) { // not required + return nil + 
} + if err := m.Peers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("peers" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go index f7db3321c1..195bb53764 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go @@ -366,6 +366,11 @@ func (m *GettableAlert) contextValidateReceivers(ctx context.Context, formats st for i := 0; i < len(m.Receivers); i++ { if m.Receivers[i] != nil { + + if swag.IsZero(m.Receivers[i]) { // not required + return nil + } + if err := m.Receivers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("receivers" + "." + strconv.Itoa(i)) @@ -384,6 +389,7 @@ func (m *GettableAlert) contextValidateReceivers(ctx context.Context, formats st func (m *GettableAlert) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go index 4efe8cd5ec..db78dcc471 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go @@ -68,6 +68,11 @@ func (m GettableAlerts) ContextValidate(ctx context.Context, formats strfmt.Regi for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go index fe9d178d7f..9d60f6cad0 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go @@ -202,6 +202,7 @@ func (m *GettableSilence) ContextValidate(ctx context.Context, formats strfmt.Re func (m *GettableSilence) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go index cda5ef6497..fed9d0b886 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go @@ -68,6 +68,11 @@ func (m GettableSilences) ContextValidate(ctx context.Context, formats strfmt.Re for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git 
a/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go index 4e2061872e..fbff9875eb 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go @@ -75,6 +75,11 @@ func (m Matchers) ContextValidate(ctx context.Context, formats strfmt.Registry) for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go index dcec7f0a19..105b8b30cd 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go @@ -203,6 +203,10 @@ func (m *PostableAlert) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *PostableAlert) contextValidateAnnotations(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Annotations) { // not required + return nil + } + if err := m.Annotations.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("annotations") diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go index ed4d7fb9ba..3df968820d 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go @@ -68,6 +68,11 @@ func (m PostableAlerts) ContextValidate(ctx context.Context, formats strfmt.Regi for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/prometheus/alertmanager/config/config.go b/vendor/github.com/prometheus/alertmanager/config/config.go index ae5f786ee8..7f3602e066 100644 --- a/vendor/github.com/prometheus/alertmanager/config/config.go +++ b/vendor/github.com/prometheus/alertmanager/config/config.go @@ -15,6 +15,7 @@ package config import ( "encoding/json" + "errors" "fmt" "net" "net/url" @@ -25,11 +26,11 @@ import ( "strings" "time" - "github.com/pkg/errors" commoncfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "gopkg.in/yaml.v2" + "github.com/prometheus/alertmanager/matchers/compat" "github.com/prometheus/alertmanager/pkg/labels" "github.com/prometheus/alertmanager/timeinterval" ) @@ -688,7 +689,7 @@ func (hp *HostPort) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if hp.Port == "" { - return errors.Errorf("address %q: port cannot be empty", s) + return fmt.Errorf("address %q: port cannot be empty", s) } return nil } @@ -710,7 +711,7 @@ func (hp *HostPort) UnmarshalJSON(data []byte) error { return err } if hp.Port == "" { - return errors.Errorf("address %q: port cannot be empty", s) + return fmt.Errorf("address %q: port cannot be empty", s) } return nil } @@ -813,7 +814,7 @@ func (r *Route) UnmarshalYAML(unmarshal func(interface{}) error) error { r.GroupByAll = true } else { labelName := 
model.LabelName(l) - if !labelName.IsValid() { + if !compat.IsValidLabelName(labelName) { return fmt.Errorf("invalid label name %q in group_by list", l) } r.GroupBy = append(r.GroupBy, labelName) @@ -907,7 +908,7 @@ type Receiver struct { SNSConfigs []*SNSConfig `yaml:"sns_configs,omitempty" json:"sns_configs,omitempty"` TelegramConfigs []*TelegramConfig `yaml:"telegram_configs,omitempty" json:"telegram_configs,omitempty"` WebexConfigs []*WebexConfig `yaml:"webex_configs,omitempty" json:"webex_configs,omitempty"` - MSTeamsConfigs []*MSTeamsConfig `yaml:"msteams_configs,omitempty" json:"teams_configs,omitempty"` + MSTeamsConfigs []*MSTeamsConfig `yaml:"msteams_configs,omitempty" json:"msteams_configs,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface for Receiver. @@ -1005,7 +1006,7 @@ func (m *Matchers) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } for _, line := range lines { - pm, err := labels.ParseMatchers(line) + pm, err := compat.Matchers(line, "config") if err != nil { return err } @@ -1031,7 +1032,7 @@ func (m *Matchers) UnmarshalJSON(data []byte) error { return err } for _, line := range lines { - pm, err := labels.ParseMatchers(line) + pm, err := compat.Matchers(line, "config") if err != nil { return err } diff --git a/vendor/github.com/prometheus/alertmanager/config/notifiers.go b/vendor/github.com/prometheus/alertmanager/config/notifiers.go index 0da9e27ba2..d79c8b5057 100644 --- a/vendor/github.com/prometheus/alertmanager/config/notifiers.go +++ b/vendor/github.com/prometheus/alertmanager/config/notifiers.go @@ -21,7 +21,6 @@ import ( "text/template" "time" - "github.com/pkg/errors" commoncfg "github.com/prometheus/common/config" "github.com/prometheus/common/sigv4" ) @@ -169,8 +168,9 @@ var ( NotifierConfig: NotifierConfig{ VSendResolved: true, }, - Title: `{{ template "msteams.default.title" . }}`, - Text: `{{ template "msteams.default.text" . }}`, + Title: `{{ template "msteams.default.title" . }}`, + Summary: `{{ template "msteams.default.summary" . }}`, + Text: `{{ template "msteams.default.text" . }}`, } ) @@ -216,8 +216,9 @@ func (c *WebexConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { type DiscordConfig struct { NotifierConfig `yaml:",inline" json:",inline"` - HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` - WebhookURL *SecretURL `yaml:"webhook_url,omitempty" json:"webhook_url,omitempty"` + HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` + WebhookURL *SecretURL `yaml:"webhook_url,omitempty" json:"webhook_url,omitempty"` + WebhookURLFile string `yaml:"webhook_url_file,omitempty" json:"webhook_url_file,omitempty"` Title string `yaml:"title,omitempty" json:"title,omitempty"` Message string `yaml:"message,omitempty" json:"message,omitempty"` @@ -227,7 +228,19 @@ type DiscordConfig struct { func (c *DiscordConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultDiscordConfig type plain DiscordConfig - return unmarshal((*plain)(c)) + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + if c.WebhookURL == nil && c.WebhookURLFile == "" { + return fmt.Errorf("one of webhook_url or webhook_url_file must be configured") + } + + if c.WebhookURL != nil && len(c.WebhookURLFile) > 0 { + return fmt.Errorf("at most one of webhook_url & webhook_url_file must be configured") + } + + return nil } // EmailConfig configures notifications via mail. 
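
The DiscordConfig hunk above now enforces that exactly one of webhook_url and webhook_url_file is configured, mirroring the existing webhook url/url_file rule. A minimal sketch of how that validation surfaces to callers, assuming the vendored import paths are used directly; the standalone main package and the secret-file path are illustrative only:

package main

import (
	"fmt"

	"github.com/prometheus/alertmanager/config"
	"gopkg.in/yaml.v2"
)

func main() {
	// Exactly one of webhook_url / webhook_url_file set: accepted.
	// The file path is a made-up example.
	var ok config.DiscordConfig
	if err := yaml.Unmarshal([]byte("webhook_url_file: /run/secrets/discord-webhook"), &ok); err != nil {
		fmt.Println("unexpected:", err)
	}

	// Neither field set: UnmarshalYAML now rejects the config.
	var bad config.DiscordConfig
	err := yaml.Unmarshal([]byte("title: test"), &bad)
	fmt.Println(err) // one of webhook_url or webhook_url_file must be configured
}

The same pattern is applied to MSTeamsConfig further down, which also gains a webhook_url_file field and the matching mutual-exclusivity check.
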
@@ -503,11 +516,6 @@ func (c *WebhookConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.URL != nil && c.URLFile != "" { return fmt.Errorf("at most one of url & url_file must be configured") } - if c.URL != nil { - if c.URL.Scheme != "https" && c.URL.Scheme != "http" { - return fmt.Errorf("scheme required for webhook url") - } - } return nil } @@ -545,7 +553,7 @@ func (c *WechatConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } if !wechatTypeMatcher.MatchString(c.MessageType) { - return errors.Errorf("weChat message type %q does not match valid options %s", c.MessageType, wechatValidTypesRe) + return fmt.Errorf("weChat message type %q does not match valid options %s", c.MessageType, wechatValidTypesRe) } return nil @@ -591,18 +599,18 @@ func (c *OpsGenieConfig) UnmarshalYAML(unmarshal func(interface{}) error) error for _, r := range c.Responders { if r.ID == "" && r.Username == "" && r.Name == "" { - return errors.Errorf("opsGenieConfig responder %v has to have at least one of id, username or name specified", r) + return fmt.Errorf("opsGenieConfig responder %v has to have at least one of id, username or name specified", r) } if strings.Contains(r.Type, "{{") { _, err := template.New("").Parse(r.Type) if err != nil { - return errors.Errorf("opsGenieConfig responder %v type is not a valid template: %v", r, err) + return fmt.Errorf("opsGenieConfig responder %v type is not a valid template: %w", r, err) } } else { r.Type = strings.ToLower(r.Type) if !opsgenieTypeMatcher.MatchString(r.Type) { - return errors.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) + return fmt.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) } } } @@ -694,6 +702,7 @@ type PushoverConfig struct { Priority string `yaml:"priority,omitempty" json:"priority,omitempty"` Retry duration `yaml:"retry,omitempty" json:"retry,omitempty"` Expire duration `yaml:"expire,omitempty" json:"expire,omitempty"` + TTL duration `yaml:"ttl,omitempty" json:"ttl,omitempty"` HTML bool `yaml:"html" json:"html,omitempty"` } @@ -791,13 +800,27 @@ type MSTeamsConfig struct { NotifierConfig `yaml:",inline" json:",inline"` HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` WebhookURL *SecretURL `yaml:"webhook_url,omitempty" json:"webhook_url,omitempty"` + WebhookURLFile string `yaml:"webhook_url_file,omitempty" json:"webhook_url_file,omitempty"` - Title string `yaml:"title,omitempty" json:"title,omitempty"` - Text string `yaml:"text,omitempty" json:"text,omitempty"` + Title string `yaml:"title,omitempty" json:"title,omitempty"` + Summary string `yaml:"summary,omitempty" json:"summary,omitempty"` + Text string `yaml:"text,omitempty" json:"text,omitempty"` } func (c *MSTeamsConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultMSTeamsConfig type plain MSTeamsConfig - return unmarshal((*plain)(c)) + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + if c.WebhookURL == nil && c.WebhookURLFile == "" { + return fmt.Errorf("one of webhook_url or webhook_url_file must be configured") + } + + if c.WebhookURL != nil && len(c.WebhookURLFile) > 0 { + return fmt.Errorf("at most one of webhook_url & webhook_url_file must be configured") + } + + return nil } diff --git a/vendor/github.com/prometheus/alertmanager/featurecontrol/featurecontrol.go b/vendor/github.com/prometheus/alertmanager/featurecontrol/featurecontrol.go new file mode 100644 index 
0000000000..9ff7a2d8fd --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/featurecontrol/featurecontrol.go @@ -0,0 +1,123 @@ +// Copyright 2023 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package featurecontrol + +import ( + "errors" + "fmt" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" +) + +const ( + FeatureReceiverNameInMetrics = "receiver-name-in-metrics" + FeatureClassicMode = "classic-mode" + FeatureUTF8StrictMode = "utf8-strict-mode" +) + +var AllowedFlags = []string{ + FeatureReceiverNameInMetrics, + FeatureClassicMode, + FeatureUTF8StrictMode, +} + +type Flagger interface { + EnableReceiverNamesInMetrics() bool + ClassicMode() bool + UTF8StrictMode() bool +} + +type Flags struct { + logger log.Logger + enableReceiverNamesInMetrics bool + classicMode bool + utf8StrictMode bool +} + +func (f *Flags) EnableReceiverNamesInMetrics() bool { + return f.enableReceiverNamesInMetrics +} + +func (f *Flags) ClassicMode() bool { + return f.classicMode +} + +func (f *Flags) UTF8StrictMode() bool { + return f.utf8StrictMode +} + +type flagOption func(flags *Flags) + +func enableReceiverNameInMetrics() flagOption { + return func(configs *Flags) { + configs.enableReceiverNamesInMetrics = true + } +} + +func enableClassicMode() flagOption { + return func(configs *Flags) { + configs.classicMode = true + } +} + +func enableUTF8StrictMode() flagOption { + return func(configs *Flags) { + configs.utf8StrictMode = true + } +} + +func NewFlags(logger log.Logger, features string) (Flagger, error) { + fc := &Flags{logger: logger} + opts := []flagOption{} + + if len(features) == 0 { + return NoopFlags{}, nil + } + + for _, feature := range strings.Split(features, ",") { + switch feature { + case FeatureReceiverNameInMetrics: + opts = append(opts, enableReceiverNameInMetrics()) + level.Warn(logger).Log("msg", "Experimental receiver name in metrics enabled") + case FeatureClassicMode: + opts = append(opts, enableClassicMode()) + level.Warn(logger).Log("msg", "Classic mode enabled") + case FeatureUTF8StrictMode: + opts = append(opts, enableUTF8StrictMode()) + level.Warn(logger).Log("msg", "UTF-8 strict mode enabled") + default: + return nil, fmt.Errorf("Unknown option '%s' for --enable-feature", feature) + } + } + + for _, opt := range opts { + opt(fc) + } + + if fc.classicMode && fc.utf8StrictMode { + return nil, errors.New("cannot have both classic and UTF-8 modes enabled") + } + + return fc, nil +} + +type NoopFlags struct{} + +func (n NoopFlags) EnableReceiverNamesInMetrics() bool { return false } + +func (n NoopFlags) ClassicMode() bool { return false } + +func (n NoopFlags) UTF8StrictMode() bool { return false } diff --git a/vendor/github.com/prometheus/alertmanager/matchers/compat/parse.go b/vendor/github.com/prometheus/alertmanager/matchers/compat/parse.go new file mode 100644 index 0000000000..0c0dfffb1f --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/matchers/compat/parse.go @@ -0,0 +1,205 @@ +// Copyright 2023 The 
Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compat + +import ( + "fmt" + "reflect" + "strings" + "unicode/utf8" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" + + "github.com/prometheus/alertmanager/featurecontrol" + "github.com/prometheus/alertmanager/matchers/parse" + "github.com/prometheus/alertmanager/pkg/labels" +) + +var ( + isValidLabelName = isValidClassicLabelName(log.NewNopLogger()) + parseMatcher = ClassicMatcherParser(log.NewNopLogger()) + parseMatchers = ClassicMatchersParser(log.NewNopLogger()) +) + +// IsValidLabelName returns true if the string is a valid label name. +func IsValidLabelName(name model.LabelName) bool { + return isValidLabelName(name) +} + +type ParseMatcher func(input, origin string) (*labels.Matcher, error) + +type ParseMatchers func(input, origin string) (labels.Matchers, error) + +// Matcher parses the matcher in the input string. It returns an error +// if the input is invalid or contains two or more matchers. +func Matcher(input, origin string) (*labels.Matcher, error) { + return parseMatcher(input, origin) +} + +// Matchers parses one or more matchers in the input string. It returns +// an error if the input is invalid. +func Matchers(input, origin string) (labels.Matchers, error) { + return parseMatchers(input, origin) +} + +// InitFromFlags initializes the compat package from the flagger. +func InitFromFlags(l log.Logger, f featurecontrol.Flagger) { + if f.ClassicMode() { + isValidLabelName = isValidClassicLabelName(l) + parseMatcher = ClassicMatcherParser(l) + parseMatchers = ClassicMatchersParser(l) + } else if f.UTF8StrictMode() { + isValidLabelName = isValidUTF8LabelName(l) + parseMatcher = UTF8MatcherParser(l) + parseMatchers = UTF8MatchersParser(l) + } else { + isValidLabelName = isValidUTF8LabelName(l) + parseMatcher = FallbackMatcherParser(l) + parseMatchers = FallbackMatchersParser(l) + } +} + +// ClassicMatcherParser uses the pkg/labels parser to parse the matcher in +// the input string. +func ClassicMatcherParser(l log.Logger) ParseMatcher { + return func(input, origin string) (matcher *labels.Matcher, err error) { + level.Debug(l).Log("msg", "Parsing with classic matchers parser", "input", input, "origin", origin) + return labels.ParseMatcher(input) + } +} + +// ClassicMatchersParser uses the pkg/labels parser to parse zero or more +// matchers in the input string. It returns an error if the input is invalid. +func ClassicMatchersParser(l log.Logger) ParseMatchers { + return func(input, origin string) (matchers labels.Matchers, err error) { + level.Debug(l).Log("msg", "Parsing with classic matchers parser", "input", input, "origin", origin) + return labels.ParseMatchers(input) + } +} + +// UTF8MatcherParser uses the new matchers/parse parser to parse the matcher +// in the input string. If this fails it does not revert to the pkg/labels parser. 
+func UTF8MatcherParser(l log.Logger) ParseMatcher { + return func(input, origin string) (matcher *labels.Matcher, err error) { + level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser", "input", input, "origin", origin) + if strings.HasPrefix(input, "{") || strings.HasSuffix(input, "}") { + return nil, fmt.Errorf("unexpected open or close brace: %s", input) + } + return parse.Matcher(input) + } +} + +// UTF8MatchersParser uses the new matchers/parse parser to parse zero or more +// matchers in the input string. If this fails it does not revert to the +// pkg/labels parser. +func UTF8MatchersParser(l log.Logger) ParseMatchers { + return func(input, origin string) (matchers labels.Matchers, err error) { + level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser", "input", input, "origin", origin) + return parse.Matchers(input) + } +} + +// FallbackMatcherParser uses the new matchers/parse parser to parse zero or more +// matchers in the string. If this fails it reverts to the pkg/labels parser and +// emits a warning log line. +func FallbackMatcherParser(l log.Logger) ParseMatcher { + return func(input, origin string) (matcher *labels.Matcher, err error) { + level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser, with fallback to classic matchers parser", "input", input, "origin", origin) + if strings.HasPrefix(input, "{") || strings.HasSuffix(input, "}") { + return nil, fmt.Errorf("unexpected open or close brace: %s", input) + } + // Parse the input in both parsers to look for disagreement and incompatible + // inputs. + nMatcher, nErr := parse.Matcher(input) + cMatcher, cErr := labels.ParseMatcher(input) + if nErr != nil { + // If the input is invalid in both parsers, return the error. + if cErr != nil { + return nil, cErr + } + // The input is valid in the pkg/labels parser, but not the matchers/parse + // parser. This means the input is not forwards compatible. + suggestion := cMatcher.String() + level.Warn(l).Log("msg", "Alertmanager is moving to a new parser for labels and matchers, and this input is incompatible. Alertmanager has instead parsed the input using the classic matchers parser as a fallback. To make this input compatible with the UTF-8 matchers parser please make sure all regular expressions and values are double-quoted. If you are still seeing this message please open an issue.", "input", input, "origin", origin, "err", nErr, "suggestion", suggestion) + return cMatcher, nil + } + // If the input is valid in both parsers, but produces different results, + // then there is disagreement. + if nErr == nil && cErr == nil && !reflect.DeepEqual(nMatcher, cMatcher) { + level.Warn(l).Log("msg", "Matchers input has disagreement", "input", input, "origin", origin) + return cMatcher, nil + } + return nMatcher, nil + } +} + +// FallbackMatchersParser uses the new matchers/parse parser to parse the +// matcher in the input string. If this fails it falls back to the pkg/labels +// parser and emits a warning log line. +func FallbackMatchersParser(l log.Logger) ParseMatchers { + return func(input, origin string) (matchers labels.Matchers, err error) { + level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser, with fallback to classic matchers parser", "input", input, "origin", origin) + // Parse the input in both parsers to look for disagreement and incompatible + // inputs. + nMatchers, nErr := parse.Matchers(input) + cMatchers, cErr := labels.ParseMatchers(input) + if nErr != nil { + // If the input is invalid in both parsers, return the error. 
+ if cErr != nil { + return nil, cErr + } + // The input is valid in the pkg/labels parser, but not the matchers/parse + // parser. This means the input is not forwards compatible. + var sb strings.Builder + for i, n := range cMatchers { + sb.WriteString(n.String()) + if i < len(cMatchers)-1 { + sb.WriteRune(',') + } + } + suggestion := sb.String() + // The input is valid in the pkg/labels parser, but not the + // new matchers/parse parser. + level.Warn(l).Log("msg", "Alertmanager is moving to a new parser for labels and matchers, and this input is incompatible. Alertmanager has instead parsed the input using the classic matchers parser as a fallback. To make this input compatible with the UTF-8 matchers parser please make sure all regular expressions and values are double-quoted. If you are still seeing this message please open an issue.", "input", input, "origin", origin, "err", nErr, "suggestion", suggestion) + return cMatchers, nil + } + // If the input is valid in both parsers, but produces different results, + // then there is disagreement. We need to compare to labels.Matchers(cMatchers) + // as cMatchers is a []*labels.Matcher not labels.Matchers. + if nErr == nil && cErr == nil && !reflect.DeepEqual(nMatchers, labels.Matchers(cMatchers)) { + level.Warn(l).Log("msg", "Matchers input has disagreement", "input", input, "origin", origin) + return cMatchers, nil + } + return nMatchers, nil + } +} + +// isValidClassicLabelName returns true if the string is a valid classic label name. +func isValidClassicLabelName(_ log.Logger) func(model.LabelName) bool { + return func(name model.LabelName) bool { + return name.IsValid() + } +} + +// isValidUTF8LabelName returns true if the string is a valid UTF-8 label name. +func isValidUTF8LabelName(_ log.Logger) func(model.LabelName) bool { + return func(name model.LabelName) bool { + if len(name) == 0 { + return false + } + return utf8.ValidString(string(name)) + } +} diff --git a/vendor/github.com/prometheus/alertmanager/matchers/parse/lexer.go b/vendor/github.com/prometheus/alertmanager/matchers/parse/lexer.go new file mode 100644 index 0000000000..d6daa6a9e8 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/matchers/parse/lexer.go @@ -0,0 +1,309 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parse + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +const ( + eof rune = -1 +) + +func isReserved(r rune) bool { + return unicode.IsSpace(r) || strings.ContainsRune("{}!=~,\\\"'`", r) +} + +// expectedError is returned when the next rune does not match what is expected. 
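The reserved-rune set defined above determines which label names and values must be double-quoted for the new parser. A small stand-alone illustration follows (not vendored code; the helper simply mirrors the unexported isReserved):

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// isReservedRune mirrors the vendored isReserved: any of these runes ends an
// unquoted token, so tokens containing them must be double-quoted.
func isReservedRune(r rune) bool {
	return unicode.IsSpace(r) || strings.ContainsRune("{}!=~,\\\"'`", r)
}

func main() {
	fmt.Println(strings.ContainsFunc("foo.bar/baz", isReservedRune)) // false: can stay unquoted
	fmt.Println(strings.ContainsFunc("foo bar", isReservedRune))     // true: must be written "foo bar"
}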
+type expectedError struct { + position + input string + expected string +} + +func (e expectedError) Error() string { + if e.offsetEnd >= len(e.input) { + return fmt.Sprintf("%d:%d: unexpected end of input, expected one of '%s'", + e.columnStart, + e.columnEnd, + e.expected, + ) + } + return fmt.Sprintf("%d:%d: %s: expected one of '%s'", + e.columnStart, + e.columnEnd, + e.input[e.offsetStart:e.offsetEnd], + e.expected, + ) +} + +// invalidInputError is returned when the next rune in the input does not match +// the grammar of Prometheus-like matchers. +type invalidInputError struct { + position + input string +} + +func (e invalidInputError) Error() string { + return fmt.Sprintf("%d:%d: %s: invalid input", + e.columnStart, + e.columnEnd, + e.input[e.offsetStart:e.offsetEnd], + ) +} + +// unterminatedError is returned when text in quotes does not have a closing quote. +type unterminatedError struct { + position + input string + quote rune +} + +func (e unterminatedError) Error() string { + return fmt.Sprintf("%d:%d: %s: missing end %c", + e.columnStart, + e.columnEnd, + e.input[e.offsetStart:e.offsetEnd], + e.quote, + ) +} + +// lexer scans a sequence of tokens that match the grammar of Prometheus-like +// matchers. A token is emitted for each call to scan() which returns the +// next token in the input or an error if the input does not conform to the +// grammar. A token can be one of a number of kinds and corresponds to a +// subslice of the input. Once the input has been consumed successive calls to +// scan() return a tokenEOF token. +type lexer struct { + input string + err error + start int // The offset of the current token. + pos int // The position of the cursor in the input. + width int // The width of the last rune. + column int // The column offset of the current token. + cols int // The number of columns (runes) decoded from the input. +} + +// Scans the next token in the input or an error if the input does not +// conform to the grammar. Once the input has been consumed successive +// calls scan() return a tokenEOF token. +func (l *lexer) scan() (token, error) { + t := token{} + // Do not attempt to emit more tokens if the input is invalid. + if l.err != nil { + return t, l.err + } + // Iterate over each rune in the input and either emit a token or an error. + for r := l.next(); r != eof; r = l.next() { + switch { + case r == '{': + t = l.emit(tokenOpenBrace) + return t, l.err + case r == '}': + t = l.emit(tokenCloseBrace) + return t, l.err + case r == ',': + t = l.emit(tokenComma) + return t, l.err + case r == '=' || r == '!': + l.rewind() + t, l.err = l.scanOperator() + return t, l.err + case r == '"': + l.rewind() + t, l.err = l.scanQuoted() + return t, l.err + case !isReserved(r): + l.rewind() + t, l.err = l.scanUnquoted() + return t, l.err + case unicode.IsSpace(r): + l.skip() + default: + l.err = invalidInputError{ + position: l.position(), + input: l.input, + } + return t, l.err + } + } + return t, l.err +} + +func (l *lexer) scanOperator() (token, error) { + // If the first rune is an '!' then it must be followed with either an + // '=' or '~' to not match a string or regex. + if l.accept("!") { + if l.accept("=") { + return l.emit(tokenNotEquals), nil + } + if l.accept("~") { + return l.emit(tokenNotMatches), nil + } + return token{}, expectedError{ + position: l.position(), + input: l.input, + expected: "=~", + } + } + // If the first rune is an '=' then it can be followed with an optional + // '~' to match a regex. 
+ if l.accept("=") { + if l.accept("~") { + return l.emit(tokenMatches), nil + } + return l.emit(tokenEquals), nil + } + return token{}, expectedError{ + position: l.position(), + input: l.input, + expected: "!=", + } +} + +func (l *lexer) scanQuoted() (token, error) { + if err := l.expect("\""); err != nil { + return token{}, err + } + var isEscaped bool + for r := l.next(); r != eof; r = l.next() { + if isEscaped { + isEscaped = false + } else if r == '\\' { + isEscaped = true + } else if r == '"' { + l.rewind() + break + } + } + if err := l.expect("\""); err != nil { + return token{}, unterminatedError{ + position: l.position(), + input: l.input, + quote: '"', + } + } + return l.emit(tokenQuoted), nil +} + +func (l *lexer) scanUnquoted() (token, error) { + for r := l.next(); r != eof; r = l.next() { + if isReserved(r) { + l.rewind() + break + } + } + return l.emit(tokenUnquoted), nil +} + +// peek the next token in the input or an error if the input does not +// conform to the grammar. Once the input has been consumed successive +// calls peek() return a tokenEOF token. +func (l *lexer) peek() (token, error) { + start := l.start + pos := l.pos + width := l.width + column := l.column + cols := l.cols + // Do not reset l.err because we can return it on the next call to scan(). + defer func() { + l.start = start + l.pos = pos + l.width = width + l.column = column + l.cols = cols + }() + return l.scan() +} + +// position returns the position of the last emitted token. +func (l *lexer) position() position { + return position{ + offsetStart: l.start, + offsetEnd: l.pos, + columnStart: l.column, + columnEnd: l.cols, + } +} + +// accept consumes the next if its one of the valid runes. +// It returns true if the next rune was accepted, otherwise false. +func (l *lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.rewind() + return false +} + +// expect consumes the next rune if its one of the valid runes. +// it returns nil if the next rune is valid, otherwise an expectedError +// error. +func (l *lexer) expect(valid string) error { + if strings.ContainsRune(valid, l.next()) { + return nil + } + l.rewind() + return expectedError{ + position: l.position(), + input: l.input, + expected: valid, + } +} + +// emits returns the scanned input as a token. +func (l *lexer) emit(kind tokenKind) token { + t := token{ + kind: kind, + value: l.input[l.start:l.pos], + position: l.position(), + } + l.start = l.pos + l.column = l.cols + return t +} + +// next returns the next rune in the input or eof. +func (l *lexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, width := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = width + l.pos += width + l.cols++ + return r +} + +// rewind the last rune in the input. It should not be called more than once +// between consecutive calls of next. +func (l *lexer) rewind() { + l.pos -= l.width + // When the next rune in the input is eof the width is zero. This check + // prevents cols from being decremented when the next rune being accepted + // is instead eof. + if l.width > 0 { + l.cols-- + } +} + +// skip the scanned input between start and pos. 
+func (l *lexer) skip() { + l.start = l.pos + l.column = l.cols +} diff --git a/vendor/github.com/prometheus/alertmanager/matchers/parse/parse.go b/vendor/github.com/prometheus/alertmanager/matchers/parse/parse.go new file mode 100644 index 0000000000..30a95b2554 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/matchers/parse/parse.go @@ -0,0 +1,304 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parse + +import ( + "errors" + "fmt" + "os" + "runtime/debug" + + "github.com/prometheus/alertmanager/pkg/labels" +) + +var ( + errEOF = errors.New("end of input") + errExpectedEOF = errors.New("expected end of input") + errNoOpenBrace = errors.New("expected opening brace") + errNoCloseBrace = errors.New("expected close brace") + errNoLabelName = errors.New("expected label name") + errNoLabelValue = errors.New("expected label value") + errNoOperator = errors.New("expected an operator such as '=', '!=', '=~' or '!~'") + errExpectedComma = errors.New("expected a comma") + errExpectedCommaOrCloseBrace = errors.New("expected a comma or close brace") + errExpectedMatcherOrCloseBrace = errors.New("expected a matcher or close brace after comma") +) + +// Matchers parses one or more matchers in the input string. It returns an error +// if the input is invalid. +func Matchers(input string) (matchers labels.Matchers, err error) { + defer func() { + if r := recover(); r != nil { + fmt.Fprintf(os.Stderr, "parser panic: %s, %s", r, debug.Stack()) + err = errors.New("parser panic: this should never happen, check stderr for the stack trace") + } + }() + p := parser{lexer: lexer{input: input}} + return p.parse() +} + +// Matcher parses the matcher in the input string. It returns an error +// if the input is invalid or contains two or more matchers. +func Matcher(input string) (*labels.Matcher, error) { + m, err := Matchers(input) + if err != nil { + return nil, err + } + switch len(m) { + case 1: + return m[0], nil + case 0: + return nil, fmt.Errorf("no matchers") + default: + return nil, fmt.Errorf("expected 1 matcher, found %d", len(m)) + } +} + +// parseFunc is state in the finite state automata. +type parseFunc func(l *lexer) (parseFunc, error) + +// parser reads the sequence of tokens from the lexer and returns either a +// series of matchers or an error. It works as a finite state automata, where +// each state in the automata is a parseFunc. The finite state automata can move +// from one state to another by returning the next parseFunc. It terminates when +// a parseFunc returns nil as the next parseFunc, if the lexer attempts to scan +// input that does not match the expected grammar, or if the tokens returned from +// the lexer cannot be parsed into a complete series of matchers. +type parser struct { + matchers labels.Matchers + // Tracks if the input starts with an open brace and if we should expect to + // parse a close brace at the end of the input. 
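From the outside, the two exported entry points defined above are all a caller needs; a minimal, self-contained usage sketch (the inputs are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/alertmanager/matchers/parse"
)

func main() {
	// Zero or more matchers, with or without surrounding braces.
	ms, err := parse.Matchers(`{job="alertmanager",severity!="info"}`)
	fmt.Println(len(ms), err) // 2 <nil>

	// Exactly one matcher; zero or more than one is an error.
	m, err := parse.Matcher(`severity=~"warn|crit"`)
	fmt.Println(m, err)

	// Errors say what the parser expected; truncated input here yields an
	// error wrapping "end of input" and "expected label value".
	_, err = parse.Matchers(`{job=`)
	fmt.Println(err)
}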
+ hasOpenBrace bool + lexer lexer +} + +func (p *parser) parse() (labels.Matchers, error) { + var ( + err error + fn = p.parseOpenBrace + l = &p.lexer + ) + for { + if fn, err = fn(l); err != nil { + return nil, err + } else if fn == nil { + break + } + } + return p.matchers, nil +} + +func (p *parser) parseOpenBrace(l *lexer) (parseFunc, error) { + var ( + hasCloseBrace bool + err error + ) + // Can start with an optional open brace. + p.hasOpenBrace, err = p.accept(l, tokenOpenBrace) + if err != nil { + if errors.Is(err, errEOF) { + return p.parseEOF, nil + } + return nil, err + } + // If the next token is a close brace there are no matchers in the input. + hasCloseBrace, err = p.acceptPeek(l, tokenCloseBrace) + if err != nil { + // If there is no more input after the open brace then parse the close brace + // so the error message contains ErrNoCloseBrace. + if errors.Is(err, errEOF) { + return p.parseCloseBrace, nil + } + return nil, err + } + if hasCloseBrace { + return p.parseCloseBrace, nil + } + return p.parseMatcher, nil +} + +func (p *parser) parseCloseBrace(l *lexer) (parseFunc, error) { + if p.hasOpenBrace { + // If there was an open brace there must be a matching close brace. + if _, err := p.expect(l, tokenCloseBrace); err != nil { + return nil, fmt.Errorf("0:%d: %w: %w", l.position().columnEnd, err, errNoCloseBrace) + } + } else { + // If there was no open brace there must not be a close brace either. + if _, err := p.expect(l, tokenCloseBrace); err == nil { + return nil, fmt.Errorf("0:%d: }: %w", l.position().columnEnd, errNoOpenBrace) + } + } + return p.parseEOF, nil +} + +func (p *parser) parseMatcher(l *lexer) (parseFunc, error) { + var ( + err error + t token + matchName, matchValue string + matchTy labels.MatchType + ) + // The first token should be the label name. + if t, err = p.expect(l, tokenQuoted, tokenUnquoted); err != nil { + return nil, fmt.Errorf("%w: %w", err, errNoLabelName) + } + matchName, err = t.unquote() + if err != nil { + return nil, fmt.Errorf("%d:%d: %s: invalid input", t.columnStart, t.columnEnd, t.value) + } + // The next token should be the operator. + if t, err = p.expect(l, tokenEquals, tokenNotEquals, tokenMatches, tokenNotMatches); err != nil { + return nil, fmt.Errorf("%w: %w", err, errNoOperator) + } + switch t.kind { + case tokenEquals: + matchTy = labels.MatchEqual + case tokenNotEquals: + matchTy = labels.MatchNotEqual + case tokenMatches: + matchTy = labels.MatchRegexp + case tokenNotMatches: + matchTy = labels.MatchNotRegexp + default: + panic(fmt.Sprintf("bad operator %s", t)) + } + // The next token should be the match value. Like the match name, this too + // can be either double-quoted UTF-8 or unquoted UTF-8 without reserved characters. 
+ if t, err = p.expect(l, tokenUnquoted, tokenQuoted); err != nil { + return nil, fmt.Errorf("%w: %w", err, errNoLabelValue) + } + matchValue, err = t.unquote() + if err != nil { + return nil, fmt.Errorf("%d:%d: %s: invalid input", t.columnStart, t.columnEnd, t.value) + } + m, err := labels.NewMatcher(matchTy, matchName, matchValue) + if err != nil { + return nil, fmt.Errorf("failed to create matcher: %w", err) + } + p.matchers = append(p.matchers, m) + return p.parseEndOfMatcher, nil +} + +func (p *parser) parseEndOfMatcher(l *lexer) (parseFunc, error) { + t, err := p.expectPeek(l, tokenComma, tokenCloseBrace) + if err != nil { + if errors.Is(err, errEOF) { + // If this is the end of input we still need to check if the optional + // open brace has a matching close brace + return p.parseCloseBrace, nil + } + return nil, fmt.Errorf("%w: %w", err, errExpectedCommaOrCloseBrace) + } + switch t.kind { + case tokenComma: + return p.parseComma, nil + case tokenCloseBrace: + return p.parseCloseBrace, nil + default: + panic(fmt.Sprintf("bad token %s", t)) + } +} + +func (p *parser) parseComma(l *lexer) (parseFunc, error) { + if _, err := p.expect(l, tokenComma); err != nil { + return nil, fmt.Errorf("%w: %w", err, errExpectedComma) + } + // The token after the comma can be another matcher, a close brace or end of input. + t, err := p.expectPeek(l, tokenCloseBrace, tokenUnquoted, tokenQuoted) + if err != nil { + if errors.Is(err, errEOF) { + // If this is the end of input we still need to check if the optional + // open brace has a matching close brace + return p.parseCloseBrace, nil + } + return nil, fmt.Errorf("%w: %w", err, errExpectedMatcherOrCloseBrace) + } + if t.kind == tokenCloseBrace { + return p.parseCloseBrace, nil + } + return p.parseMatcher, nil +} + +func (p *parser) parseEOF(l *lexer) (parseFunc, error) { + t, err := l.scan() + if err != nil { + return nil, fmt.Errorf("%w: %w", err, errExpectedEOF) + } + if !t.isEOF() { + return nil, fmt.Errorf("%d:%d: %s: %w", t.columnStart, t.columnEnd, t.value, errExpectedEOF) + } + return nil, nil +} + +// accept returns true if the next token is one of the specified kinds, +// otherwise false. If the token is accepted it is consumed. tokenEOF is +// not an accepted kind and instead accept returns ErrEOF if there is no +// more input. +func (p *parser) accept(l *lexer, kinds ...tokenKind) (ok bool, err error) { + ok, err = p.acceptPeek(l, kinds...) + if ok { + if _, err = l.scan(); err != nil { + panic("failed to scan peeked token") + } + } + return ok, err +} + +// acceptPeek returns true if the next token is one of the specified kinds, +// otherwise false. However, unlike accept, acceptPeek does not consume accepted +// tokens. tokenEOF is not an accepted kind and instead accept returns ErrEOF +// if there is no more input. +func (p *parser) acceptPeek(l *lexer, kinds ...tokenKind) (bool, error) { + t, err := l.peek() + if err != nil { + return false, err + } + if t.isEOF() { + return false, errEOF + } + return t.isOneOf(kinds...), nil +} + +// expect returns the next token if it is one of the specified kinds, otherwise +// it returns an error. If the token is expected it is consumed. tokenEOF is not +// an accepted kind and instead expect returns ErrEOF if there is no more input. +func (p *parser) expect(l *lexer, kind ...tokenKind) (token, error) { + t, err := p.expectPeek(l, kind...) 
+ if err != nil { + return t, err + } + if _, err = l.scan(); err != nil { + panic("failed to scan peeked token") + } + return t, nil +} + +// expect returns the next token if it is one of the specified kinds, otherwise +// it returns an error. However, unlike expect, expectPeek does not consume tokens. +// tokenEOF is not an accepted kind and instead expect returns ErrEOF if there is no +// more input. +func (p *parser) expectPeek(l *lexer, kind ...tokenKind) (token, error) { + t, err := l.peek() + if err != nil { + return t, err + } + if t.isEOF() { + return t, errEOF + } + if !t.isOneOf(kind...) { + return t, fmt.Errorf("%d:%d: unexpected %s", t.columnStart, t.columnEnd, t.value) + } + return t, nil +} diff --git a/vendor/github.com/prometheus/alertmanager/matchers/parse/token.go b/vendor/github.com/prometheus/alertmanager/matchers/parse/token.go new file mode 100644 index 0000000000..96baeeef43 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/matchers/parse/token.go @@ -0,0 +1,108 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parse + +import ( + "errors" + "fmt" + "strconv" + "unicode/utf8" +) + +type tokenKind int + +const ( + tokenEOF tokenKind = iota + tokenOpenBrace + tokenCloseBrace + tokenComma + tokenEquals + tokenNotEquals + tokenMatches + tokenNotMatches + tokenQuoted + tokenUnquoted +) + +func (k tokenKind) String() string { + switch k { + case tokenOpenBrace: + return "OpenBrace" + case tokenCloseBrace: + return "CloseBrace" + case tokenComma: + return "Comma" + case tokenEquals: + return "Equals" + case tokenNotEquals: + return "NotEquals" + case tokenMatches: + return "Matches" + case tokenNotMatches: + return "NotMatches" + case tokenQuoted: + return "Quoted" + case tokenUnquoted: + return "Unquoted" + default: + return "EOF" + } +} + +type token struct { + kind tokenKind + value string + position +} + +// isEOF returns true if the token is an end of file token. +func (t token) isEOF() bool { + return t.kind == tokenEOF +} + +// isOneOf returns true if the token is one of the specified kinds. +func (t token) isOneOf(kinds ...tokenKind) bool { + for _, k := range kinds { + if k == t.kind { + return true + } + } + return false +} + +// unquote the value in token. If unquoted returns it unmodified. +func (t token) unquote() (string, error) { + if t.kind == tokenQuoted { + unquoted, err := strconv.Unquote(t.value) + if err != nil { + return "", err + } + if !utf8.ValidString(unquoted) { + return "", errors.New("quoted string contains invalid UTF-8 code points") + } + return unquoted, nil + } + return t.value, nil +} + +func (t token) String() string { + return fmt.Sprintf("(%s) '%s'", t.kind, t.value) +} + +type position struct { + offsetStart int // The start position in the input. + offsetEnd int // The end position in the input. + columnStart int // The column number. + columnEnd int // The end of the column. 
+} diff --git a/vendor/github.com/prometheus/alertmanager/pkg/labels/matcher.go b/vendor/github.com/prometheus/alertmanager/pkg/labels/matcher.go index 445c905651..f37fcb2173 100644 --- a/vendor/github.com/prometheus/alertmanager/pkg/labels/matcher.go +++ b/vendor/github.com/prometheus/alertmanager/pkg/labels/matcher.go @@ -18,7 +18,9 @@ import ( "encoding/json" "fmt" "regexp" + "strconv" "strings" + "unicode" "github.com/prometheus/common/model" ) @@ -74,6 +76,9 @@ func NewMatcher(t MatchType, n, v string) (*Matcher, error) { } func (m *Matcher) String() string { + if strings.ContainsFunc(m.Name, isReserved) { + return fmt.Sprintf(`%s%s%s`, strconv.Quote(m.Name), m.Type, strconv.Quote(m.Value)) + } return fmt.Sprintf(`%s%s"%s"`, m.Name, m.Type, openMetricsEscape(m.Value)) } @@ -199,3 +204,13 @@ func (ms Matchers) String() string { return buf.String() } + +// This is copied from matchers/parse/lexer.go. It will be removed when +// the transition window from classic matchers to UTF-8 matchers is complete, +// as then we can use double quotes when printing the label name for all +// matchers. Until then, the classic parser does not understand double quotes +// around the label name, so we use this function as a heuristic to tell if +// the matcher was parsed with the UTF-8 parser or the classic parser. +func isReserved(r rune) bool { + return unicode.IsSpace(r) || strings.ContainsRune("{}!=~,\\\"'`", r) +} diff --git a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go index a125d59d8a..5138716e0a 100644 --- a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go +++ b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go @@ -14,11 +14,10 @@ package labels import ( + "fmt" "regexp" "strings" "unicode/utf8" - - "github.com/pkg/errors" ) var ( @@ -118,7 +117,7 @@ func ParseMatchers(s string) ([]*Matcher, error) { func ParseMatcher(s string) (_ *Matcher, err error) { ms := re.FindStringSubmatch(s) if len(ms) == 0 { - return nil, errors.Errorf("bad matcher format: %s", s) + return nil, fmt.Errorf("bad matcher format: %s", s) } var ( @@ -134,7 +133,7 @@ func ParseMatcher(s string) (_ *Matcher, err error) { } if !utf8.ValidString(rawValue) { - return nil, errors.Errorf("matcher value not valid UTF-8: %s", ms[3]) + return nil, fmt.Errorf("matcher value not valid UTF-8: %s", ms[3]) } // Unescape the rawValue: @@ -163,7 +162,7 @@ func ParseMatcher(s string) (_ *Matcher, err error) { value.WriteByte('\\') case '"': if !expectTrailingQuote || i < len(rawValue)-1 { - return nil, errors.Errorf("matcher value contains unescaped double quote: %s", ms[3]) + return nil, fmt.Errorf("matcher value contains unescaped double quote: %s", ms[3]) } expectTrailingQuote = false default: @@ -172,7 +171,7 @@ func ParseMatcher(s string) (_ *Matcher, err error) { } if expectTrailingQuote { - return nil, errors.Errorf("matcher value contains unescaped double quote: %s", ms[3]) + return nil, fmt.Errorf("matcher value contains unescaped double quote: %s", ms[3]) } return NewMatcher(typeMap[ms[2]], ms[1], value.String()) diff --git a/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go b/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go index a5018aaef9..fe8c97d729 100644 --- a/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go +++ b/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go @@ -27,6 +27,35 @@ import ( "gopkg.in/yaml.v2" ) +// Intervener determines 
whether a given time and active route time interval should mute outgoing notifications. +// It implements the TimeMuter interface. +type Intervener struct { + intervals map[string][]TimeInterval +} + +func (i *Intervener) Mutes(names []string, now time.Time) (bool, error) { + for _, name := range names { + interval, ok := i.intervals[name] + if !ok { + return false, fmt.Errorf("time interval %s doesn't exist in config", name) + } + + for _, ti := range interval { + if ti.ContainsTime(now.UTC()) { + return true, nil + } + } + } + + return false, nil +} + +func NewIntervener(ti map[string][]TimeInterval) *Intervener { + return &Intervener{ + intervals: ti, + } +} + // TimeInterval describes intervals of time. ContainsTime will tell you if a golang time is contained // within the interval. type TimeInterval struct { @@ -436,9 +465,6 @@ func (ir InclusiveRange) MarshalYAML() (interface{}, error) { return string(bytes), err } -// TimeLayout specifies the layout to be used in time.Parse() calls for time intervals. -const TimeLayout = "15:04" - var ( validTime = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)" validTimeRE = regexp.MustCompile(validTime) diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee..b9cc55abbb 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/api/client.go b/vendor/github.com/prometheus/client_golang/api/client.go index 72a01309c3..afcf122efc 100644 --- a/vendor/github.com/prometheus/client_golang/api/client.go +++ b/vendor/github.com/prometheus/client_golang/api/client.go @@ -79,6 +79,10 @@ type Client interface { Do(context.Context, *http.Request) (*http.Response, []byte, error) } +type CloseIdler interface { + CloseIdleConnections() +} + // NewClient returns a new Client. // // It is safe to use the returned Client from multiple goroutines. @@ -118,6 +122,10 @@ func (c *httpClient) URL(ep string, args map[string]string) *url.URL { return &u } +func (c *httpClient) CloseIdleConnections() { + c.client.CloseIdleConnections() +} + func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { if ctx != nil { req = req.WithContext(ctx) diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index 1cfe8d863c..cddf027fda 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -475,9 +475,9 @@ type API interface { // Flags returns the flag values that Prometheus was launched with. Flags(ctx context.Context) (FlagsResult, error) // LabelNames returns the unique label names present in the block in sorted order by given time range and matchers. 
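Stepping back briefly to the Alertmanager change above: the new timeinterval.Intervener is built from the named intervals in the configuration and queried per notification. A minimal sketch (the interval map is normally populated from the parsed config; it is left empty here purely for brevity):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/alertmanager/timeinterval"
)

func main() {
	// Normally populated from the parsed Alertmanager configuration.
	iv := timeinterval.NewIntervener(map[string][]timeinterval.TimeInterval{
		"weekends": {},
	})

	muted, err := iv.Mutes([]string{"weekends"}, time.Now())
	fmt.Println(muted, err) // false <nil>: no interval contains the current time

	_, err = iv.Mutes([]string{"missing"}, time.Now())
	fmt.Println(err) // time interval missing doesn't exist in config
}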
- LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) + LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) // LabelValues performs a query for the values of the given label, time range and matchers. - LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) + LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelValues, Warnings, error) // Query performs a query for the given time. Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) // QueryRange performs a query for the given range. @@ -489,7 +489,7 @@ type API interface { // Runtimeinfo returns the various runtime information properties about the Prometheus server. Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) // Series finds series by label matchers. - Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) + Series(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]model.LabelSet, Warnings, error) // Snapshot creates a snapshot of all current data into snapshots/- // under the TSDB's data directory and returns the directory as response. Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) @@ -502,7 +502,7 @@ type API interface { // Metadata returns metadata about metrics currently scraped by the metric name. Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) // TSDB returns the cardinality statistics. - TSDB(ctx context.Context) (TSDBResult, error) + TSDB(ctx context.Context, opts ...Option) (TSDBResult, error) // WalReplay returns the current replay status of the wal. 
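The interface changes above thread a variadic ...Option through LabelNames, LabelValues, Series and TSDB, matching Query and QueryRange. A hedged usage sketch (the server address and time range are assumptions; WithLimit is the new option added further down in this diff):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	c, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	papi := v1.NewAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The same options now apply to metadata endpoints; WithLimit caps the
	// number of returned entries on servers that support the limit parameter.
	names, warns, err := papi.LabelNames(ctx, nil, time.Now().Add(-time.Hour), time.Now(),
		v1.WithLimit(100), v1.WithTimeout(5*time.Second))
	fmt.Println(names, warns, err)
}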
WalReplay(ctx context.Context) (WalReplayStatus, error) } @@ -1024,9 +1024,10 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { return res, err } -func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) { +func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) { u := h.client.URL(epLabels, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) + if !startTime.IsZero() { q.Set("start", formatTime(startTime)) } @@ -1046,9 +1047,10 @@ func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, e return labelNames, w, err } -func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) { +func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelValues, Warnings, error) { u := h.client.URL(epLabelValues, map[string]string{"name": label}) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) + if !startTime.IsZero() { q.Set("start", formatTime(startTime)) } @@ -1076,6 +1078,7 @@ func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []strin type apiOptions struct { timeout time.Duration + limit uint64 } type Option func(c *apiOptions) @@ -1088,20 +1091,35 @@ func WithTimeout(timeout time.Duration) Option { } } -func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) { - u := h.client.URL(epQuery, nil) - q := u.Query() +// WithLimit provides an optional maximum number of returned entries for APIs that support limit parameter +// e.g. https://prometheus.io/docs/prometheus/latest/querying/api/#instant-querie:~:text=%3A%20End%20timestamp.-,limit%3D%3Cnumber%3E,-%3A%20Maximum%20number%20of +func WithLimit(limit uint64) Option { + return func(o *apiOptions) { + o.limit = limit + } +} +func addOptionalURLParams(q url.Values, opts []Option) url.Values { opt := &apiOptions{} for _, o := range opts { o(opt) } - d := opt.timeout - if d > 0 { - q.Set("timeout", d.String()) + if opt.timeout > 0 { + q.Set("timeout", opt.timeout.String()) } + if opt.limit > 0 { + q.Set("limit", strconv.FormatUint(opt.limit, 10)) + } + + return q +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) { + u := h.client.URL(epQuery, nil) + q := addOptionalURLParams(u.Query(), opts) + q.Set("query", query) if !ts.IsZero() { q.Set("time", formatTime(ts)) @@ -1118,36 +1136,25 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts .. 
func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error) { u := h.client.URL(epQueryRange, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) q.Set("query", query) q.Set("start", formatTime(r.Start)) q.Set("end", formatTime(r.End)) q.Set("step", strconv.FormatFloat(r.Step.Seconds(), 'f', -1, 64)) - opt := &apiOptions{} - for _, o := range opts { - o(opt) - } - - d := opt.timeout - if d > 0 { - q.Set("timeout", d.String()) - } - _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, warnings, err } var qres queryResult - return qres.v, warnings, json.Unmarshal(body, &qres) } -func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) { +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]model.LabelSet, Warnings, error) { u := h.client.URL(epSeries, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) for _, m := range matches { q.Add("match[]", m) @@ -1166,8 +1173,7 @@ func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTi } var mset []model.LabelSet - err = json.Unmarshal(body, &mset) - return mset, warnings, err + return mset, warnings, json.Unmarshal(body, &mset) } func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { @@ -1278,8 +1284,10 @@ func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[strin return res, err } -func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) { +func (h *httpAPI) TSDB(ctx context.Context, opts ...Option) (TSDBResult, error) { u := h.client.URL(epTSDB, nil) + q := addOptionalURLParams(u.Query(), opts) + u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 0000000000..65d761bc9f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 0000000000..8547c8dfd1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. 
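The header package sits under client_golang's internal/ tree, so it is only callable from within client_golang itself. As a rough illustration of what ParseAccept (documented just above, defined below) returns for a typical Accept-Encoding header, assuming the sketch lives inside the package:

package header

import (
	"fmt"
	"net/http"
)

// exampleParseAccept is an illustrative sketch, not part of the vendored file.
func exampleParseAccept() {
	h := http.Header{"Accept-Encoding": {"gzip;q=1.0, identity;q=0.5, *;q=0"}}
	for _, spec := range ParseAccept(h, "Accept-Encoding") {
		fmt.Printf("%s %.1f\n", spec.Value, spec.Q)
	}
	// Prints gzip 1.0, identity 0.5 and * 0.0; NegotiateContentEncoding
	// (added below) ranks offered encodings against these specs.
}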
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 0000000000..2e45780b74 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10e..cc4ef1077e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. 
go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. + // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 68ffe3c248..ad347113c0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -189,12 +189,15 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } - vlStrings := make([]string, 0, len(d.variableLabels.names)) - for _, vl := range d.variableLabels.names { - if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { - vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) - } else { - vlStrings = append(vlStrings, vl) + vlStrings := []string{} + if d.variableLabels != nil { + vlStrings = make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } } } return fmt.Sprintf( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0..520cbd7d41 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. 
Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. 
Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. 
Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f4..6b8684731c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s.", desc, origName) +} + // Describe returns all descriptions of the collector. 
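// Editor's sketch (not part of the vendored diff): the help-string changes above tie each
// go_memstats_* metric to its runtime/metrics equivalent. A minimal, hedged, standard-library-only
// example of checking one such equivalence, here go_memstats_sys_bytes vs /memory/classes/total:bytes:
package main

import (
    "fmt"
    "runtime"
    "runtime/metrics"
)

func main() {
    samples := []metrics.Sample{{Name: "/memory/classes/total:bytes"}}
    metrics.Read(samples)

    var ms runtime.MemStats
    runtime.ReadMemStats(&ms)

    // The two readings are taken at slightly different moments, so they can differ
    // by a few pages, but they describe the same quantity.
    fmt.Printf("/memory/classes/total:bytes = %d\n", samples[0].Value.Uint64())
    fmt.Printf("MemStats.Sys                = %d\n", ms.Sys)
}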
func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. - panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395..c453b754a7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "math" "runtime" @@ -28,6 +29,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" ) +const ( + nativeHistogramSchemaMaximum = 8 + nativeHistogramSchemaMinimum = -4 +) + // nativeHistogramBounds for the frac of observed values. Only relevant for // schema > 0. The position in the slice is the schema. (0 is never used, just // here for convenience of using the schema directly as the index.) @@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. -func ExponentialBucketsRange(min, max float64, count int) []float64 { +func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 { if count < 1 { panic("ExponentialBucketsRange count needs a positive count") } - if min <= 0 { + if minBucket <= 0 { panic("ExponentialBucketsRange min needs to be greater than 0") } @@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 { // max = min*growthFactor^(bucketCount-1) // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(max/min, 1.0/float64(count-1)) + growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) // Now that we know growthFactor, solve for each bucket. buckets := make([]float64, count) for i := 1; i <= count; i++ { - buckets[i-1] = min * math.Pow(growthFactor, float64(i-1)) + buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1)) } return buckets } @@ -440,7 +446,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +479,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. 
(Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +554,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +579,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +749,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +767,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +849,13 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + if h.nativeExemplars.isEnabled() { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -829,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error { // findBucket returns the index of the bucket for the provided value, or // len(h.upperBounds) for the +Inf bucket. func (h *histogram) findBucket(v float64) int { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. 
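// Editor's sketch: the TODO removed above is resolved by the new hybrid findBucket further below,
// which uses a linear scan for small bucket slices and binary search otherwise. A hedged,
// standalone re-derivation of that strategy (the threshold 35 comes from the benchmarks the new
// code references; names here are illustrative, not the vendored implementation):
package main

import (
    "fmt"
    "sort"
)

// findBucketIndex returns the index of the first upper bound >= v,
// or len(upperBounds) for the implicit +Inf bucket.
func findBucketIndex(upperBounds []float64, v float64) int {
    n := len(upperBounds)
    if n == 0 || v <= upperBounds[0] {
        return 0
    }
    if v > upperBounds[n-1] {
        return n
    }
    if n < 35 { // linear search wins for small slices
        for i, bound := range upperBounds {
            if v <= bound {
                return i
            }
        }
        return n
    }
    return sort.SearchFloat64s(upperBounds, v) // binary search for larger slices
}

func main() {
    bounds := []float64{0.1, 0.5, 1, 5, 10}
    fmt.Println(findBucketIndex(bounds, 0.75)) // 2
    fmt.Println(findBucketIndex(bounds, 42))   // 5 (+Inf bucket)
}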
- // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + n := len(h.upperBounds) + if n == 0 { + return 0 + } + + // Early exit: if v is less than or equal to the first upper bound, return 0 + if v <= h.upperBounds[0] { + return 0 + } + + // Early exit: if v is greater than the last upper bound, return len(h.upperBounds) + if v > h.upperBounds[n-1] { + return n + } + + // For small arrays, use simple linear search + // "magic number" 35 is result of tests on couple different (AWS and baremetal) servers + // see more details here: https://github.com/prometheus/client_golang/pull/1662 + if n < 35 { + for i, bound := range h.upperBounds { + if v <= bound { + return i + } + } + // If v is greater than all upper bounds, return len(h.upperBounds) + return n + } + + // For larger arrays, use stdlib's binary search return sort.SearchFloat64s(h.upperBounds, v) } @@ -1091,8 +1146,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1159,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1397,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) 
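// Editor's sketch: the NewConstHistogramWithCreatedTimestamp / MustNewConstHistogramWithCreatedTimestamp
// constructors being added here target bridge and exporter code that replays histograms with a known
// start time. A hedged usage example (metric name and numbers are illustrative only):
package main

import (
    "fmt"
    "time"

    dto "github.com/prometheus/client_model/go"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    desc := prometheus.NewDesc("queue_wait_seconds", "Time spent waiting in the queue.", nil, nil)

    m := prometheus.MustNewConstHistogramWithCreatedTimestamp(
        desc,
        42,   // observation count
        17.5, // sum of observations
        map[float64]uint64{0.1: 10, 1: 30, 10: 42}, // cumulative bucket counts by upper bound
        time.Now().Add(-time.Hour),                 // created timestamp
    )

    var out dto.Metric
    if err := m.Write(&out); err != nil {
        panic(err)
    }
    fmt.Println(out.GetHistogram().GetCreatedTimestamp().AsTime())
}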
+ if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1363,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 { floor := math.Floor(math.Log2(math.Log2(bucketFactor))) switch { case floor <= -8: - return 8 + return nativeHistogramSchemaMaximum case floor >= 4: - return -4 + return nativeHistogramSchemaMinimum default: return -int32(floor) } @@ -1575,3 +1678,379 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. + ttl time.Duration + + exemplars []*dto.Exemplar +} + +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + ttl = -1 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if !n.isEnabled() { + return + } + + n.Lock() + defer n.Unlock() + + // When the number of exemplars has not yet exceeded or + // is equal to cap(n.exemplars), then + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + var nIdx int + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. 
If there is an exemplar that expired, then we simple reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert new the exemplar. + if nIdx == -1 && *e.Value <= *exemplar.Value { + nIdx = i + } + + // Find the two closest exemplars and pick the one the with older timestamp. + pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + rIdx = i + } else { + rIdx = i - 1 + } + } + + } + + // If all existing exemplar are smaller than new exemplar, + // then the exemplar should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. + elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. + md = diff + rIdx = nIdx - 1 + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx-1 and not rIdx. + rIdx = nIdx + } + } + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) 
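// Editor's sketch: a hedged distillation of the replacement policy documented above, covering only
// the "closest pair on a log scale, drop the older of the two" step; TTL expiry and the
// insertion-point adjustment are handled separately in the vendored code. Names are illustrative:
package main

import (
    "fmt"
    "math"
    "time"
)

// pickVictim returns the index of the exemplar to drop when the buffer is full and nothing has
// expired: the older exemplar within the pair whose values are closest on a logarithmic scale.
// It returns -1 for fewer than two exemplars.
func pickVictim(values []float64, times []time.Time) int {
    victim, closest := -1, math.Inf(1)
    for i := 1; i < len(values); i++ {
        diff := math.Abs(math.Log(values[i]) - math.Log(values[i-1]))
        if diff < closest {
            closest = diff
            if times[i].Before(times[i-1]) {
                victim = i
            } else {
                victim = i - 1
            }
        }
    }
    return victim
}

func main() {
    now := time.Now()
    values := []float64{0.01, 0.1, 0.11, 10} // 0.1 and 0.11 are the closest pair on a log scale
    times := []time.Time{now, now.Add(-time.Minute), now, now}
    fmt.Println(pickVictim(values, times)) // 1: the older exemplar of the closest pair
}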
+ } +} + +type constNativeHistogram struct { + desc *Desc + dto.Histogram + labelPairs []*dto.LabelPair +} + +func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error { + var bucketPopulationSum int64 + for _, v := range positiveBuckets { + bucketPopulationSum += v + } + for _, v := range negativeBuckets { + bucketPopulationSum += v + } + bucketPopulationSum += int64(zeroBucket) + + // If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts. + // Otherwise, the number of observations must be equal to the sum of all bucket counts . + + if math.IsNaN(sum) && bucketPopulationSum > int64(count) || + !math.IsNaN(sum) && bucketPopulationSum != int64(count) { + return errors.New("the sum of all bucket populations exceeds the count of observations") + } + return nil +} + +// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with +// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// zeroBucket counts all (positive and negative) +// observations in the zero bucket (with an absolute value less or equal +// the current threshold). +// positiveBuckets and negativeBuckets are separate maps for negative and positive +// observations. The map's value is an int64, counting observations in +// that bucket. The map's key is the +// index of the bucket according to the used +// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets. +// NewConstNativeHistogram returns an error if +// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid. +// - the schema passed is not between 8 and -4 +// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN) +// +// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus. 
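// Editor's sketch: a hedged usage example for NewConstNativeHistogram as documented above, e.g.
// when translating an OTel exponential histogram data point on the fly; the metric name, bucket
// indices, and counts are illustrative only:
package main

import (
    "fmt"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    desc := prometheus.NewDesc("rpc_latency_seconds", "RPC latency.", nil, nil)

    m, err := prometheus.NewConstNativeHistogram(
        desc,
        10,                              // count: must equal the sum of all bucket populations
        12.5,                            // sum of observations
        map[int]int64{0: 4, 1: 3, 2: 2}, // positive buckets, keyed by schema index
        nil,                             // no negative buckets
        1,                               // zero-bucket count (4+3+2+1 == 10)
        3,                               // schema, must be within [-4, 8]
        1e-128,                          // zero threshold
        time.Now().Add(-time.Minute),    // created timestamp
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(m.Desc())
}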
+func NewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + schema int32, + zeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { + return nil, errors.New("invalid native histogram schema") + } + if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { + return nil, err + } + + NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) + PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) + ret := &constNativeHistogram{ + desc: desc, + Histogram: dto.Histogram{ + CreatedTimestamp: timestamppb.New(createdTimestamp), + Schema: &schema, + ZeroThreshold: &zeroThreshold, + SampleCount: &count, + SampleSum: &sum, + + NegativeSpan: NegativeSpan, + NegativeDelta: NegativeDelta, + + PositiveSpan: PositiveSpan, + PositiveDelta: PositiveDelta, + + ZeroCount: proto.Uint64(zeroBucket), + }, + labelPairs: MakeLabelPairs(desc, labelValues), + } + if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { + ret.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } + return ret, nil +} + +// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where +// NewConstNativeHistogram would have returned an error. +func MustNewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + nativeHistogramSchema int32, + nativeHistogramZeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) Metric { + nativehistogram, err := NewConstNativeHistogram(desc, + count, + sum, + positiveBuckets, + negativeBuckets, + zeroBucket, + nativeHistogramSchema, + nativeHistogramZeroThreshold, + createdTimestamp, + labelValues...) + if err != nil { + panic(err) + } + return nativehistogram +} + +func (h *constNativeHistogram) Desc() *Desc { + return h.desc +} + +func (h *constNativeHistogram) Write(out *dto.Metric) error { + out.Histogram = &h.Histogram + out.Label = h.labelPairs + return nil +} + +func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { + if len(buckets) == 0 { + return nil, nil + } + var ii []int + for k := range buckets { + ii = append(ii, k) + } + sort.Ints(ii) + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + count := buckets[i] + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one or two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. 
+ spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index a595a20362..8b016355ad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -22,17 +22,18 @@ import ( "bytes" "fmt" "io" + "strconv" "strings" ) -func min(a, b int) int { +func minInt(a, b int) int { if a < b { return a } return b } -func max(a, b int) int { +func maxInt(a, b int) int { if a > b { return a } @@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { if codes[0].Tag == 'e' { c := codes[0] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2} } if codes[len(codes)-1].Tag == 'e' { c := codes[len(codes)-1] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)} } nn := n + n groups := [][]OpCode{} @@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { // there is a large range with no changes. if c.Tag == 'e' && i2-i1 > nn { group = append(group, OpCode{ - c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n), + c.Tag, i1, minInt(i2, i1+n), + j1, minInt(j2, j1+n), }) groups = append(groups, group) group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) + i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n) } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } @@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 { // is faster to compute than either .Ratio() or .QuickRatio(). 
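// Editor's sketch: stepping back to makeBucketsFromMap above, a hedged, self-contained
// re-derivation of its gap rule: gaps of at most two empty buckets are zero-filled inside the
// current span, larger gaps open a new span, and counts are stored as deltas against the previous
// bucket. spanEncode is illustrative, not the vendored function:
package main

import (
    "fmt"
    "sort"
)

func spanEncode(buckets map[int]int64) (spans [][2]int32, deltas []int64) {
    ii := make([]int, 0, len(buckets))
    for k := range buckets {
        ii = append(ii, k)
    }
    sort.Ints(ii)

    var prev int64
    nextI := 0
    for n, i := range ii {
        gap := int32(i - nextI)
        if n == 0 || gap > 2 {
            spans = append(spans, [2]int32{gap, 0}) // {offset, length}
        } else {
            for j := int32(0); j < gap; j++ { // small gap: emit empty buckets
                spans[len(spans)-1][1]++
                deltas = append(deltas, -prev)
                prev = 0
            }
        }
        spans[len(spans)-1][1]++
        deltas = append(deltas, buckets[i]-prev)
        prev = buckets[i]
        nextI = i + 1
    }
    return spans, deltas
}

func main() {
    spans, deltas := spanEncode(map[int]int64{0: 4, 1: 3, 4: 2})
    fmt.Println(spans, deltas) // [[0 5]] [4 -1 -3 0 2]: one span, two zero-filled buckets

    spans, deltas = spanEncode(map[int]int64{0: 4, 5: 2})
    fmt.Println(spans, deltas) // [[0 1] [4 1]] [4 -2]: the gap of four opens a second span
}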
func (m *SequenceMatcher) RealQuickRatio() float64 { la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) + return calculateRatio(minInt(la, lb), la+lb) } // Convert range to the "ed" format @@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string { beginning := start + 1 // lines start numbering with one length := stop - start if length == 1 { - return fmt.Sprintf("%d", beginning) + return strconv.Itoa(beginning) } if length == 0 { beginning-- // empty ranges begin at line just before the range diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d644..a4fa6eabd7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index 97d17d6cb6..f7f97ef926 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) name += "_total" } - valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) + // Our current conversion moves to legacy naming, so use legacy validation. 
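// Editor's sketch: GoCollectorDefaultRuntimeMetrics above means /gc/gogc:percent,
// /gc/gomemlimit:bytes and /sched/gomaxprocs:threads are now exported by default. A hedged sketch
// of opting additional runtime/metrics in through the public collectors package, assuming its
// WithGoCollectorRuntimeMetrics option (the usual way these rules are configured from user code):
package main

import (
    "net/http"
    "regexp"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    reg.MustRegister(collectors.NewGoCollector(
        collectors.WithGoCollectorRuntimeMetrics(
            // Also export the scheduler latency histogram, on top of the defaults.
            collectors.GoRuntimeMetricsRule{Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`)},
        ),
    ))

    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    _ = http.ListenAndServe(":8080", nil)
}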
+ valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237..592eec3e24 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string { if name == "" { return "" } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") + + sb := strings.Builder{} + sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) + + if namespace != "" { + sb.WriteString(namespace) + sb.WriteString("_") } - return name + + if subsystem != "" { + sb.WriteString(subsystem) + sb.WriteString("_") + } + + sb.WriteString(name) + + return sb.String() } type invalidMetric struct { @@ -234,7 +242,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed..e7bce8b58e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,16 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + describeFn func(chan<- *Desc) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +102,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -111,24 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { // Set up process metric collection if supported by the runtime. if canCollectProcess() { c.collectFn = c.processCollect + c.describeFn = c.describe } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } + c.collectFn = c.errorCollectFn + c.describeFn = c.errorDescribeFn } return c } -// Describe returns all descriptions of the collector. 
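// Editor's sketch: the strings.Builder rewrite of BuildFQName above is behavior-preserving; a
// small hedged example of the expected outputs, worth keeping in mind when reviewing the empty
// namespace/subsystem branches:
package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    fmt.Println(prometheus.BuildFQName("http", "server", "requests_total")) // http_server_requests_total
    fmt.Println(prometheus.BuildFQName("", "server", "requests_total"))     // server_requests_total
    fmt.Println(prometheus.BuildFQName("http", "", "requests_total"))       // http_requests_total
    fmt.Println(prometheus.BuildFQName("", "", ""))                         // empty: no name, no FQName
}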
-func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime +func (c *processCollector) errorCollectFn(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) +} + +func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { + if c.reportErrors { + ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) + } } // Collect returns the current state of all metrics of the collector. @@ -136,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + c.describeFn(ch) +} + func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go new file mode 100644 index 0000000000..0a61b98461 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -0,0 +1,130 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios + +package prometheus + +import ( + "errors" + "fmt" + "os" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// isn't available. +var notImplementedErr = errors.New("not implemented") + +type memoryInfo struct { + vsize uint64 // Virtual memory size in bytes + rss uint64 // Resident memory size in bytes +} + +func canCollectProcess() bool { + return true +} + +func getSoftLimit(which int) (uint64, error) { + rlimit := syscall.Rlimit{} + + if err := syscall.Getrlimit(which, &rlimit); err != nil { + return 0, err + } + + return rlimit.Cur, nil +} + +func getOpenFileCount() (float64, error) { + // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to + // return a list of open fds, but that requires a way to call C APIs. The + // benefits, however, include fewer system calls and not failing when at the + // open file soft limit. + + if dir, err := os.Open("/dev/fd"); err != nil { + return 0.0, err + } else { + defer dir.Close() + + // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is + // that info not used, but KQUEUE descriptors fail stat(2), which causes + // the whole method to fail. + if names, err := dir.Readdirnames(0); err != nil { + return 0.0, err + } else { + // Subtract 1 to ignore the open /dev/fd descriptor above. 
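// Editor's sketch: with the describe/collect split above, the process collector registers the same
// way as before. A hedged example of wiring it up explicitly; the new
// process_network_receive_bytes_total / process_network_transmit_bytes_total series are only
// populated on procfs platforms (see the procfs-backed collector further below):
package main

import (
    "net/http"
    "os"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
        PidFn:        func() (int, error) { return os.Getpid(), nil }, // the default behavior, spelled out
        ReportErrors: true,                                            // surface collection errors as invalid metrics
    }))

    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    _ = http.ListenAndServe(":8080", nil)
}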
+ return float64(len(names) - 1), nil + } + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { + if len(procs) == 1 { + startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, c.startTime, err) + } + + // The proc structure returned by kern.proc.pid above has an Rusage member, + // but it is not filled in, so it needs to be fetched by getrusage(2). For + // that call, the UTime, STime, and Maxrss members are filled out, but not + // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require + // access to the C API to call task_info(TASK_BASIC_INFO). + rusage := unix.Rusage{} + + if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil { + cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds() + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime) + } else { + c.reportError(ch, c.cpuTotal, err) + } + + if memInfo, err := getMemory(); err == nil { + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) + } else if !errors.Is(err, notImplementedErr) { + // Don't report an error when support is not compiled in. + c.reportError(ch, c.rss, err) + c.reportError(ch, c.vsize, err) + } + + if fds, err := getOpenFileCount(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds) + } else { + c.reportError(ch, c.openFDs, err) + } + + if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles)) + } else { + c.reportError(ch, c.maxFDs, err) + } + + if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil { + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace)) + } else { + c.reportError(ch, c.maxVsize, err) + } + + // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might + // be able to get the per-process network send/receive counts. +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go deleted file mode 100644 index b1e363d6cf..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
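// Editor's sketch: the Darwin collector above derives process_cpu_seconds_total from getrusage(2)
// rather than procfs. A hedged, Unix-only sketch of the same calculation using
// golang.org/x/sys/unix (user plus system time, expressed in seconds):
package main

import (
    "fmt"
    "time"

    "golang.org/x/sys/unix"
)

func main() {
    var ru unix.Rusage
    if err := unix.Getrusage(unix.RUSAGE_SELF, &ru); err != nil {
        panic(err)
    }
    // Utime/Stime are timevals; Nano() converts them to nanoseconds.
    cpuSeconds := time.Duration(ru.Utime.Nano() + ru.Stime.Nano()).Seconds()
    fmt.Printf("process_cpu_seconds_total would report %.3f\n", cpuSeconds)
}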
- -//go:build js -// +build js - -package prometheus - -func canCollectProcess() bool { - return false -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - // noop on this platform - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c new file mode 100644 index 0000000000..d00a24315d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c @@ -0,0 +1,84 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +#include +#include +#include + +// The compiler warns that mach/shared_memory_server.h is deprecated, and to use +// mach/shared_region.h instead. But that doesn't define +// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and +// avoid a warning message when running tests. +#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U +#define SHARED_DATA_REGION_SIZE 0x10000000 +#define SHARED_TEXT_REGION_SIZE 0x10000000 + + +int get_memory_info(unsigned long long *rss, unsigned long long *vsize) +{ + // This is lightly adapted from how ps(1) obtains its memory info. + // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 + + kern_return_t error; + task_t task = MACH_PORT_NULL; + mach_task_basic_info_data_t info; + mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; + + error = task_info( + mach_task_self(), + MACH_TASK_BASIC_INFO, + (task_info_t) &info, + &info_count ); + + if( error != KERN_SUCCESS ) + { + return error; + } + + *rss = info.resident_size; + *vsize = info.virtual_size; + + { + vm_region_basic_info_data_64_t b_info; + mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; + mach_vm_size_t size; + mach_port_t object_name; + + /* + * try to determine if this task has the split libraries + * mapped in... 
if so, adjust its virtual size down by + * the 2 segments that are used for split libraries + */ + info_count = VM_REGION_BASIC_INFO_COUNT_64; + + error = mach_vm_region( + mach_task_self(), + &address, + &size, + VM_REGION_BASIC_INFO_64, + (vm_region_info_t) &b_info, + &info_count, + &object_name); + + if (error == KERN_SUCCESS) { + if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && + *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { + *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); + } + } + } + + return 0; +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go new file mode 100644 index 0000000000..9ac53f9992 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go @@ -0,0 +1,51 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +package prometheus + +/* +int get_memory_info(unsigned long long *rss, unsigned long long *vs); +*/ +import "C" +import "fmt" + +func getMemory() (*memoryInfo, error) { + var rss, vsize C.ulonglong + + if err := C.get_memory_info(&rss, &vsize); err != 0 { + return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) + } + + return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + ch <- c.rss + ch <- c.vsize + + /* the process could be collected but not implemented yet + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go new file mode 100644 index 0000000000..8ddb0995d6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && !cgo + +package prometheus + +func getMemory() (*memoryInfo, error) { + return nil, notImplementedErr +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + + /* the process could be collected but not implemented yet + ch <- c.rss + ch <- c.vsize + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go new file mode 100644 index 0000000000..7732b7f376 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 || js || ios +// +build wasip1 js ios + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + c.errorCollectFn(ch) +} + +// describe returns all descriptions of the collector for wasip1 and js. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + c.errorDescribeFn(ch) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go deleted file mode 100644 index 8c1136ceea..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !windows && !js && !wasip1 -// +build !windows,!js,!wasip1 - -package prometheus - -import ( - "github.com/prometheus/procfs" -) - -func canCollectProcess() bool { - _, err := procfs.NewDefaultFS() - return err == nil -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.Stat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.Limits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go new file mode 100644 index 0000000000..9f4b130bef --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -0,0 +1,96 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !windows && !js && !wasip1 && !darwin +// +build !windows,!js,!wasip1,!darwin + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } +} + +// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go deleted file mode 100644 index d8d9a6d7a2..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
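// Editor's sketch: the procfs-backed collector above now exposes process network I/O from the
// per-process netstat IpExt counters. A hedged, Linux-only example reading the same counters
// directly with github.com/prometheus/procfs:
package main

import (
    "fmt"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    p, err := procfs.NewProc(os.Getpid())
    if err != nil {
        panic(err) // no procfs on this platform
    }

    netstat, err := p.Netstat()
    if err != nil {
        panic(err)
    }
    if netstat.IpExt.InOctets != nil {
        fmt.Printf("process_network_receive_bytes_total:  %.0f\n", *netstat.IpExt.InOctets)
    }
    if netstat.IpExt.OutOctets != nil {
        fmt.Printf("process_network_transmit_bytes_total: %.0f\n", *netstat.IpExt.OutOctets)
    }
}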
- -//go:build wasip1 -// +build wasip1 - -package prometheus - -func canCollectProcess() bool { - return false -} - -func (*processCollector) processCollect(chan<- Metric) { - // noop on this platform - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index f973398df2..fa474289ef 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) { } func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } + h := windows.CurrentProcess() var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) if err != nil { c.reportError(ch, nil, err) return @@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. } +// describe returns all descriptions of the collector for windows. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + func fileTimeToSeconds(ft windows.Filetime) float64 { return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83..315eab5f17 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. 
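// Editor's sketch: the Unwrap method being added here lets http.ResponseController reach the real
// http.ResponseWriter through promhttp's instrumentation wrappers. A hedged example; the handler
// and metric names are illustrative, and the "code" label is what triggers use of the delegator:
package main

import (
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    requests := prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "http_requests_total", Help: "Handled requests."},
        []string{"code"},
    )
    prometheus.MustRegister(requests)

    handler := promhttp.InstrumentHandlerCounter(requests, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // Works even though w is the wrapping delegator, because it now implements Unwrap.
        rc := http.NewResponseController(w)
        _ = rc.SetWriteDeadline(time.Now().Add(5 * time.Second))
        _, _ = w.Write([]byte("ok"))
    }))

    http.Handle("/", handler)
    _ = http.ListenAndServe(":8080", nil)
}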
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbea..28eed26727 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. + var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,22 +190,30 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) + var enc expfmt.Encoder + if opts.EnableOpenMetricsTextCreatedSamples { + enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines()) + } else { + enc = expfmt.NewEncoder(w, contentType) + } // handleError handles the error according to opts.ErrorHandling // and returns true if we have to abort after the handling. @@ -343,9 +376,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. 
Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If handler can't agree with the client on the encodings or + // unsupported or empty encodings are set in OfferedCompressions, + // handler always fallbacks to no compression (identity), for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -371,6 +414,21 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic + // Created Timestamps for counters, histograms and summaries, which for the current + // version of OpenMetrics are defined as extra series with the same name and "_created" + // suffix. See also the OpenMetrics specification for more details + // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1 + // + // Created timestamps are used to improve the accuracy of reset detection, + // but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality + // if the scraper does not handle those metrics correctly (converting to created timestamp + // instead of leaving those series as-is). New OpenMetrics versions might improve + // this situation. + // + // Prometheus introduced the feature flag 'created-timestamp-zero-ingestion' + // in version 2.50.0 to handle this situation. + EnableOpenMetricsTextCreatedSamples bool // ProcessStartTime allows setting process start timevalue that will be exposed // with "Process-Start-Time-Unix" response header along with the metrics // payload. This allow callers to have efficient transformations to cumulative @@ -381,19 +439,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text. Same as with a plain http.Error, this @@ -406,3 +451,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and an the +// correct value that the caller can set in the response header. 
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a0..c6fd2f58b7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked. 
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446..ac5203c6fa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { s := &summary{ desc: desc, + now: opts.now, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), @@ -280,6 +281,8 @@ type summary struct { desc *Desc + now func() time.Time + objectives map[float64]float64 sortedObjectives []float64 @@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) { s.bufMtx.Lock() defer s.bufMtx.Unlock() - now := time.Now() + now := s.now() if now.After(s.hotBufExpTime) { s.asyncFlush(now) } @@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error { s.bufMtx.Lock() s.mtx.Lock() // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) + s.swapBufs(s.now()) s.bufMtx.Unlock() s.flushColdBuf() @@ -783,3 +786,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go index dd29cccc30..ea46f38ecf 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -68,7 +68,7 @@ func (l *Linter) Lint() ([]Problem, error) { var problems []Problem if l.r != nil { - d := expfmt.NewDecoder(l.r, expfmt.FmtText) + d := expfmt.NewDecoder(l.r, expfmt.NewFormat(expfmt.TypeTextPlain)) mf := &dto.MetricFamily{} for { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab6..e1441598da 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 0000000000..68645ed0a9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. +func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, errors.New("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e16..de52cfee44 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. 
+// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 269f56435b..1258508e4f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -39,14 +39,15 @@ package testutil import ( "bytes" + "errors" "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" "github.com/prometheus/client_golang/prometheus" @@ -159,6 +160,9 @@ func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { // ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in // plain text format. Then it compares it with the results that the `expected` would return. // If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { resp, err := http.Get(url) if err != nil { @@ -184,9 +188,11 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. It then calls GatherAndCompare with that Registry and with -// the provided metricNames. +// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -199,6 +205,9 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. 
+// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) } @@ -207,6 +216,9 @@ func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ... // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { got, done, err := g.Gather() defer done() @@ -222,6 +234,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. +func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -265,85 +302,24 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str // result. func compare(got, want []*dto.MetricFamily) error { var gotBuf, wantBuf bytes.Buffer - enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText) + enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range got { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding gathered metrics failed: %w", err) } } - enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText) + enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range want { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { - return fmt.Errorf(diffErr) + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { + return errors.New(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
-func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f8..2c808eece0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index cee360db7f..2f15490758 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -483,6 +483,8 @@ type Histogram struct { // histograms. PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. + // Only used for native histograms. These exemplars MUST have a timestamp. + Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` } func (x *Histogram) Reset() { @@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 { return nil } +func (x *Histogram) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. 
type Bucket struct { @@ -923,6 +932,7 @@ type MetricFamily struct { Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` } func (x *MetricFamily) Reset() { @@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric { return nil } +func (x *MetricFamily) GetUnit() string { + if x != nil && x.Unit != nil { + return *x.Unit + } + return "" +} + var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ @@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, @@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, - 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, - 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 
0x6e, 0x67, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, + 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 
0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, - 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, + 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, - 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, - 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, - 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, - 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, - 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, - 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, - 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, + 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 
0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, + 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, + 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, + 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, + 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, } var ( @@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 19, // [19:19] is the sub-list for method output_type - 19, // [19:19] is the sub-list for method input_type - 19, 
// [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar + 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index c24864a927..126df9e67a 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,9 +1,16 @@ --- linters: enable: + - errcheck - godot + - gosimple + - govet + - ineffassign - misspell - revive + - staticcheck + - testifylint + - unused linter-settings: godot: diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 56ba67d3e3..e00f3b365b 100644 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1,2 +1,3 @@ * Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier +* Paul Gier @pgier +* Ben Kochie @SuperQ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 062a281856..1617292350 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,23 +49,23 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell command -v gotestsum > /dev/null),) +ifneq ($(shell command -v gotestsum 2> /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.15.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.54.2 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +GOLANGCI_LINT_VERSION ?= v1.59.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. 
# windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. ifneq (,$(SKIP_GOLANGCI_LINT)) @@ -169,16 +169,20 @@ common-vet: common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif +.PHONY: common-lint-fix +common-lint-fix: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint fix" + $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell command -v yamllint > /dev/null)) +ifeq (, $(shell command -v yamllint 2> /dev/null)) @echo "yamllint not installed so skipping" else yamllint . @@ -204,6 +208,10 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) +.PHONY: common-docker-repo-name +common-docker-repo-name: + @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 28783e2ddc..cdcc8a7ccc 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) + return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) + return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 4a173636c9..8380750090 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") + node := strings.TrimSuffix(parts[1], ",") + zone := strings.TrimSuffix(parts[3], ",") 
arraySize := len(parts[4:]) if bucketCount == -1 { @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) + return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f4f5501c68..f0950bb495 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) } field := strings.SplitN(firstLine, ": ", 2) @@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 9a73e26393..5f2a37a78b 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f560a8db30..cf2e3eaa03 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) + return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) + return fmt.Errorf("%w: Expected %d, but got %d: %w", 
ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 5a145bbfe1..bc3a20c932 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) + return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) + return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { return nil, 0, - fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) + fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 59465c5bbc..332e76c17f 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) + return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index fdd4b95445..67a9d2b448 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -23,7 +23,7 @@ import ( var ( statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) @@ -50,6 +50,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Number of blocks on the device that need to be synced. 
+ BlocksToBeSynced int64 // progress percentage of current sync BlocksSyncedPct float64 // estimated finishing time for current sync (in minutes) @@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) + return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive @@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) + return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size + blocksSynced := size + blocksToBeSynced := size speed := float64(0) finish := float64(0) pct := float64(0) @@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Handle case when resync=PENDING or resync=DELAYED. if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 + blocksSynced = 0 } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) + return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksSpare: spare, DisksTotal: total, BlocksTotal: size, - BlocksSynced: syncedBlocks, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, @@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if 
len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) return active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { +func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) + } + + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) + if err != nil { + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. 
finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } - return syncedBlocks, pct, finish, speed, nil + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index eaf00e2248..4b2c4050a3 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -126,6 +126,7 @@ type Meminfo struct { VmallocUsed *uint64 // largest contiguous block of vmalloc area which is free VmallocChunk *uint64 + Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 ShmemHugePages *uint64 @@ -140,6 +141,55 @@ type Meminfo struct { DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 + + // The struct fields below are the byte-normalized counterparts to the + // existing struct fields. Values are normalized using the optional + // unit field in the meminfo line. + MemTotalBytes *uint64 + MemFreeBytes *uint64 + MemAvailableBytes *uint64 + BuffersBytes *uint64 + CachedBytes *uint64 + SwapCachedBytes *uint64 + ActiveBytes *uint64 + InactiveBytes *uint64 + ActiveAnonBytes *uint64 + InactiveAnonBytes *uint64 + ActiveFileBytes *uint64 + InactiveFileBytes *uint64 + UnevictableBytes *uint64 + MlockedBytes *uint64 + SwapTotalBytes *uint64 + SwapFreeBytes *uint64 + DirtyBytes *uint64 + WritebackBytes *uint64 + AnonPagesBytes *uint64 + MappedBytes *uint64 + ShmemBytes *uint64 + SlabBytes *uint64 + SReclaimableBytes *uint64 + SUnreclaimBytes *uint64 + KernelStackBytes *uint64 + PageTablesBytes *uint64 + NFSUnstableBytes *uint64 + BounceBytes *uint64 + WritebackTmpBytes *uint64 + CommitLimitBytes *uint64 + CommittedASBytes *uint64 + VmallocTotalBytes *uint64 + VmallocUsedBytes *uint64 + VmallocChunkBytes *uint64 + PercpuBytes *uint64 + HardwareCorruptedBytes *uint64 + AnonHugePagesBytes *uint64 + ShmemHugePagesBytes *uint64 + ShmemPmdMappedBytes *uint64 + CmaTotalBytes *uint64 + CmaFreeBytes *uint64 + HugepagesizeBytes *uint64 + DirectMap4kBytes *uint64 + DirectMap2MBytes *uint64 + DirectMap1GBytes *uint64 } // Meminfo returns an information about current kernel/system memory statistics. 
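The byte-normalized Meminfo fields added above carry the same quantities as the original fields, converted to bytes when the kernel prints the optional kB unit (the parsing change follows in the next hunk). A minimal consumer-side sketch, assuming the vendored procfs version in this diff; the mount point and error handling are illustrative only:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS(procfs.DefaultMountPoint)
        if err != nil {
            log.Fatal(err)
        }
        mi, err := fs.Meminfo()
        if err != nil {
            log.Fatal(err)
        }
        // Prefer the normalized value; fall back to the legacy field, which
        // holds whatever number appeared in /proc/meminfo (usually kB).
        if mi.MemTotalBytes != nil {
            fmt.Printf("MemTotal: %d bytes\n", *mi.MemTotalBytes)
        } else if mi.MemTotal != nil {
            fmt.Printf("MemTotal: %d (unit as reported by the kernel)\n", *mi.MemTotal)
        }
    }

Keeping both field families avoids breaking existing callers that already interpret the legacy fields as kB.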
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) + return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) } return *m, nil @@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { var m Meminfo s := bufio.NewScanner(r) for s.Scan() { - // Each line has at least a name and value; we ignore the unit. fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) - } + var val, valBytes uint64 - v, err := strconv.ParseUint(fields[1], 0, 64) + val, err := strconv.ParseUint(fields[1], 0, 64) if err != nil { return nil, err } + switch len(fields) { + case 2: + // No unit present, use the parsed the value as bytes directly. + valBytes = val + case 3: + // Unit present in optional 3rd field, convert it to + // bytes. The only unit supported within the Linux + // kernel is `kB`. + if fields[2] != "kB" { + return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2]) + } + + valBytes = 1024 * val + + default: + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) + } + switch fields[0] { case "MemTotal:": - m.MemTotal = &v + m.MemTotal = &val + m.MemTotalBytes = &valBytes case "MemFree:": - m.MemFree = &v + m.MemFree = &val + m.MemFreeBytes = &valBytes case "MemAvailable:": - m.MemAvailable = &v + m.MemAvailable = &val + m.MemAvailableBytes = &valBytes case "Buffers:": - m.Buffers = &v + m.Buffers = &val + m.BuffersBytes = &valBytes case "Cached:": - m.Cached = &v + m.Cached = &val + m.CachedBytes = &valBytes case "SwapCached:": - m.SwapCached = &v + m.SwapCached = &val + m.SwapCachedBytes = &valBytes case "Active:": - m.Active = &v + m.Active = &val + m.ActiveBytes = &valBytes case "Inactive:": - m.Inactive = &v + m.Inactive = &val + m.InactiveBytes = &valBytes case "Active(anon):": - m.ActiveAnon = &v + m.ActiveAnon = &val + m.ActiveAnonBytes = &valBytes case "Inactive(anon):": - m.InactiveAnon = &v + m.InactiveAnon = &val + m.InactiveAnonBytes = &valBytes case "Active(file):": - m.ActiveFile = &v + m.ActiveFile = &val + m.ActiveFileBytes = &valBytes case "Inactive(file):": - m.InactiveFile = &v + m.InactiveFile = &val + m.InactiveFileBytes = &valBytes case "Unevictable:": - m.Unevictable = &v + m.Unevictable = &val + m.UnevictableBytes = &valBytes case "Mlocked:": - m.Mlocked = &v + m.Mlocked = &val + m.MlockedBytes = &valBytes case "SwapTotal:": - m.SwapTotal = &v + m.SwapTotal = &val + m.SwapTotalBytes = &valBytes case "SwapFree:": - m.SwapFree = &v + m.SwapFree = &val + m.SwapFreeBytes = &valBytes case "Dirty:": - m.Dirty = &v + m.Dirty = &val + m.DirtyBytes = &valBytes case "Writeback:": - m.Writeback = &v + m.Writeback = &val + m.WritebackBytes = &valBytes case "AnonPages:": - m.AnonPages = &v + m.AnonPages = &val + m.AnonPagesBytes = &valBytes case "Mapped:": - m.Mapped = &v + m.Mapped = &val + m.MappedBytes = &valBytes case "Shmem:": - m.Shmem = &v + m.Shmem = &val + m.ShmemBytes = &valBytes case "Slab:": - m.Slab = &v + m.Slab = &val + m.SlabBytes = &valBytes case "SReclaimable:": - m.SReclaimable = &v + m.SReclaimable = &val + m.SReclaimableBytes = &valBytes case "SUnreclaim:": - m.SUnreclaim = &v + m.SUnreclaim = &val + m.SUnreclaimBytes = &valBytes case "KernelStack:": - m.KernelStack = &v + m.KernelStack = &val + m.KernelStackBytes = &valBytes case "PageTables:": - m.PageTables = &v + m.PageTables = &val + 
m.PageTablesBytes = &valBytes case "NFS_Unstable:": - m.NFSUnstable = &v + m.NFSUnstable = &val + m.NFSUnstableBytes = &valBytes case "Bounce:": - m.Bounce = &v + m.Bounce = &val + m.BounceBytes = &valBytes case "WritebackTmp:": - m.WritebackTmp = &v + m.WritebackTmp = &val + m.WritebackTmpBytes = &valBytes case "CommitLimit:": - m.CommitLimit = &v + m.CommitLimit = &val + m.CommitLimitBytes = &valBytes case "Committed_AS:": - m.CommittedAS = &v + m.CommittedAS = &val + m.CommittedASBytes = &valBytes case "VmallocTotal:": - m.VmallocTotal = &v + m.VmallocTotal = &val + m.VmallocTotalBytes = &valBytes case "VmallocUsed:": - m.VmallocUsed = &v + m.VmallocUsed = &val + m.VmallocUsedBytes = &valBytes case "VmallocChunk:": - m.VmallocChunk = &v + m.VmallocChunk = &val + m.VmallocChunkBytes = &valBytes + case "Percpu:": + m.Percpu = &val + m.PercpuBytes = &valBytes case "HardwareCorrupted:": - m.HardwareCorrupted = &v + m.HardwareCorrupted = &val + m.HardwareCorruptedBytes = &valBytes case "AnonHugePages:": - m.AnonHugePages = &v + m.AnonHugePages = &val + m.AnonHugePagesBytes = &valBytes case "ShmemHugePages:": - m.ShmemHugePages = &v + m.ShmemHugePages = &val + m.ShmemHugePagesBytes = &valBytes case "ShmemPmdMapped:": - m.ShmemPmdMapped = &v + m.ShmemPmdMapped = &val + m.ShmemPmdMappedBytes = &valBytes case "CmaTotal:": - m.CmaTotal = &v + m.CmaTotal = &val + m.CmaTotalBytes = &valBytes case "CmaFree:": - m.CmaFree = &v + m.CmaFree = &val + m.CmaFreeBytes = &valBytes case "HugePages_Total:": - m.HugePagesTotal = &v + m.HugePagesTotal = &val case "HugePages_Free:": - m.HugePagesFree = &v + m.HugePagesFree = &val case "HugePages_Rsvd:": - m.HugePagesRsvd = &v + m.HugePagesRsvd = &val case "HugePages_Surp:": - m.HugePagesSurp = &v + m.HugePagesSurp = &val case "Hugepagesize:": - m.Hugepagesize = &v + m.Hugepagesize = &val + m.HugepagesizeBytes = &valBytes case "DirectMap4k:": - m.DirectMap4k = &v + m.DirectMap4k = &val + m.DirectMap4kBytes = &valBytes case "DirectMap2M:": - m.DirectMap2M = &v + m.DirectMap2M = &val + m.DirectMap2MBytes = &valBytes case "DirectMap1G:": - m.DirectMap1G = &v + m.DirectMap1G = &val + m.DirectMap1GBytes = &valBytes } } diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 388ebf396d..a704c5e735 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, fmt.Errorf("%s: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 9d8af6db74..75a3b6c810 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -88,7 +88,7 @@ type MountStatsNFS struct { // Statistics broken down by filesystem operation. Operations []NFSOperationStats // Statistics about the NFS RPC transport. - Transport NFSTransportStats + Transport []NFSTransportStats } // mountStats implements MountStats. @@ -194,8 +194,6 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. 
CumulativeTotalRequestMilliseconds uint64 - // The average time from the point the client sends RPC requests until it receives the response. - AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. Errors uint64 } @@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, err } - stats.Transport = *tstats + stats.Transport = append(stats.Transport, *tstats) } // When encountering "per-operation statistics", we must break this @@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } - if ns[0] != 0 { - opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) - } if len(ns) > 8 { opStats.Errors = ns[8] @@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) + return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index fdfa456119..316df5fbb7 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) + return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } numEntries := len(entries) if numEntries < 16 || numEntries > 17 { diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 4da81ea577..b70f1fc7a4 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -50,10 +50,13 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 + // Drops shows the total number of dropped packets of all UPD sockets. + Drops *uint64 } // netIPSocketLine represents the fields parsed from a single line // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. 
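The Drops counter above is only meaningful for the UDP socket files, so both the per-line value and the summary field are pointers that stay nil for tcp/tcp6. A short sketch of reading the summary, assuming fs.NetUDPSummary keeps its current signature in this procfs version:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS(procfs.DefaultMountPoint)
        if err != nil {
            log.Fatal(err)
        }
        sum, err := fs.NetUDPSummary() // summarizes /proc/net/udp
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("udp sockets: %d, rx queue: %d\n", sum.UsedSockets, sum.RxQueueLength)
        // Drops is populated only for the udp/udp6 files; it stays nil for tcp/tcp6.
        if sum.Drops != nil {
            fmt.Printf("udp drops: %d\n", *sum.Drops)
        }
    }

Summing per-line drops into a single pointer keeps the zero-value summary unchanged for TCP callers.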
netIPSocketLine struct { Sl uint64 @@ -66,6 +69,7 @@ type ( RxQueue uint64 UID uint64 Inode uint64 + Drops *uint64 } ) @@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) { defer f.Close() var netIPSocket NetIPSocket + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } @@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { defer f.Close() var netIPSocketSummary NetIPSocketSummary + var udpPacketDrops uint64 + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } netIPSocketSummary.TxQueueLength += line.TxQueue netIPSocketSummary.RxQueueLength += line.RxQueue netIPSocketSummary.UsedSockets++ + if isUDP { + udpPacketDrops += *line.Drops + netIPSocketSummary.Drops = &udpPacketDrops + } } if err := s.Err(); err != nil { return nil, err @@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) + return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) + return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) } } // parseNetIPSocketLine parses a single line, represented by a list of fields. 
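The recurring %s-to-%w change in these hunks means ErrFileParse and ErrFileRead are wrapped rather than stringified, so callers can match them with errors.Is again. A hedged sketch of that consumer pattern; the NetUDP call is simply a convenient example of a parser that can return either sentinel:

    package main

    import (
        "errors"
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS(procfs.DefaultMountPoint)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := fs.NetUDP(); err != nil {
            // With %w wrapping, the package's sentinel errors are matchable.
            switch {
            case errors.Is(err, procfs.ErrFileParse):
                fmt.Println("malformed procfs content:", err)
            case errors.Is(err, procfs.ErrFileRead):
                fmt.Println("could not read procfs file:", err)
            default:
                fmt.Println("unexpected error:", err)
            }
        }
    }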
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { +func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( @@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) + return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") @@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) + return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address @@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) + return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) + return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue @@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) + return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + } + + // drops + if isUDP { + drops, err := strconv.ParseUint(fields[12], 0, 64) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) + } + line.Drops = &drops } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index 360e36af7d..fae62b13d9 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := 
parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) + return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) + return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index c770852919..71c8059f4d 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go new file mode 100644 index 0000000000..13994c1782 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -0,0 +1,119 @@ +// Copyright 2023 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// TLSStat struct represents data in /proc/net/tls_stat. +// See https://docs.kernel.org/networking/tls.html#statistics +type TLSStat struct { + // number of TX sessions currently installed where host handles cryptography + TLSCurrTxSw int + // number of RX sessions currently installed where host handles cryptography + TLSCurrRxSw int + // number of TX sessions currently installed where NIC handles cryptography + TLSCurrTxDevice int + // number of RX sessions currently installed where NIC handles cryptography + TLSCurrRxDevice int + //number of TX sessions opened with host cryptography + TLSTxSw int + //number of RX sessions opened with host cryptography + TLSRxSw int + // number of TX sessions opened with NIC cryptography + TLSTxDevice int + // number of RX sessions opened with NIC cryptography + TLSRxDevice int + // record decryption failed (e.g. due to incorrect authentication tag) + TLSDecryptError int + // number of RX resyncs sent to NICs handling cryptography + TLSRxDeviceResync int + // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. + TLSDecryptRetry int + // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. 
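The new net_tls_stat.go file (its struct and reader continue in the lines below) surfaces the kernel's kTLS counters from /proc/net/tls_stat. A minimal sketch of reading them, assuming a kernel that exposes that file; field names are taken from the struct above:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS(procfs.DefaultMountPoint)
        if err != nil {
            log.Fatal(err)
        }
        // NewTLSStat reads /proc/net/tls_stat; it fails if kTLS is unavailable.
        tlsStat, err := fs.NewTLSStat()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("kTLS tx sessions (sw/offload): %d/%d, decrypt errors: %d\n",
            tlsStat.TLSTxSw, tlsStat.TLSTxDevice, tlsStat.TLSDecryptError)
    }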
+ TLSRxNoPadViolation int +} + +// NewTLSStat reads the tls_stat statistics. +func NewTLSStat() (TLSStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return TLSStat{}, err + } + + return fs.NewTLSStat() +} + +// NewTLSStat reads the tls_stat statistics. +func (fs FS) NewTLSStat() (TLSStat, error) { + file, err := os.Open(fs.proc.Path("net/tls_stat")) + if err != nil { + return TLSStat{}, err + } + defer file.Close() + + var ( + tlsstat = TLSStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return TLSStat{}, err + } + + switch name { + case "TlsCurrTxSw": + tlsstat.TLSCurrTxSw = value + case "TlsCurrRxSw": + tlsstat.TLSCurrRxSw = value + case "TlsCurrTxDevice": + tlsstat.TLSCurrTxDevice = value + case "TlsCurrRxDevice": + tlsstat.TLSCurrRxDevice = value + case "TlsTxSw": + tlsstat.TLSTxSw = value + case "TlsRxSw": + tlsstat.TLSRxSw = value + case "TlsTxDevice": + tlsstat.TLSTxDevice = value + case "TlsRxDevice": + tlsstat.TLSRxDevice = value + case "TlsDecryptError": + tlsstat.TLSDecryptError = value + case "TlsRxDeviceResync": + tlsstat.TLSRxDeviceResync = value + case "TlsDecryptRetry": + tlsstat.TLSDecryptRetry = value + case "TlsRxNoPadViolation": + tlsstat.TLSRxNoPadViolation = value + } + + } + + return tlsstat, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index acbbc57eab..d868cebdaa 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) + return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) + return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) + return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("%s failed to parse inode %q: %w", 
ErrFileParse, fields[6], err) + return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7443edca94..7c597bc870 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) { m, err := parseWireless(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) } return m, nil @@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) if err != nil { - return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) } qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) } qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) } dnwid, err := strconv.Atoi(stats[4]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) } dcrypt, err := strconv.Atoi(stats[5]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) } dfrag, err := strconv.Atoi(stats[6]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) } dretry, err := strconv.Atoi(stats[7]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) } dmisc, err := strconv.Atoi(stats[8]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) } mbeacon, err := strconv.Atoi(stats[9]) if err != nil { - return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) } w := &Wireless{ @@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) } return interfaces, nil diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index d1f71caa5d..142796368f 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -111,7 
+111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil } // Wchan returns the wchan (wait channel) of a process. @@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) + return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index c86d815d73..9530b14bc6 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) + return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index c22666750f..0f8f847f95 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) { typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index fe9dbb425f..ccd35f153a 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,7 +61,7 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) + return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } return parsePSIStats(bytes.NewReader(data)) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go 
b/vendor/github.com/prometheus/procfs/proc_smaps.go index ad8785a407..09060e8208 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") + v = strings.TrimSuffix(v, " kB") vKBytes, err := strconv.ParseUint(v, 10, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 923e55005b..06a8d931c9 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -110,6 +110,11 @@ type ProcStat struct { Policy uint // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 + // Guest time of the process (time spent running a virtual CPU for a guest + // operating system), measured in clock ticks. + GuestTime int + // Guest time of the process's children, measured in clock ticks. + CGuestTime int proc FS } @@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) { &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, + &s.GuestTime, + &s.CGuestTime, ) if err != nil { return ProcStat{}, err diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 46307f5721..a055197c63 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "math/bits" "sort" "strconv" "strings" @@ -76,9 +77,9 @@ type ProcStatus struct { NonVoluntaryCtxtSwitches uint64 // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string + UIDs [4]uint64 // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string + GIDs [4]uint64 // CpusAllowedList: List of cpu cores processes are allowed to run on. CpusAllowedList []uint64 @@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) { // convert kB to B vBytes := vKBytes * 1024 - s.fillStatus(k, v, vKBytes, vBytes) + err = s.fillStatus(k, v, vKBytes, vBytes) + if err != nil { + return ProcStatus{}, err + } } return s, nil } -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { switch k { case "Tgid": s.TGID = int(vUint) case "Name": s.Name = vString case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "NSpid": s.NSpids = calcNSPidsList(vString) case "VmPeak": @@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.CpusAllowedList = calcCpusAllowedList(vString) } + return nil } // TotalCtxtSwitches returns the total context switch. 
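With UIDs and GIDs parsed into [4]uint64 instead of strings, and GuestTime/CGuestTime added to ProcStat, callers work with numeric values directly. A small sketch against this procfs version, reading the current process's own status and stat:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        p, err := procfs.Self()
        if err != nil {
            log.Fatal(err)
        }
        status, err := p.NewStatus()
        if err != nil {
            log.Fatal(err)
        }
        // Indexes 0..3 hold the real, effective, saved set, and filesystem IDs.
        fmt.Printf("uid real=%d effective=%d\n", status.UIDs[0], status.UIDs[1])

        stat, err := p.Stat()
        if err != nil {
            log.Fatal(err)
        }
        // Guest time is reported in clock ticks, like the other CPU counters.
        fmt.Printf("guest ticks=%d cguest ticks=%d\n", stat.GuestTime, stat.CGuestTime)
    }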
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 12c5bf05b7..5eefbe2ef8 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) + return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index b8fad677dc..28708e0745 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q 
(IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 34fc3ee21b..e36b41c18a 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return 
softIRQStat, total, nil @@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index fa00f555db..65fec834bf 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) + return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) + return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = 
strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) + return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index df2215ece0..80e0e947be 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) + return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index ce5fefa5b3..e54d94b090 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad6f..7e19eba090 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. 
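The comparison rework in the hunks below orders time.Time values with Before/Equal instead of converting through UnixNano, which also behaves sensibly for times outside the UnixNano range; ordinary assertions are unaffected. A quick sketch (it would live in a _test.go file):

    package example

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestTimeOrdering(t *testing.T) {
        earlier := time.Now()
        later := earlier.Add(2 * time.Second)

        // Both pass: time.Time comparisons go through Before/Equal internally.
        assert.Greater(t, later, earlier)
        assert.LessOrEqual(t, earlier, later)
    }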
+type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) 
} // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109ad..1906341657 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd40..21629087ba 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a059..1d2f71824a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
} // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f21c..4e91332bb5 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) 
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) 
+ } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 0000000000..baa0cc7d7f --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 0000000000..b83c6cf64c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 0000000000..e78f7dfe69 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f807..d8921950d7 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. 
// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// require.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// require.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. 
// -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +232,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) +// if require.Error(t, err) { +// require.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) +// if require.Errorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. 
// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error 
message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +734,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +749,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +764,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +779,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +794,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +809,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). 
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -824,7 +824,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// require.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// require.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// require.IsDecreasing(t, []int{2, 1, 0}) +// require.IsDecreasing(t, []float{2, 1}) +// require.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -994,9 +994,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1009,9 +1009,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// require.IsIncreasing(t, []int{1, 2, 3}) +// require.IsIncreasing(t, []float{1, 2}) +// require.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1039,9 +1039,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// require.IsNonDecreasing(t, []int{1, 1, 2}) +// require.IsNonDecreasing(t, []float{1, 2}) +// require.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1054,9 +1054,9 @@ func 
IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1069,9 +1069,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// require.IsNonIncreasing(t, []int{2, 1, 1}) +// require.IsNonIncreasing(t, []float{2, 1}) +// require.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1148,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
// -// assert.Len(t, mySlice, 3) +// require.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// require.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// require.Less(t, 1, 2) +// require.Less(t, float64(1), float64(2)) +// require.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// require.LessOrEqual(t, 1, 2) +// require.LessOrEqual(t, 2, 2) +// require.LessOrEqual(t, "a", "b") +// require.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
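The hunks above only retag the generated doc-comment examples from assert.* to require.*, since this file belongs to the require package; behaviourally, each require wrapper calls t.FailNow() when the forwarded assert check fails, so a failed assertion stops the test instead of letting it continue. A minimal sketch of that difference (hypothetical test, not part of this diff):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestOrderingHelpers(t *testing.T) {
	// assert.* records a failure and lets the test keep running...
	assert.IsIncreasing(t, []int{1, 2, 3})

	// ...while the require.* wrappers above call t.FailNow() on failure,
	// so nothing after a failed require line executes.
	require.LessOrEqual(t, 2, 2)
	require.Len(t, []string{"a", "b", "c"}, 3)
}
```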
// Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// require.Lessf(t, 1, 2, "error message %s", "formatted") +// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// require.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// require.Negative(t, -1) +// require.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// require.Negativef(t, -1, "error message %s", "formatted") +// require.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// require.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. 
// -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
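NotElementsMatch and NotElementsMatchf above are new wrappers in this testify bump: they pass when the two lists are not permutations of each other, counting duplicates. A short usage sketch under those semantics (hypothetical test):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestListsDiffer(t *testing.T) {
	// Passes: the duplicate 1 appears a different number of times in each list.
	require.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})

	// Would fail and stop the test: same elements with the same multiplicities,
	// only the order differs.
	// require.NotElementsMatch(t, []int{1, 2, 3}, []int{3, 2, 1})
}
```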
// NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// require.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) t.FailNow() } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotImplements asserts that an object does not implement the specified interface. // -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +// require.NotImplements(t, (*MyInterface)(nil), new(MyObject)) func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, // NotImplementsf asserts that an object does not implement the specified interface. 
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. 
Pointer variable sameness is // determined based on the equality of both type and value. @@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. 
// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42ddebd..8b32836850 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310a5f..1bd87304f4 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772dfeb9..6b7ce929eb 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go index a99b714e27..600a557324 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go +++ b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go @@ -71,6 +71,15 @@ func (es NonNilMultiError) Cause() error { return es.getCause() } +func (es NonNilMultiError) Is(target error) bool { + for _, err := range es { + if errors.Is(err, target) { + return true + } + } + return false +} + func (es NonNilMultiError) getCause() NonNilMultiRootError { var causes []error for _, err := range es { diff --git a/vendor/github.com/thanos-io/thanos/pkg/reloader/reloader.go b/vendor/github.com/thanos-io/thanos/pkg/reloader/reloader.go index 0821b90fed..77c6b05d70 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/reloader/reloader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/reloader/reloader.go @@ -75,7 +75,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/minio/sha256-simd" - ps "github.com/mitchellh/go-ps" + "github.com/mitchellh/go-ps" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -87,14 +87,15 @@ import ( // It optionally substitutes environment variables in the configuration. // Referenced environment variables must be of the form `$(var)` (not `$var` or `${var}`). 
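Alongside the testify changes, the errutil hunk above gives NonNilMultiError an Is method, so errors.Is can now match a target wrapped anywhere inside the aggregate. A rough sketch, assuming the package's usual MultiError/Add/Err helpers and an illustrative sentinel error (the Reloader struct changes continue below):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/thanos-io/thanos/pkg/errutil"
)

var errStoreUnavailable = errors.New("store unavailable") // illustrative sentinel

func queryStores() error {
	var merr errutil.MultiError
	merr.Add(errStoreUnavailable)
	merr.Add(errors.New("deadline exceeded"))
	return merr.Err() // a NonNilMultiError wrapping both failures
}

func main() {
	// With the new Is method the sentinel is found even though it is
	// buried inside the aggregated error.
	if errors.Is(queryStores(), errStoreUnavailable) {
		fmt.Println("at least one store was unavailable") // handle the partial failure
	}
}
```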
type Reloader struct { - logger log.Logger - cfgFile string - cfgOutputFile string - cfgDirs []CfgDirOption - watchInterval time.Duration - retryInterval time.Duration - watchedDirs []string - watcher *watcher + logger log.Logger + cfgFile string + cfgOutputFile string + cfgDirs []CfgDirOption + tolerateEnvVarExpansionErrors bool + retryInterval time.Duration + watchInterval time.Duration + watchedDirs []string + watcher *watcher tr TriggerReloader @@ -104,13 +105,14 @@ type Reloader struct { lastCfgDirFiles []map[string]struct{} forceReload bool - reloads prometheus.Counter - reloadErrors prometheus.Counter - lastReloadSuccess prometheus.Gauge - lastReloadSuccessTimestamp prometheus.Gauge - configApplyErrors prometheus.Counter - configApply prometheus.Counter - reloaderInfo *prometheus.GaugeVec + reloads prometheus.Counter + reloadErrors prometheus.Counter + lastReloadSuccess prometheus.Gauge + lastReloadSuccessTimestamp prometheus.Gauge + configApplyErrors prometheus.Counter + configEnvVarExpansionErrors prometheus.Gauge + configApply prometheus.Counter + reloaderInfo *prometheus.GaugeVec } // TriggerReloader reloads the configuration of the process. @@ -172,6 +174,9 @@ type Options struct { // RetryInterval controls how often the reloader retries a reloading of the // configuration in case the reload operation returned an error. RetryInterval time.Duration + // TolerateEnvVarExpansionErrors suppresses errors when expanding environment variables in the config file, and + // leaves the unset variables as is. All found environment variables are still expanded. + TolerateEnvVarExpansionErrors bool } var firstGzipBytes = []byte{0x1f, 0x8b, 0x08} @@ -183,15 +188,16 @@ func New(logger log.Logger, reg prometheus.Registerer, o *Options) *Reloader { logger = log.NewNopLogger() } r := &Reloader{ - logger: logger, - cfgFile: o.CfgFile, - cfgOutputFile: o.CfgOutputFile, - cfgDirs: o.CfgDirs, - lastCfgDirFiles: make([]map[string]struct{}, len(o.CfgDirs)), - watcher: newWatcher(logger, reg, o.DelayInterval), - watchedDirs: o.WatchedDirs, - watchInterval: o.WatchInterval, - retryInterval: o.RetryInterval, + logger: logger, + cfgFile: o.CfgFile, + cfgOutputFile: o.CfgOutputFile, + cfgDirs: o.CfgDirs, + lastCfgDirFiles: make([]map[string]struct{}, len(o.CfgDirs)), + watcher: newWatcher(logger, reg, o.DelayInterval), + watchedDirs: o.WatchedDirs, + watchInterval: o.WatchInterval, + retryInterval: o.RetryInterval, + tolerateEnvVarExpansionErrors: o.TolerateEnvVarExpansionErrors, reloads: promauto.With(reg).NewCounter( prometheus.CounterOpts{ @@ -229,6 +235,12 @@ func New(logger log.Logger, reg prometheus.Registerer, o *Options) *Reloader { Help: "Total number of config apply operations that failed.", }, ), + configEnvVarExpansionErrors: promauto.With(reg).NewGauge( + prometheus.GaugeOpts{ + Name: "reloader_config_environment_variable_expansion_errors", + Help: "Number of environment variable expansions that failed during the last operation.", + }, + ), reloaderInfo: promauto.With(reg).NewGaugeVec( prometheus.GaugeOpts{ Name: "reloader_info", @@ -348,7 +360,7 @@ func (r *Reloader) Watch(ctx context.Context) error { } } -func normalize(logger log.Logger, inputFile, outputFile string) error { +func (r *Reloader) normalize(inputFile, outputFile string) error { b, err := os.ReadFile(inputFile) if err != nil { return errors.Wrap(err, "read file") @@ -360,7 +372,7 @@ func normalize(logger log.Logger, inputFile, outputFile string) error { if err != nil { return errors.Wrap(err, "create gzip reader") } - defer 
runutil.CloseWithLogOnErr(logger, zr, "gzip reader close") + defer runutil.CloseWithLogOnErr(r.logger, zr, "gzip reader close") b, err = io.ReadAll(zr) if err != nil { @@ -368,7 +380,7 @@ func normalize(logger log.Logger, inputFile, outputFile string) error { } } - b, err = expandEnv(b) + b, err = r.expandEnv(b) if err != nil { return errors.Wrap(err, "expand environment variables") } @@ -402,7 +414,7 @@ func (r *Reloader) apply(ctx context.Context) error { } cfgHash = h.Sum(nil) if r.cfgOutputFile != "" { - if err := normalize(r.logger, r.cfgFile, r.cfgOutputFile); err != nil { + if err := r.normalize(r.cfgFile, r.cfgOutputFile); err != nil { return err } } @@ -446,7 +458,7 @@ func (r *Reloader) apply(ctx context.Context) error { outFile := filepath.Join(outDir, targetFile.Name()) cfgDirFiles[outFile] = struct{}{} - if err := normalize(r.logger, path, outFile); err != nil { + if err := r.normalize(path, outFile); err != nil { return errors.Wrapf(err, "move file: %s", path) } } @@ -692,21 +704,30 @@ func RuntimeInfoURLFromBase(u *url.URL) *url.URL { var envRe = regexp.MustCompile(`\$\(([a-zA-Z_0-9]+)\)`) -func expandEnv(b []byte) (r []byte, err error) { - r = envRe.ReplaceAllFunc(b, func(n []byte) []byte { +func (r *Reloader) expandEnv(b []byte) (replaced []byte, err error) { + configEnvVarExpansionErrorsCount := 0 + replaced = envRe.ReplaceAllFunc(b, func(n []byte) []byte { if err != nil { return nil } + m := n n = n[2 : len(n)-1] v, ok := os.LookupEnv(string(n)) if !ok { - err = errors.Errorf("found reference to unset environment variable %q", n) + configEnvVarExpansionErrorsCount++ + errStr := errors.Errorf("found reference to unset environment variable %q", n) + if r.tolerateEnvVarExpansionErrors { + level.Warn(r.logger).Log("msg", "expand environment variable", "err", errStr) + return m + } + err = errStr return nil } return []byte(v) }) - return r, err + r.configEnvVarExpansionErrors.Set(float64(configEnvVarExpansionErrorsCount)) + return replaced, err } type watcher struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index 809dfce36b..a455a504b8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -3,7 +3,7 @@ // Package runutil provides helpers to advanced function scheduling control like repeat or retry. // -// It's very often the case when you need to excutes some code every fixed intervals or have it retried automatically. +// It's very often the case when you need to executes some code every fixed intervals or have it retried automatically. // To make it reliably with proper timeout, you need to carefully arrange some boilerplate for this. // Below function does it for you. // @@ -45,7 +45,7 @@ // The rununtil.Exhaust* family of functions provide the same functionality but // they take an io.ReadCloser and they exhaust the whole reader before closing // them. They are useful when trying to use http keep-alive connections because -// for the same connection to be re-used the whole response body needs to be +// for the same connection to be reused the whole response body needs to be // exhausted. 
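The reloader changes above introduce a TolerateEnvVarExpansionErrors option and a reloader_config_environment_variable_expansion_errors gauge: when the option is set, an unresolved $(VAR) reference is logged and left in place rather than aborting the config apply, and the gauge reports how many expansions failed during the last pass. A rough wiring sketch using only fields visible in the hunks above; the paths and intervals are made up for illustration:

```go
package config

import (
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/thanos-io/thanos/pkg/reloader"
)

func newConfigReloader(logger log.Logger) *reloader.Reloader {
	return reloader.New(logger, prometheus.DefaultRegisterer, &reloader.Options{
		CfgFile:       "/etc/config/config.yaml.tmpl",
		CfgOutputFile: "/etc/config/config.yaml",
		WatchInterval: 3 * time.Minute,
		RetryInterval: 5 * time.Second,
		// New in this bump: keep applying the config when a $(VAR) reference
		// cannot be resolved; failures are only logged and counted in the
		// reloader_config_environment_variable_expansion_errors gauge.
		TolerateEnvVarExpansionErrors: true,
	})
}
```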
package runutil diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml new file mode 100644 index 0000000000..8902bdaaff --- /dev/null +++ b/vendor/github.com/x448/float16/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.11.x + +env: + - GO111MODULE=on + +script: + - go test -short -coverprofile=coverage.txt -covermode=count ./... + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE new file mode 100644 index 0000000000..bf6e357854 --- /dev/null +++ b/vendor/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md new file mode 100644 index 0000000000..b524b8135d --- /dev/null +++ b/vendor/github.com/x448/float16/README.md @@ -0,0 +1,133 @@ +# Float16 (Binary16) in Go/Golang +[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16) +[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16) +[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16) +[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE) + +`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16. + +IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result. + +All possible 4+ billion floating-point conversions with this library are verified to be correct. + +Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library. + +## Features +Current features include: + +* float16 to float32 conversions use lossless conversion. +* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven". 
+* conversions using pure Go take about 2.65 ns/op on a desktop amd64. +* unit tests provide 100% code coverage and check all possible 4+ billion conversions. +* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc. +* all functions in this library use zero allocs except String(). + +## Status +This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published. + +Current status: + +* core API is done and breaking API changes are unlikely. +* 100% of unit tests pass: + * short mode (`go test -short`) tests around 65765 conversions in 0.005s. + * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s. +* 100% code coverage with both short mode and normal mode. +* tested on amd64 but it should work on all little-endian platforms supported by Go. + +Roadmap: + +* add functions for fast batch conversions leveraging SIMD when supported by hardware. +* speed up unit test when verifying all possible 4+ billion conversions. +* test on additional platforms. + +## Float16 to Float32 Conversion +Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct. + +Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions. + +## Float32 to Float16 Conversion +Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct. + +Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32(). + +Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage. + +## Usage +Install with `go get github.com/x448/float16`. +``` +// Convert float32 to float16 +pi := float32(math.Pi) +pi16 := float16.Fromfloat32(pi) + +// Convert float16 to float32 +pi32 := pi16.Float32() + +// PrecisionFromfloat32() is faster than the overhead of calling a function. +// This example only converts if there's no data loss and input is not a subnormal. +if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact { + pi16 := float16.Fromfloat32(pi) +} +``` + +## Float16 Type and API +Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods. +``` +package float16 // import "github.com/x448/float16" + +// Exported types and consts +type Float16 uint16 +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +// Exported functions +Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding + with identical results to AMD and Intel F16C hardware. NaN inputs + are converted with quiet bit always set on, to be like F16C. + +FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit. + // The "ps" suffix means "preserve signaling". + // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN. + +Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.) 
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number +Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign + +PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow + // (inline and < 1 ns/op) +// Exported methods +(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion +(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f +(f Float16) IsNaN() bool // true if f is not-a-number (NaN) +(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN) +(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf) +(f Float16) IsFinite() bool // true if f is not infinite or NaN +(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN. +(f Float16) Signbit() bool // true if f is negative or negative zero +(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface +``` +See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info. + +## Benchmarks +Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value. + +``` +All functions have zero allocations except float16.String(). + +FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16 +ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32 +Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16 + +PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc. +``` + +## System Requirements +* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions. +* Tested on amd64 but it should also work on all little-endian platforms supported by Go. + +## Special Thanks +Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16. + +## License +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Licensed under [MIT License](LICENSE) diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go new file mode 100644 index 0000000000..1a0e6dad00 --- /dev/null +++ b/vendor/github.com/x448/float16/float16.go @@ -0,0 +1,302 @@ +// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker +// +// Special thanks to Kathryn Long for her Rust implementation +// of float16 at github.com/starkat99/half-rs (MIT license) + +package float16 + +import ( + "math" + "strconv" +) + +// Float16 represents IEEE 754 half-precision floating-point numbers (binary16). +type Float16 uint16 + +// Precision indicates whether the conversion to Float16 is +// exact, subnormal without dropped bits, inexact, underflow, or overflow. +type Precision int + +const ( + + // PrecisionExact is for non-subnormals that don't drop bits during conversion. + // All of these can round-trip. Should always convert to float16. + PrecisionExact Precision = iota + + // PrecisionUnknown is for subnormals that don't drop bits during conversion but + // not all of these can round-trip so precision is unknown without more effort. + // Only 2046 of these can round-trip and the rest cannot round-trip. + PrecisionUnknown + + // PrecisionInexact is for dropped significand bits and cannot round-trip. + // Some of these are subnormals. Cannot round-trip float32->float16->float32. 
+ PrecisionInexact + + // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32. + PrecisionUnderflow + + // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32. + PrecisionOverflow +) + +// PrecisionFromfloat32 returns Precision without performing +// the conversion. Conversions from both Infinity and NaN +// values will always report PrecisionExact even if NaN payload +// or NaN-Quiet-Bit is lost. This function is kept simple to +// allow inlining and run < 0.5 ns/op, to serve as a fast filter. +func PrecisionFromfloat32(f32 float32) Precision { + u32 := math.Float32bits(f32) + + if u32 == 0 || u32 == 0x80000000 { + // +- zero will always be exact conversion + return PrecisionExact + } + + const COEFMASK uint32 = 0x7fffff // 23 least significant bits + const EXPSHIFT uint32 = 23 + const EXPBIAS uint32 = 127 + const EXPMASK uint32 = uint32(0xff) << EXPSHIFT + const DROPMASK uint32 = COEFMASK >> 10 + + exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS) + coef := u32 & COEFMASK + + if exp == 128 { + // +- infinity or NaN + // apps may want to do extra checks for NaN separately + return PrecisionExact + } + + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says, + // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24" + if exp < -24 { + return PrecisionUnderflow + } + if exp > 15 { + return PrecisionOverflow + } + if (coef & DROPMASK) != uint32(0) { + // these include subnormals and non-subnormals that dropped bits + return PrecisionInexact + } + + if exp < -14 { + // Subnormals. Caller may want to test these further. + // There are 2046 subnormals that can successfully round-trip f32->f16->f32 + // and 20 of those 2046 have 32-bit input coef == 0. + // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value" + // so some protocols and libraries will choose to handle subnormals differently + // when deciding to encode them to CBOR float32 vs float16. + return PrecisionUnknown + } + + return PrecisionExact +} + +// Frombits returns the float16 number corresponding to the IEEE 754 binary16 +// representation u16, with the sign bit of u16 and the result in the same bit +// position. Frombits(Bits(x)) == x. +func Frombits(u16 uint16) Float16 { + return Float16(u16) +} + +// Fromfloat32 returns a Float16 value converted from f32. Conversion uses +// IEEE default rounding (nearest int, with ties to even). +func Fromfloat32(f32 float32) Float16 { + return Float16(f32bitsToF16bits(math.Float32bits(f32))) +} + +// ErrInvalidNaNValue indicates a NaN was not received. +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +type float16Error string + +func (e float16Error) Error() string { return string(e) } + +// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both +// signaling and payload. Unlike Fromfloat32(), which can only return +// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN. +// If the result is infinity (sNaN with empty payload), then the +// lowest bit of payload is set to make the result a NaN. +// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN. +// This function was kept simple to be able to inline. 
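As the comment above spells out, Fromfloat32 always quiets a NaN input while FromNaN32ps (defined just below) preserves the signaling bit and payload, returning ErrInvalidNaNValue when the input is not a NaN at all. A small standalone sketch of that difference (the input bit pattern is chosen for illustration):

```go
package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	// A signaling NaN: exponent bits all ones, quiet bit clear, non-zero payload.
	sNaN32 := math.Float32frombits(0x7f800001)

	quiet := float16.Fromfloat32(sNaN32) // quiet bit forced on, matching F16C behaviour
	preserved, err := float16.FromNaN32ps(sNaN32)
	if err != nil {
		// Only returned when the input is not a NaN (float16.ErrInvalidNaNValue).
		panic(err)
	}
	fmt.Printf("quiet=%#04x preserved=%#04x\n", quiet.Bits(), preserved.Bits())
}
```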
+func FromNaN32ps(nan float32) (Float16, error) { + const SNAN = Float16(uint16(0x7c01)) // signalling NaN + + u32 := math.Float32bits(nan) + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if (exp != 0x7f800000) || (coef == 0) { + return SNAN, ErrInvalidNaNValue + } + + u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13)) + + if (u16 & 0x03ff) == 0 { + // result became infinity, make it NaN by setting lowest bit in payload + u16 = u16 | 0x0001 + } + + return Float16(u16), nil +} + +// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN). +// Returned NaN value 0x7e01 has all exponent bits = 1 with the +// first and last bits = 1 in the significand. This is consistent +// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00. +func NaN() Float16 { + return Float16(0x7e01) +} + +// Inf returns a Float16 with an infinity value with the specified sign. +// A sign >= returns positive infinity. +// A sign < 0 returns negative infinity. +func Inf(sign int) Float16 { + if sign >= 0 { + return Float16(0x7c00) + } + return Float16(0x8000 | 0x7c00) +} + +// Float32 returns a float32 converted from f (Float16). +// This is a lossless conversion. +func (f Float16) Float32() float32 { + u32 := f16bitsToF32bits(uint16(f)) + return math.Float32frombits(u32) +} + +// Bits returns the IEEE 754 binary16 representation of f, with the sign bit +// of f and the result in the same bit position. Bits(Frombits(x)) == x. +func (f Float16) Bits() uint16 { + return uint16(f) +} + +// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value. +func (f Float16) IsNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) +} + +// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16 +// “not-a-number” value. +func (f Float16) IsQuietNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0) +} + +// IsInf reports whether f is an infinity (inf). +// A sign > 0 reports whether f is positive inf. +// A sign < 0 reports whether f is negative inf. +// A sign == 0 reports whether f is either inf. +func (f Float16) IsInf(sign int) bool { + return ((f == 0x7c00) && sign >= 0) || + (f == 0xfc00 && sign <= 0) +} + +// IsFinite returns true if f is neither infinite nor NaN. +func (f Float16) IsFinite() bool { + return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00) +} + +// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN. +func (f Float16) IsNormal() bool { + exp := uint16(f) & uint16(0x7c00) + return (exp != uint16(0x7c00)) && (exp != 0) +} + +// Signbit reports whether f is negative or negative zero. +func (f Float16) Signbit() bool { + return (uint16(f) & uint16(0x8000)) != 0 +} + +// String satisfies the fmt.Stringer interface. +func (f Float16) String() string { + return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32) +} + +// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16. +func f16bitsToF32bits(in uint16) uint32 { + // All 65536 conversions with this were confirmed to be correct + // by Montgomery Edwards⁴⁴⁸ (github.com/x448). 
+ + sign := uint32(in&0x8000) << 16 // sign for 32-bit + exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit + coef := uint32(in&0x03ff) << 13 // significand for 32-bit + + if exp == 0x1f { + if coef == 0 { + // infinity + return sign | 0x7f800000 | coef + } + // NaN + return sign | 0x7fc00000 | coef + } + + if exp == 0 { + if coef == 0 { + // zero + return sign + } + + // normalize subnormal numbers + exp++ + for coef&0x7f800000 == 0 { + coef <<= 1 + exp-- + } + coef &= 0x007fffff + } + + return sign | ((exp + (0x7f - 0xf)) << 23) | coef +} + +// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32. +// Conversion rounds to nearest integer with ties to even. +func f32bitsToF16bits(u32 uint32) uint16 { + // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448). + // All 4294967296 conversions with this were confirmed to be correct by x448. + // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license. + + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if exp == 0x7f800000 { + // NaN or Infinity + nanBit := uint32(0) + if coef != 0 { + nanBit = uint32(0x0200) + } + return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13)) + } + + halfSign := sign >> 16 + + unbiasedExp := int32(exp>>23) - 127 + halfExp := unbiasedExp + 15 + + if halfExp >= 0x1f { + return uint16(halfSign | uint32(0x7c00)) + } + + if halfExp <= 0 { + if 14-halfExp > 24 { + return uint16(halfSign) + } + coef := coef | uint32(0x00800000) + halfCoef := coef >> uint32(14-halfExp) + roundBit := uint32(1) << uint32(13-halfExp) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + halfCoef++ + } + return uint16(halfSign | halfCoef) + } + + uHalfExp := uint32(halfExp) << 10 + halfCoef := coef >> 13 + roundBit := uint32(0x00001000) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + return uint16((halfSign | uHalfExp | halfCoef) + 1) + } + return uint16(halfSign | uHalfExp | halfCoef) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go index 6ca8d9ad6c..652aa48b85 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -15,16 +15,15 @@ import ( // ArrayCodec is the Codec used for bsoncore.Array values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. +// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. type ArrayCodec struct{} var defaultArrayCodec = NewArrayCodec() // NewArrayCodec returns an ArrayCodec. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. +// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See +// [ArrayCodec] for more details. func NewArrayCodec() *ArrayCodec { return &ArrayCodec{} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go index dde3e76815..0134b5a94b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -17,13 +17,28 @@ import ( // ByteSliceCodec is the Codec used for []byte values. 
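(Editorial aside, not part of the vendored files: the newly vendored github.com/x448/float16 package above exposes a small conversion API. The sketch below exercises only the exported names shown in float16.go and its README; the example program and its input values are illustrative.)

```
package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	// Round-trip a float32 through binary16. math.Pi is lossy because the
	// 23-bit float32 significand is reduced to 10 bits in binary16.
	f16 := float16.Fromfloat32(math.Pi)
	fmt.Printf("bits=0x%04x back=%v\n", f16.Bits(), f16.Float32())

	// PrecisionFromfloat32 reports whether a conversion would be lossy
	// without actually performing it.
	fmt.Println(float16.PrecisionFromfloat32(1.5) == float16.PrecisionExact)        // true: 1.5 is exactly representable
	fmt.Println(float16.PrecisionFromfloat32(math.Pi) == float16.PrecisionInexact)  // true: significand bits are dropped
	fmt.Println(float16.PrecisionFromfloat32(1e9) == float16.PrecisionOverflow)     // true: exceeds the binary16 maximum (~65504)
}
```

Fromfloat32 rounds to nearest with ties to even (per its doc comment), so lossy inputs such as math.Pi still yield the closest representable binary16 value.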
// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. +// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver +// 2.0. To configure the byte slice encode and decode behavior, use the +// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to encode nil byte slices as empty +// BSON binary values, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilByteSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in ByteSliceCodec for the +// corresponding settings. type ByteSliceCodec struct { // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values // instead of BSON null. // - // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead. + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty + // instead. EncodeNilAsEmpty bool } @@ -38,8 +53,8 @@ var ( // NewByteSliceCodec returns a ByteSliceCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. +// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See +// [ByteSliceCodec] for more details. func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) codec := ByteSliceCodec{} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 2ce119731b..7e08aab35e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -41,7 +41,7 @@ func newDefaultStructCodec() *StructCodec { if err != nil { // This function is called from the codec registration path, so errors can't be propagated. If there's an error // constructing the StructCodec, we panic to avoid losing it. 
- panic(fmt.Errorf("error creating default StructCodec: %v", err)) + panic(fmt.Errorf("error creating default StructCodec: %w", err)) } return codec } @@ -178,7 +178,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe for { key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } else if err != nil { return err @@ -1379,7 +1379,7 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value keyType := val.Type().Key() for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -1675,7 +1675,7 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR idx := 0 for { vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { + if errors.Is(err, bsonrw.ErrEOA) { break } if err != nil { @@ -1787,7 +1787,7 @@ func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr b elems := make([]reflect.Value, 0) for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 4ab14a668c..4751ae995e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -343,7 +343,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum } currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -352,7 +352,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -418,7 +418,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -427,7 +427,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -487,7 +487,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -496,7 +496,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index 94f7dcf1eb..098368f071 100644 --- 
a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -17,13 +17,27 @@ import ( // EmptyInterfaceCodec is the Codec used for interface{} values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// EmptyInterfaceCodec registered. +// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go +// Driver 2.0. To configure the empty interface encode and decode behavior, use +// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to unmarshal BSON binary field +// values as a Go byte slice, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// BinaryAsSlice: true, +// }) +// +// See the deprecation notice for each field in EmptyInterfaceCodec for the +// corresponding settings. type EmptyInterfaceCodec struct { // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. // - // Deprecated: Use bson.Decoder.BinaryAsSlice instead. + // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. DecodeBinaryAsSlice bool } @@ -38,8 +52,8 @@ var ( // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// EmptyInterfaceCodec registered. +// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See +// [EmptyInterfaceCodec] for more details. func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index 325c1738ab..d7e00ffa8d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -8,6 +8,7 @@ package bsoncodec import ( "encoding" + "errors" "fmt" "reflect" "strconv" @@ -21,25 +22,40 @@ var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// MapCodec registered. +// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To +// configure the map encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON +// documents, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilMapAsEmpty: true, +// }) +// +// See the deprecation notice for each field in MapCodec for the corresponding +// settings. 
type MapCodec struct { // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination // value passed to Decode before unmarshaling BSON documents into them. // - // Deprecated: Use bson.Decoder.ZeroMaps instead. + // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. DecodeZerosMap bool // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of // BSON null. // - // Deprecated: Use bson.Encoder.NilMapAsEmpty instead. + // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. EncodeNilAsEmpty bool // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name // strings using fmt.Sprintf() instead of the default string conversion logic. // - // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead. + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or + // options.BSONOptions.StringifyMapKeysWithFmt instead. EncodeKeysWithStringer bool } @@ -61,8 +77,8 @@ type KeyUnmarshaler interface { // NewMapCodec returns a MapCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// MapCodec registered. +// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See +// [MapCodec] for more details. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) @@ -128,7 +144,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v } currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -137,7 +153,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -200,7 +216,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -313,7 +329,7 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, if mc.EncodeKeysWithStringer { parsed, err := strconv.ParseFloat(key, 64) if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) } keyVal = reflect.ValueOf(parsed) break diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index e5923230b0..ddfa4a33e1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -18,8 +18,16 @@ var _ ValueDecoder = &PointerCodec{} // PointerCodec is the Codec used for pointers. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// PointerCodec registered. +// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. 
To +// override the default pointer encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for pointers. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) +// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) type PointerCodec struct { ecache typeEncoderCache dcache typeDecoderCache @@ -27,8 +35,8 @@ type PointerCodec struct { // NewPointerCodec returns a PointerCodec that has been initialized. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// PointerCodec registered. +// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See +// [PointerCodec] for more details. func NewPointerCodec() *PointerCodec { return &PointerCodec{} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index a43daf005f..14c9fd2564 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "errors" "fmt" "reflect" @@ -20,8 +21,22 @@ var defaultSliceCodec = NewSliceCodec() // SliceCodec is the Codec used for slice values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. +// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To +// configure the slice encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go slices as empty +// BSON arrays, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in SliceCodec for the corresponding +// settings. type SliceCodec struct { // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of // BSON null. @@ -32,8 +47,8 @@ type SliceCodec struct { // NewSliceCodec returns a MapCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. +// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See +// [SliceCodec] for more details. func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) 
@@ -93,7 +108,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -102,7 +117,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index ff931b7253..a8f885a854 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -17,8 +17,16 @@ import ( // StringCodec is the Codec used for string values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. +// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To +// override the default string encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for strings. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.String, myStringEncoder) +// reg.RegisterKindDecoder(reflect.String, myStringDecoder) type StringCodec struct { // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. // If false, a string made from the raw object ID bytes will be used. Defaults to true. @@ -38,8 +46,8 @@ var ( // NewStringCodec returns a StringCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. +// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See +// [StringCodec] for more details. func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { stringOpt := bsonoptions.MergeStringCodecOptions(opts...) return &StringCodec{*stringOpt.DecodeObjectIDAsHex} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 4cde0a4d6b..f8d9690c13 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -60,8 +60,22 @@ type Zeroer interface { // StructCodec is the Codec used for struct values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. +// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0. +// To configure the struct encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
+// +// For example, to configure a mongo.Client to omit zero-value structs when +// using the "omitempty" struct tag, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// OmitZeroStruct: true, +// }) +// +// See the deprecation notice for each field in StructCodec for the corresponding +// settings. type StructCodec struct { cache sync.Map // map[reflect.Type]*structDescription parser StructTagParser @@ -69,7 +83,7 @@ type StructCodec struct { // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the // destination value passed to Decode before unmarshaling BSON documents into them. // - // Deprecated: Use bson.Decoder.ZeroStructs instead. + // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. DecodeZeroStruct bool // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the @@ -82,7 +96,7 @@ type StructCodec struct { // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag // option is set. // - // Deprecated: Use bson.Encoder.OmitZeroStruct instead. + // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. EncodeOmitDefaultStruct bool // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. @@ -95,7 +109,8 @@ type StructCodec struct { // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The // default value is true. // - // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead. + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or + // options.BSONOptions.ErrorOnInlineDuplicates instead. OverwriteDuplicatedInlinedFields bool } @@ -104,8 +119,8 @@ var _ ValueDecoder = &StructCodec{} // NewStructCodec returns a StructCodec that uses p for struct tag parsing. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. +// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See +// [StructCodec] for more details. func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { if p == nil { return nil, errors.New("a StructTagParser must be provided to NewStructCodec") @@ -164,11 +179,11 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - if err != nil && err != errInvalidValue { + if err != nil && !errors.Is(err, errInvalidValue) { return err } - if err == errInvalidValue { + if errors.Is(err, errInvalidValue) { if desc.omitEmpty { continue } @@ -189,17 +204,17 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val encoder := desc.encoder - var zero bool + var empty bool if cz, ok := encoder.(CodecZeroer); ok { - zero = cz.IsTypeZero(rv.Interface()) + empty = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { - // isZero will not treat an interface rv as an interface, so we need to check for the - // zero interface separately. - zero = rv.IsNil() + // isEmpty will not treat an interface rv as an interface, so we need to check for the + // nil interface separately. 
+ empty = rv.IsNil() } else { - zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } - if desc.omitEmpty && zero { + if desc.omitEmpty && empty { continue } @@ -239,8 +254,8 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val } func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { + var de *DecodeError + if !errors.As(original, &de) { return &DecodeError{ keys: []string{key}, wrapped: original, @@ -308,7 +323,7 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val for { name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -391,12 +406,15 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return nil } -func isZero(v reflect.Value, omitZeroStruct bool) bool { +func isEmpty(v reflect.Value, omitZeroStruct bool) bool { kind := v.Kind() if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { return v.Interface().(Zeroer).IsZero() } - if kind == reflect.Struct { + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Struct: if !omitZeroStruct { return false } @@ -410,7 +428,7 @@ func isZero(v reflect.Value, omitZeroStruct bool) bool { if ff.PkgPath != "" && !ff.Anonymous { continue // Private field } - if !isZero(v.Field(i), omitZeroStruct) { + if !isEmpty(v.Field(i), omitZeroStruct) { return false } } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index 7b005a9958..22fb762c41 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -23,12 +23,26 @@ const ( // TimeCodec is the Codec used for time.Time values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. +// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. +// To configure the time.Time encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to ..., use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// UseLocalTimeZone: true, +// }) +// +// See the deprecation notice for each field in TimeCodec for the corresponding +// settings. type TimeCodec struct { // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. // - // Deprecated: Use bson.Decoder.UseLocalTimeZone instead. + // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone + // instead. UseLocalTimeZone bool } @@ -42,8 +56,8 @@ var ( // NewTimeCodec returns a TimeCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. +// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See +// [TimeCodec] for more details. 
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 7eb1069050..8525472769 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -18,13 +18,27 @@ import ( // UIntCodec is the Codec used for uint values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. +// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To +// configure the uint encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal Go uint values as the +// minimum BSON int size that can represent the value, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// IntMinSize: true, +// }) +// +// See the deprecation notice for each field in UIntCodec for the corresponding +// settings. type UIntCodec struct { // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. // - // Deprecated: Use bson.Encoder.IntMinSize instead. + // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. EncodeToMinSize bool } @@ -38,8 +52,8 @@ var ( // NewUIntCodec returns a UIntCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. +// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See +// [UIntCodec] for more details. func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) 
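(Editorial aside: the deprecation notices added to the bsoncodec files above all point to the same replacement, per-Encoder/Decoder configuration or, for a mongo.Client, options.BSONOptions via SetBSONOptions. The sketch below combines several of the option fields named in those notices into one client configuration; the URI is a placeholder and error handling is minimal.)

```
package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// One BSONOptions value replaces the per-codec knobs deprecated above.
	bsonOpts := &options.BSONOptions{
		NilMapAsEmpty:       true, // was MapCodec.EncodeNilAsEmpty
		NilSliceAsEmpty:     true, // was SliceCodec.EncodeNilAsEmpty
		NilByteSliceAsEmpty: true, // was ByteSliceCodec.EncodeNilAsEmpty
		BinaryAsSlice:       true, // was EmptyInterfaceCodec.DecodeBinaryAsSlice
		OmitZeroStruct:      true, // was StructCodec.EncodeOmitDefaultStruct
		IntMinSize:          true, // was UIntCodec.EncodeToMinSize
	}

	client, err := mongo.Connect(context.Background(),
		options.Client().ApplyURI("mongodb://localhost:27017").SetBSONOptions(bsonOpts))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(context.Background())
}
```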
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 4d279b7fee..1e25570b85 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" @@ -442,7 +443,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { for { vr, err := ar.ReadValue() - if err == ErrEOA { + if errors.Is(err, ErrEOA) { break } if err != nil { @@ -466,7 +467,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { for { key, vr, err := dr.ReadElement() - if err == ErrEOD { + if errors.Is(err, ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 54c76bf746..bb52a0ec3d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -313,7 +313,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { // convert hex to bytes bytes, err := hex.DecodeString(uuidNoHyphens) if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go index 2aca37a91f..59ddfc4485 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" "sync" @@ -613,7 +614,7 @@ func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { name, t, err := ejvr.p.readKey() if err != nil { - if err == ErrEOD { + if errors.Is(err, ErrEOD) { if ejvr.stack[ejvr.frame].mode == mCodeWithScope { _, err := ejvr.p.peekType() if err != nil { @@ -640,7 +641,7 @@ func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { t, err := ejvr.p.peekType() if err != nil { - if err == ErrEOA { + if errors.Is(err, ErrEOA) { ejvr.pop() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go index cd4843a3a4..43f3e4f383 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go @@ -58,7 +58,7 @@ func (js *jsonScanner) nextToken() (*jsonToken, error) { c, err = js.readNextByte() } - if err == io.EOF { + if errors.Is(err, io.EOF) { return &jsonToken{t: jttEOF}, nil } else if err != nil { return nil, err @@ -198,7 +198,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { for { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -209,7 +209,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { case '\\': c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -248,7 +248,7 @@ func (js *jsonScanner) scanString() (*jsonToken, 
error) { if utf16.IsSurrogate(rn) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -264,7 +264,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -325,17 +325,17 @@ func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) { c5, err := js.readNextByte() - if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) { + if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: true, p: p}, nil - } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) { + } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttNull, v: nil, p: p}, nil } else if bytes.Equal([]byte("fals"), lit) { if c5 == 'e' { c5, err = js.readNextByte() - if isValueTerminator(c5) || err == io.EOF { + if isValueTerminator(c5) || errors.Is(err, io.EOF) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: false, p: p}, nil } @@ -384,7 +384,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { for { c, err = js.readNextByte() - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return nil, err } @@ -413,7 +413,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else { s = nssInvalid @@ -430,7 +430,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawIntegerDigits @@ -455,7 +455,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawFractionDigits @@ -490,7 +490,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawExponentDigits diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 048b5eb998..af6098475e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -6,9 +6,9 @@ // Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to // store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org. -// The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description -// of the codec system and examples of registering custom codecs, see the bsoncodec package. 
For additional information and -// usage examples, check out the [Work with BSON] page in the Go Driver docs site. +// The BSON library handles marshaling and unmarshaling of values through a configurable codec system. For a description +// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information +// and usage examples, check out the [Work with BSON] page in the Go Driver docs site. // // # Raw BSON // @@ -38,7 +38,7 @@ // bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} // bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // -// When decoding BSON to a D or M, the following type mappings apply when unmarshalling: +// When decoding BSON to a D or M, the following type mappings apply when unmarshaling: // // 1. BSON int32 unmarshals to an int32. // 2. BSON int64 unmarshals to an int64. @@ -62,83 +62,78 @@ // 20. BSON DBPointer unmarshals to a primitive.DBPointer. // 21. BSON symbol unmarshals to a primitive.Symbol. // -// The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: +// The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are: // // 1. time.Time marshals to a BSON datetime. // 2. int8, int16, and int32 marshal to a BSON int32. // 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 // otherwise. -// 4. int64 marshals to BSON int64. +// 4. int64 marshals to BSON int64 (unless [Encoder.IntMinSize] is set). // 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// 6. uint, uint32, and uint64 marshal to a BSON int64 (unless [Encoder.IntMinSize] is set). +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshaling a BSON null or // undefined value into a string will yield the empty string.). // // # Structs // -// Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended +// Structs can be marshaled/unmarshaled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshaled or unmarshaled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshaling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshaled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is -// marshalled as a BSON null value. +// 4. A pointer field is marshaled as the underlying type if the pointer is non-nil. 
If the pointer is nil, it is +// marshaled as a BSON null value. // -// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents -// unmarshalled into an interface{} field will be unmarshalled as a D. +// 5. When unmarshaling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// unmarshaled into an interface{} field will be unmarshaled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. // // This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new -// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON tags -// are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: +// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON +// tags are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: // // structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. -// The name may be empty in order to specify options without overriding the default field name. The following options can be used -// to configure behavior: -// -// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to -// the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if -// their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). -// Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered -// empty if their value is nil. By default, structs are only considered empty if the struct type implements the -// bsoncodec.Zeroer interface and the IsZero method returns true. Struct fields whose types do not implement Zeroer are -// never considered empty and will be marshalled as embedded documents. +// The name may be empty in order to specify options without overriding the default field name. The following options can +// be used to configure behavior: +// +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will be omitted from the marshaling if +// the field has an empty value, defined as false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. // NOTE: It is recommended that this tag be used for all slice and map fields. // // 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of -// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other -// types, this tag is ignored. +// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For +// other types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, -// it will be unmarshalled as 3. 
If this tag is not specified, the decoder will throw an error if the value cannot be -// decoded without losing precision. For float64 or non-numeric types, this tag is ignored. +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles +// unmarshaled into that field will be truncated at the decimal point. For example, if 3.14 is unmarshaled into a +// field of type int, it will be unmarshaled as 3. If this tag is not specified, the decoder will throw an error if +// the value cannot be decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // // 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when -// marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be -// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a -// map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be -// {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are -// duplicated fields in the resulting document when an inlined struct is marshalled, the inlined field will be overwritten. -// If there are duplicated fields in the resulting document when an inlined map is marshalled, an error will be returned. -// This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be -// marshalled. For fields that are not maps or structs, this tag is ignored. -// -// # Marshalling and Unmarshalling -// -// Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. +// marshaling and "un-flattened" when unmarshaling. This means that all of the fields in that struct/map will be +// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, +// if a map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will +// be {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If +// there are duplicated fields in the resulting document when an inlined struct is marshaled, the inlined field will +// be overwritten. If there are duplicated fields in the resulting document when an inlined map is marshaled, an +// error will be returned. This tag can be used with fields that are pointers to structs. If an inlined pointer field +// is nil, it will not be marshaled. For fields that are not maps or structs, this tag is ignored. +// +// # Marshaling and Unmarshaling +// +// Manually marshaling and unmarshaling can be done with the Marshal and Unmarshal family of functions. // // [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/ package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index 24ab58fc49..08c39514be 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -164,9 +164,6 @@ func (d Decimal128) BigInt() (*big.Int, int, error) { // Would be handled by the logic below, but that's trivial and common. 
if high == 0 && low == 0 && exp == 0 { - if posSign { - return new(big.Int), 0, nil - } return new(big.Int), 0, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index 9bbaffac26..c130e3ff19 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -183,7 +183,7 @@ func processUniqueBytes() [5]byte { var b [5]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return b @@ -193,7 +193,7 @@ func readRandomUint32() uint32 { var b [4]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore deleted file mode 100644 index 74a6db472e..0000000000 --- a/vendor/go.opencensus.io/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -/.idea/ - -# go.opencensus.io/exporter/aws -/exporter/aws/ - -# Exclude vendor, use dep ensure after checkout: -/vendor/github.com/ -/vendor/golang.org/ -/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS deleted file mode 100644 index e491a9e7f7..0000000000 --- a/vendor/go.opencensus.io/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md deleted file mode 100644 index 1ba3962c8b..0000000000 --- a/vendor/go.opencensus.io/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult [GitHub Help] for more -information on using pull requests. - -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ - -## Instructions - -Fork the repo, checkout the upstream repo to your GOPATH by: - -``` -$ go get -d go.opencensus.io -``` - -Add your fork as an origin: - -``` -cd $(go env GOPATH)/src/go.opencensus.io -git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git -``` - -Run tests: - -``` -$ make install-tools # Only first time. -$ make -``` - -Checkout a new branch, make modifications and push the branch to your fork: - -``` -$ git checkout -b feature -# edit files -$ git commit -$ git push fork feature -``` - -Open a pull request against the main opencensus-go repo. 
- -## General Notes -This project uses Appveyor and Travis for CI. - -The dependencies are managed with `go mod` if you work with the sources under your -`$GOPATH` you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE deleted file mode 100644 index 7a4a3ea242..0000000000 --- a/vendor/go.opencensus.io/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile deleted file mode 100644 index d896edc996..0000000000 --- a/vendor/go.opencensus.io/Makefile +++ /dev/null @@ -1,97 +0,0 @@ -# TODO: Fix this on windows. -ALL_SRC := $(shell find . -name '*.go' \ - -not -path './vendor/*' \ - -not -path '*/gen-go/*' \ - -type f | sort) -ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) - -GOTEST_OPT?=-v -race -timeout 30s -GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic -GOTEST=go test -GOIMPORTS=goimports -GOLINT=golint -GOVET=go vet -EMBEDMD=embedmd -# TODO decide if we need to change these names. -TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" -TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" -README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') - -.DEFAULT_GOAL := imports-lint-vet-embedmd-test - -.PHONY: imports-lint-vet-embedmd-test -imports-lint-vet-embedmd-test: imports lint vet embedmd test - -# TODO enable test-with-coverage in tavis -.PHONY: travis-ci -travis-ci: imports lint vet embedmd test test-386 - -all-pkgs: - @echo $(ALL_PKGS) | tr ' ' '\n' | sort - -all-srcs: - @echo $(ALL_SRC) | tr ' ' '\n' | sort - -.PHONY: test -test: - $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) - -.PHONY: test-386 -test-386: - GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) - -.PHONY: test-with-coverage -test-with-coverage: - $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) - -.PHONY: imports -imports: - @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ - if [ "$$IMPORTSOUT" ]; then \ - echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ - echo "$$IMPORTSOUT\n"; \ - exit 1; \ - else \ - echo "Imports finished successfully"; \ - fi - -.PHONY: lint -lint: - @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ - if [ "$$LINTOUT" ]; then \ - echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ - echo "$$LINTOUT\n"; \ - exit 1; \ - else \ - echo "Lint finished successfully"; \ - fi - -.PHONY: vet -vet: - # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" - @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ - if [ "$$VETOUT" ]; then \ - echo "$(GOVET) FAILED => go vet the following files:\n"; \ - echo "$$VETOUT\n"; \ - exit 1; \ - else \ - echo "Vet finished successfully"; \ - fi - -.PHONY: embedmd -embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ - if [ "$$EMBEDMDOUT" ]; then \ - echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ - echo "$$EMBEDMDOUT\n"; \ - exit 1; \ - else \ - echo "Embedmd finished successfully"; \ - fi - -.PHONY: install-tools -install-tools: - go install golang.org/x/lint/golint@latest - go install golang.org/x/tools/cmd/cover@latest - go install golang.org/x/tools/cmd/goimports@latest - go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md deleted file mode 100644 index 1d7e837116..0000000000 --- a/vendor/go.opencensus.io/README.md +++ /dev/null @@ -1,267 +0,0 @@ -# OpenCensus Libraries for Go - -[![Build Status][travis-image]][travis-url] -[![Windows Build Status][appveyor-image]][appveyor-url] -[![GoDoc][godoc-image]][godoc-url] -[![Gitter chat][gitter-image]][gitter-url] - -OpenCensus Go is a Go implementation of OpenCensus, a toolkit for -collecting application performance and behavior monitoring data. -Currently it consists of three major components: tags, stats and tracing. - -#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). - -## Installation - -``` -$ go get -u go.opencensus.io -``` - -The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). -The use of vendoring or a dependency management tool is recommended. - -## Prerequisites - -OpenCensus Go libraries require Go 1.8 or later. - -## Getting Started - -The easiest way to get started using OpenCensus in your application is to use an existing -integration with your RPC framework: - -* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) -* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) -* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) -* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) -* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) -* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) -* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) -* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) -* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) -* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) - -If you're using a framework not listed here, you could either implement your own middleware for your -framework or use [custom stats](#stats) and [spans](#spans) directly in your application. - -## Exporters - -OpenCensus can export instrumentation data to various backends. 
-OpenCensus has exporter implementations for the following, users -can implement their own exporters by implementing the exporter interfaces -([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), -[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): - -* [Prometheus][exporter-prom] for stats -* [OpenZipkin][exporter-zipkin] for traces -* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces -* [Jaeger][exporter-jaeger] for traces -* [AWS X-Ray][exporter-xray] for traces -* [Datadog][exporter-datadog] for stats and traces -* [Graphite][exporter-graphite] for stats -* [Honeycomb][exporter-honeycomb] for traces -* [New Relic][exporter-newrelic] for stats and traces - -## Overview - -![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) - -In a microservices environment, a user request may go through -multiple services until there is a response. OpenCensus allows -you to instrument your services and collect diagnostics data all -through your services end-to-end. - -## Tags - -Tags represent propagated key-value pairs. They are propagated using `context.Context` -in the same process or can be encoded to be transmitted on the wire. Usually, this will -be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` -for gRPC. - -Package `tag` allows adding or modifying tags in the current context. - -[embedmd]:# (internal/readme/tags.go new) -```go -ctx, err := tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Upsert(userIDKey, "cde36753ed"), -) -if err != nil { - log.Fatal(err) -} -``` - -## Stats - -OpenCensus is a low-overhead framework even if instrumentation is always enabled. -In order to be so, it is optimized to make recording of data points fast -and separate from the data aggregation. - -OpenCensus stats collection happens in two stages: - -* Definition of measures and recording of data points -* Definition of views and aggregation of the recorded data - -### Recording - -Measurements are data points associated with a measure. -Recording implicitly tags the set of Measurements with the tags from the -provided context: - -[embedmd]:# (internal/readme/stats.go record) -```go -stats.Record(ctx, videoSize.M(102478)) -``` - -### Views - -Views are how Measures are aggregated. You can think of them as queries over the -set of recorded data points (measurements). - -Views have two parts: the tags to group by and the aggregation type used. - -Currently three types of aggregations are supported: -* CountAggregation is used to count the number of times a sample was recorded. -* DistributionAggregation is used to provide a histogram of the values of the samples. -* SumAggregation is used to sum up all sample values. - -[embedmd]:# (internal/readme/stats.go aggs) -```go -distAgg := view.Distribution(1<<32, 2<<32, 3<<32) -countAgg := view.Count() -sumAgg := view.Sum() -``` - -Here we create a view with the DistributionAggregation over our measure. - -[embedmd]:# (internal/readme/stats.go view) -```go -if err := view.Register(&view.View{ - Name: "example.com/video_size_distribution", - Description: "distribution of processed video size over time", - Measure: videoSize, - Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), -}); err != nil { - log.Fatalf("Failed to register view: %v", err) -} -``` - -Register begins collecting data for the view. Registered views' data will be -exported via the registered exporters. 
- -## Traces - -A distributed trace tracks the progression of a single user request as -it is handled by the services and processes that make up an application. -Each step is called a span in the trace. Spans include metadata about the step, -including especially the time spent in the step, called the span’s latency. - -Below you see a trace and several spans underneath it. - -![Traces and spans](https://i.imgur.com/7hZwRVj.png) - -### Spans - -Span is the unit step in a trace. Each span has a name, latency, status and -additional metadata. - -Below we are starting a span for a cache read and ending it -when we are done: - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -### Propagation - -Spans can have parents or can be root spans if they don't have any parents. -The current span is propagated in-process and across the network to allow associating -new child spans with the parent. - -In the same process, `context.Context` is used to propagate spans. -`trace.StartSpan` creates a new span as a root if the current context -doesn't contain a span. Or, it creates a child of the span that is -already in current context. The returned context can be used to keep -propagating the newly created span in the current context. - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -Across the network, OpenCensus provides different propagation -methods for different protocols. - -* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). -* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) - by default but can be configured to use a custom propagation method by setting another - [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). - -## Execution Tracer - -With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. -See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) -for an example of their mutual use. - -## Profiles - -OpenCensus tags can be applied as profiler labels -for users who are on Go 1.9 and above. - -[embedmd]:# (internal/readme/tags.go profiler) -```go -ctx, err = tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Insert(userIDKey, "fff0989878"), -) -if err != nil { - log.Fatal(err) -} -tag.Do(ctx, func(ctx context.Context) { - // Do work. - // When profiling is on, samples will be - // recorded with the key/values from the tag map. -}) -``` - -A screenshot of the CPU profile from the program above: - -![CPU profile](https://i.imgur.com/jBKjlkw.png) - -## Deprecation Policy - -Before version 1.0.0, the following deprecation policy will be observed: - -No backwards-incompatible changes will be made except for the removal of symbols that have -been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release -removing the *Deprecated* functionality will be made no sooner than 28 days after the first -release in which the functionality was marked *Deprecated*. 
- -[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master -[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go -[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true -[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master -[godoc-image]: https://godoc.org/go.opencensus.io?status.svg -[godoc-url]: https://godoc.org/go.opencensus.io -[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg -[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge - - -[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap -[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace - -[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus -[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver -[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin -[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger -[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws -[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog -[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite -[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter -[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml deleted file mode 100644 index d08f0edaff..0000000000 --- a/vendor/go.opencensus.io/appveyor.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "{build}" - -platform: x64 - -clone_folder: c:\gopath\src\go.opencensus.io - -environment: - GOPATH: 'c:\gopath' - GO111MODULE: 'on' - CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 - -stack: go 1.11 - -before_test: - - go version - - go env - -build: false -deploy: false - -test_script: - - cd %APPVEYOR_BUILD_FOLDER% - - go build -v .\... - - go test -v .\... # No -race because cgo is disabled diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go deleted file mode 100644 index 81dc7183ec..0000000000 --- a/vendor/go.opencensus.io/internal/internal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opencensus.io/internal" - -import ( - "fmt" - "time" - - opencensus "go.opencensus.io" -) - -// UserAgent is the user agent to be added to the outgoing -// requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) - -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. 
-// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) -} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go deleted file mode 100644 index de8ccf236c..0000000000 --- a/vendor/go.opencensus.io/internal/sanitize.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "strings" - "unicode" -) - -const labelKeySizeLimit = 100 - -// Sanitize returns a string that is trunacated to 100 characters if it's too -// long, and replaces non-alphanumeric characters to underscores. -func Sanitize(s string) string { - if len(s) == 0 { - return s - } - if len(s) > labelKeySizeLimit { - s = s[:labelKeySizeLimit] - } - s = strings.Map(sanitizeRune, s) - if unicode.IsDigit(rune(s[0])) { - s = "key_" + s - } - if s[0] == '_' { - s = "key" + s - } - return s -} - -// converts anything that is not a letter or digit to an underscore -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - // Everything else turns into an underscore - return '_' -} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go deleted file mode 100644 index 41b2c3fc03..0000000000 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package tagencoding contains the tag encoding -// used interally by the stats collector. -package tagencoding // import "go.opencensus.io/internal/tagencoding" - -// Values represent the encoded buffer for the values. -type Values struct { - Buffer []byte - WriteIndex int - ReadIndex int -} - -func (vb *Values) growIfRequired(expected int) { - if len(vb.Buffer)-vb.WriteIndex < expected { - tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) - copy(tmp, vb.Buffer) - vb.Buffer = tmp - } -} - -// WriteValue is the helper method to encode Values from map[Key][]byte. 
-func (vb *Values) WriteValue(v []byte) { - length := len(v) & 0xff - vb.growIfRequired(1 + length) - - // writing length of v - vb.Buffer[vb.WriteIndex] = byte(length) - vb.WriteIndex++ - - if length == 0 { - // No value was encoded for this key - return - } - - // writing v - copy(vb.Buffer[vb.WriteIndex:], v[:length]) - vb.WriteIndex += length -} - -// ReadValue is the helper method to decode Values to a map[Key][]byte. -func (vb *Values) ReadValue() []byte { - // read length of v - length := int(vb.Buffer[vb.ReadIndex]) - vb.ReadIndex++ - if length == 0 { - // No value was encoded for this key - return nil - } - - // read value of v - v := make([]byte, length) - endIdx := vb.ReadIndex + length - copy(v, vb.Buffer[vb.ReadIndex:endIdx]) - vb.ReadIndex = endIdx - return v -} - -// Bytes returns a reference to already written bytes in the Buffer. -func (vb *Values) Bytes() []byte { - return vb.Buffer[:vb.WriteIndex] -} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go deleted file mode 100644 index 073af7b473..0000000000 --- a/vendor/go.opencensus.io/internal/traceinternals.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "time" -) - -// Trace allows internal access to some trace functionality. -// TODO(#412): remove this -var Trace interface{} - -// LocalSpanStoreEnabled true if the local span store is enabled. -var LocalSpanStoreEnabled bool - -// BucketConfiguration stores the number of samples to store for span buckets -// for successful and failed spans for a particular span name. -type BucketConfiguration struct { - Name string - MaxRequestsSucceeded int - MaxRequestsErrors int -} - -// PerMethodSummary is a summary of the spans stored for a single span name. -type PerMethodSummary struct { - Active int - LatencyBuckets []LatencyBucketSummary - ErrorBuckets []ErrorBucketSummary -} - -// LatencyBucketSummary is a summary of a latency bucket. -type LatencyBucketSummary struct { - MinLatency, MaxLatency time.Duration - Size int -} - -// ErrorBucketSummary is a summary of an error bucket. -type ErrorBucketSummary struct { - ErrorCode int32 - Size int -} diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go deleted file mode 100644 index 52a7b3bf85..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metricdata contains the metrics data model. -// -// This is an EXPERIMENTAL package, and may change in arbitrary ways without -// notice. -package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go deleted file mode 100644 index 12695ce2dc..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/exemplar.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Exemplars keys. -const ( - AttachmentKeySpanContext = "SpanContext" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -// -// Their purpose is to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. -type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go deleted file mode 100644 index aadae41e6a..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/label.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -// LabelKey represents key of a label. It has optional -// description attribute. -type LabelKey struct { - Key string - Description string -} - -// LabelValue represents the value of a label. -// The zero value represents a missing label value, which may be treated -// differently to an empty string value by some back ends. -type LabelValue struct { - Value string // string value of the label - Present bool // flag that indicated whether a value is present or not -} - -// NewLabelValue creates a new non-nil LabelValue that represents the given string. 
-func NewLabelValue(val string) LabelValue { - return LabelValue{Value: val, Present: true} -} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go deleted file mode 100644 index 8293712c77..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/metric.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" - - "go.opencensus.io/resource" -) - -// Descriptor holds metadata about a metric. -type Descriptor struct { - Name string // full name of the metric - Description string // human-readable description - Unit Unit // units for the measure - Type Type // type of measure - LabelKeys []LabelKey // label keys -} - -// Metric represents a quantity measured against a resource with different -// label value combinations. -type Metric struct { - Descriptor Descriptor // metric descriptor - Resource *resource.Resource // resource against which this was measured - TimeSeries []*TimeSeries // one time series for each combination of label values -} - -// TimeSeries is a sequence of points associated with a combination of label -// values. -type TimeSeries struct { - LabelValues []LabelValue // label values, same order as keys in the metric descriptor - Points []Point // points sequence - StartTime time.Time // time we started recording this time series -} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go deleted file mode 100644 index 7fe057b19c..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/point.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Point is a single data point of a time series. -type Point struct { - // Time is the point in time that this point represents in a time series. - Time time.Time - // Value is the value of this point. Prefer using ReadValue to switching on - // the value type, since new value types might be added. - Value interface{} -} - -//go:generate stringer -type ValueType - -// NewFloat64Point creates a new Point holding a float64 value. -func NewFloat64Point(t time.Time, val float64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewInt64Point creates a new Point holding an int64 value. 
-func NewInt64Point(t time.Time, val int64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewDistributionPoint creates a new Point holding a Distribution value. -func NewDistributionPoint(t time.Time, val *Distribution) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewSummaryPoint creates a new Point holding a Summary value. -func NewSummaryPoint(t time.Time, val *Summary) Point { - return Point{ - Value: val, - Time: t, - } -} - -// ValueVisitor allows reading the value of a point. -type ValueVisitor interface { - VisitFloat64Value(float64) - VisitInt64Value(int64) - VisitDistributionValue(*Distribution) - VisitSummaryValue(*Summary) -} - -// ReadValue accepts a ValueVisitor and calls the appropriate method with the -// value of this point. -// Consumers of Point should use this in preference to switching on the type -// of the value directly, since new value types may be added. -func (p Point) ReadValue(vv ValueVisitor) { - switch v := p.Value.(type) { - case int64: - vv.VisitInt64Value(v) - case float64: - vv.VisitFloat64Value(v) - case *Distribution: - vv.VisitDistributionValue(v) - case *Summary: - vv.VisitSummaryValue(v) - default: - panic("unexpected value type") - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type Distribution struct { - // Count is the number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 - // Sum is the sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 - // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 - // BucketOptions describes the bounds of the histogram buckets in this - // distribution. - // - // A Distribution may optionally contain a histogram of the values in the - // population. - // - // If nil, there is no associated histogram. - BucketOptions *BucketOptions - // Bucket If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. - Buckets []Bucket -} - -// BucketOptions describes the bounds of the histogram buckets in this -// distribution. -type BucketOptions struct { - // Bounds specifies a set of bucket upper bounds. - // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket - // index i are: - // - // [0, Bounds[i]) for i == 0 - // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 - // [Bounds[i-1], +infinity) for i == N-1 - Bounds []float64 -} - -// Bucket represents a single bucket (value range) in a distribution. -type Bucket struct { - // Count is the number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 - // Exemplar associated with this bucket (if any). - Exemplar *Exemplar -} - -// Summary is a representation of percentiles. -type Summary struct { - // Count is the cumulative count (if available). 
- Count int64 - // Sum is the cumulative sum of values (if available). - Sum float64 - // HasCountAndSum is true if Count and Sum are available. - HasCountAndSum bool - // Snapshot represents percentiles calculated over an arbitrary time window. - // The values in this struct can be reset at arbitrary unknown times, with - // the requirement that all of them are reset at the same time. - Snapshot Snapshot -} - -// Snapshot represents percentiles over an arbitrary time. -// The values in this struct can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type Snapshot struct { - // Count is the number of values in the snapshot. Optional since some systems don't - // expose this. Set to 0 if not available. - Count int64 - // Sum is the sum of values in the snapshot. Optional since some systems don't - // expose this. If count is 0 then this field must be zero. - Sum float64 - // Percentiles is a map from percentile (range (0-100.0]) to the value of - // the percentile. - Percentiles map[float64]float64 -} - -//go:generate stringer -type Type - -// Type is the overall type of metric, including its value type and whether it -// represents a cumulative total (since the start time) or if it represents a -// gauge value. -type Type int - -// Metric types. -const ( - TypeGaugeInt64 Type = iota - TypeGaugeFloat64 - TypeGaugeDistribution - TypeCumulativeInt64 - TypeCumulativeFloat64 - TypeCumulativeDistribution - TypeSummary -) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go deleted file mode 100644 index c3f8ec27b5..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/type_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type Type"; DO NOT EDIT. - -package metricdata - -import "strconv" - -const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" - -var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go deleted file mode 100644 index b483a1371b..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/unit.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -// Unit is a string encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -type Unit string - -// Predefined units. To record against a unit not represented here, create your -// own Unit type constant from a string. 
-const ( - UnitDimensionless Unit = "1" - UnitBytes Unit = "By" - UnitMilliseconds Unit = "ms" -) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go deleted file mode 100644 index ca1f390493..0000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/manager.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "sync" -) - -// Manager maintains a list of active producers. Producers can register -// with the manager to allow readers to read all metrics provided by them. -// Readers can retrieve all producers registered with the manager, -// read metrics from the producers and export them. -type Manager struct { - mu sync.RWMutex - producers map[Producer]struct{} -} - -var prodMgr *Manager -var once sync.Once - -// GlobalManager is a single instance of producer manager -// that is used by all producers and all readers. -func GlobalManager() *Manager { - once.Do(func() { - prodMgr = &Manager{} - prodMgr.producers = make(map[Producer]struct{}) - }) - return prodMgr -} - -// AddProducer adds the producer to the Manager if it is not already present. -func (pm *Manager) AddProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - pm.producers[producer] = struct{}{} -} - -// DeleteProducer deletes the producer from the Manager if it is present. -func (pm *Manager) DeleteProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - delete(pm.producers, producer) -} - -// GetAll returns a slice of all producer currently registered with -// the Manager. For each call it generates a new slice. The slice -// should not be cached as registration may change at any time. It is -// typically called periodically by exporter to read metrics from -// the producers. -func (pm *Manager) GetAll() []Producer { - pm.mu.Lock() - defer pm.mu.Unlock() - producers := make([]Producer, len(pm.producers)) - i := 0 - for producer := range pm.producers { - producers[i] = producer - i++ - } - return producers -} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go deleted file mode 100644 index 6cee9ed178..0000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/producer.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "go.opencensus.io/metric/metricdata" -) - -// Producer is a source of metrics. -type Producer interface { - // Read should return the current values of all metrics supported by this - // metric provider. - // The returned metrics should be unique for each combination of name and - // resource. - Read() []*metricdata.Metric -} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go deleted file mode 100644 index 11e31f421c..0000000000 --- a/vendor/go.opencensus.io/opencensus.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package opencensus contains Go support for OpenCensus. -package opencensus // import "go.opencensus.io" - -// Version is the current release version of OpenCensus in use. -func Version() string { - return "0.24.0" -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go deleted file mode 100644 index 2063b6f76a..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - - "go.opencensus.io/trace" - "google.golang.org/grpc/stats" -) - -// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and -// traces. Use with gRPC clients only. -type ClientHandler struct { - // StartOptions allows configuring the StartOptions used to create new spans. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this handler. - StartOptions trace.StartOptions -} - -// HandleConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. 
-func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = c.traceTagRPC(ctx, rti) - ctx = c.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go deleted file mode 100644 index fb3c19d6b6..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ClientHandler: -var ( - ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) - ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) - ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) - ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) - ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) -) - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. 
-var ( - ClientSentBytesPerRPCView = &view.View{ - Measure: ClientSentBytesPerRPC, - Name: "grpc.io/client/sent_bytes_per_rpc", - Description: "Distribution of bytes sent per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientReceivedBytesPerRPCView = &view.View{ - Measure: ClientReceivedBytesPerRPC, - Name: "grpc.io/client/received_bytes_per_rpc", - Description: "Distribution of bytes received per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientRoundtripLatencyView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/roundtrip_latency", - Description: "Distribution of round-trip latency, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ClientRoundtripLatency`, tagging - // with method and status to result in ClientCompletedRpcs. - ClientCompletedRPCsView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - Aggregation: view.Count(), - } - - ClientStartedRPCsView = &view.View{ - Measure: ClientStartedRPCs, - Name: "grpc.io/client/started_rpcs", - Description: "Number of started client RPCs.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: view.Count(), - } - - ClientSentMessagesPerRPCView = &view.View{ - Measure: ClientSentMessagesPerRPC, - Name: "grpc.io/client/sent_messages_per_rpc", - Description: "Distribution of sent messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientReceivedMessagesPerRPCView = &view.View{ - Measure: ClientReceivedMessagesPerRPC, - Name: "grpc.io/client/received_messages_per_rpc", - Description: "Distribution of received messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientServerLatencyView = &view.View{ - Measure: ClientServerLatency, - Name: "grpc.io/client/server_latency", - Description: "Distribution of server latency as viewed by client, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } -) - -// DefaultClientViews are the default client views provided by this package. -var DefaultClientViews = []*view.View{ - ClientSentBytesPerRPCView, - ClientReceivedBytesPerRPCView, - ClientRoundtripLatencyView, - ClientCompletedRPCsView, -} - -// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go deleted file mode 100644 index b36349820d..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "context" - "time" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the tag.Map populated by the application code, serializes -// its tags into the GRPC metadata in order to be sent to the server. -func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Info("clientHandler.TagRPC called with nil info.") - } - return ctx - } - - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - ts := tag.FromContext(ctx) - if ts != nil { - encoded := tag.Encode(ts) - ctx = stats.SetTags(ctx, encoded) - } - - return context.WithValue(ctx, rpcDataKey, d) -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go deleted file mode 100644 index 1370323fb7..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ocgrpc contains OpenCensus stats and trace -// integrations for gRPC. -// -// Use ServerHandler for servers and ClientHandler for clients. -package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go deleted file mode 100644 index 8a53e09727..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - - "google.golang.org/grpc/stats" - - "go.opencensus.io/trace" -) - -// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and -// traces. Use with gRPC servers. -// -// When installed (see Example), tracing metadata is read from inbound RPCs -// by default. 
If no tracing metadata is present, or if the tracing metadata is -// present but the SpanContext isn't sampled, then a new trace may be started -// (as determined by Sampler). -type ServerHandler struct { - // IsPublicEndpoint may be set to true to always start a new trace around - // each RPC. Any SpanContext in the RPC metadata will be added as a linked - // span instead of making it the parent of the span created around the - // server RPC. - // - // Be aware that if you leave this false (the default) on a public-facing - // server, callers will be able to send tracing metadata in gRPC headers - // and trigger traces in your backend. - IsPublicEndpoint bool - - // StartOptions to use for to spans started around RPCs handled by this server. - // - // These will apply even if there is tracing metadata already - // present on the inbound RPC but the SpanContext is not sampled. This - // ensures that each service has some opportunity to be traced. If you would - // like to not add any additional traces for this gRPC service, set: - // - // StartOptions.Sampler = trace.ProbabilitySampler(0.0) - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this handler. - StartOptions trace.StartOptions -} - -var _ stats.Handler = (*ServerHandler)(nil) - -// HandleConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = s.traceTagRPC(ctx, rti) - ctx = s.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go deleted file mode 100644 index fe0e971086..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ServerHandler: -var ( - ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) - ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) - ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) - ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) -) - -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. -var ( - ServerReceivedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/received_bytes_per_rpc", - Description: "Distribution of received bytes per RPC, by method.", - Measure: ServerReceivedBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerSentBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_bytes_per_rpc", - Description: "Distribution of total sent bytes per RPC, by method.", - Measure: ServerSentBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerLatencyView = &view.View{ - Name: "grpc.io/server/server_latency", - Description: "Distribution of server latency in milliseconds, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerLatency, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ServerLatency`, tagging - // with method and status to result in ServerCompletedRpcs. - ServerCompletedRPCsView = &view.View{ - Name: "grpc.io/server/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, - Measure: ServerLatency, - Aggregation: view.Count(), - } - - ServerStartedRPCsView = &view.View{ - Measure: ServerStartedRPCs, - Name: "grpc.io/server/started_rpcs", - Description: "Number of started server RPCs.", - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: view.Count(), - } - - ServerReceivedMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/received_messages_per_rpc", - Description: "Distribution of messages received count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerReceivedMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } - - ServerSentMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_messages_per_rpc", - Description: "Distribution of messages sent count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerSentMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } -) - -// DefaultServerViews are the default server views provided by this package. 
-var DefaultServerViews = []*view.View{ - ServerReceivedBytesPerRPCView, - ServerSentBytesPerRPCView, - ServerLatencyView, - ServerCompletedRPCsView, -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go deleted file mode 100644 index afcef023af..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "time" - - "context" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from -// it and creates a new tag.Map and puts them into the returned context. -func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Infof("opencensus: TagRPC called with nil info.") - } - return ctx - } - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - propagated := h.extractPropagatedTags(ctx) - ctx = tag.NewContext(ctx, propagated) - ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) - return context.WithValue(ctx, rpcDataKey, d) -} - -// extractPropagatedTags creates a new tag map containing the tags extracted from the -// gRPC metadata. -func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { - buf := stats.Tags(ctx) - if buf == nil { - return nil - } - propagated, err := tag.Decode(buf) - if err != nil { - if grpclog.V(2) { - grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err) - } - return nil - } - return propagated -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go deleted file mode 100644 index 9cb27320ca..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package ocgrpc - -import ( - "context" - "strconv" - "strings" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - ocstats "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -type grpcInstrumentationKey string - -// rpcData holds the instrumentation RPC data that is needed between the start -// and end of an call. It holds the info that this package needs to keep track -// of between the various GRPC events. -type rpcData struct { - // reqCount and respCount has to be the first words - // in order to be 64-aligned on 32-bit architectures. - sentCount, sentBytes, recvCount, recvBytes int64 // access atomically - - // startTime represents the time at which TagRPC was invoked at the - // beginning of an RPC. It is an appoximation of the time when the - // application code invoked GRPC code. - startTime time.Time - method string -} - -// The following variables define the default hard-coded auxiliary data used by -// both the default GRPC client and GRPC server metrics. -var ( - DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) - DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) -) - -// Server tags are applied to the context used to process each RPC, as well as -// the measures at the end of each RPC. -var ( - KeyServerMethod = tag.MustNewKey("grpc_server_method") - KeyServerStatus = tag.MustNewKey("grpc_server_status") -) - -// Client tags are applied to measures at the end of each RPC. -var ( - KeyClientMethod = tag.MustNewKey("grpc_client_method") - KeyClientStatus = tag.MustNewKey("grpc_client_status") -) - -var ( - rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") -) - -func methodName(fullname string) string { - return strings.TrimLeft(fullname, "/") -} - -// statsHandleRPC processes the RPC events. 
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) { - switch st := s.(type) { - case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: - // do nothing for client - case *stats.Begin: - handleRPCBegin(ctx, st) - case *stats.OutPayload: - handleRPCOutPayload(ctx, st) - case *stats.InPayload: - handleRPCInPayload(ctx, st) - case *stats.End: - handleRPCEnd(ctx, st) - default: - grpclog.Infof("unexpected stats: %T", st) - } -} - -func handleRPCBegin(ctx context.Context, s *stats.Begin) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - } - - if s.IsClient() { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ClientStartedRPCs.M(1))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ServerStartedRPCs.M(1))) - } -} - -func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.sentBytes, int64(s.Length)) - atomic.AddInt64(&d.sentCount, 1) -} - -func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.recvBytes, int64(s.Length)) - atomic.AddInt64(&d.recvCount, 1) -} - -func handleRPCEnd(ctx context.Context, s *stats.End) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - elapsedTime := time.Since(d.startTime) - - var st string - if s.Error != nil { - s, ok := status.FromError(s.Error) - if ok { - st = statusCodeToString(s) - } - } else { - st = "OK" - } - - latencyMillis := float64(elapsedTime) / float64(time.Millisecond) - attachments := getSpanCtxAttachment(ctx) - if s.Client { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st)), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ClientRoundtripLatency.M(latencyMillis))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyServerStatus, st), - ), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis))) - } -} - -func statusCodeToString(s *status.Status) string { - // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md - switch c := s.Code(); c { - case codes.OK: - return "OK" - case codes.Canceled: - return "CANCELLED" - case codes.Unknown: - return "UNKNOWN" - case codes.InvalidArgument: - return "INVALID_ARGUMENT" - case codes.DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case codes.NotFound: - 
return "NOT_FOUND" - case codes.AlreadyExists: - return "ALREADY_EXISTS" - case codes.PermissionDenied: - return "PERMISSION_DENIED" - case codes.ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case codes.FailedPrecondition: - return "FAILED_PRECONDITION" - case codes.Aborted: - return "ABORTED" - case codes.OutOfRange: - return "OUT_OF_RANGE" - case codes.Unimplemented: - return "UNIMPLEMENTED" - case codes.Internal: - return "INTERNAL" - case codes.Unavailable: - return "UNAVAILABLE" - case codes.DataLoss: - return "DATA_LOSS" - case codes.Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE_" + strconv.FormatInt(int64(c), 10) - } -} - -func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { - attachments := map[string]interface{}{} - span := trace.FromContext(ctx) - if span == nil { - return attachments - } - spanCtx := span.SpanContext() - if spanCtx.IsSampled() { - attachments[metricdata.AttachmentKeySpanContext] = spanCtx - } - return attachments -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go deleted file mode 100644 index 61bc543d0a..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const traceContextKey = "grpc-trace-bin" - -// TagRPC creates a new trace span for the client side of the RPC. -// -// It returns ctx with the new trace span added and a serialization of the -// SpanContext added to the outgoing gRPC metadata. -func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - ctx, span := trace.StartSpan(ctx, name, - trace.WithSampler(c.StartOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC - traceContextBinary := propagation.Binary(span.SpanContext()) - return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) -} - -// TagRPC creates a new trace span for the server side of the RPC. -// -// It checks the incoming gRPC metadata in ctx for a SpanContext, and if -// it finds one, uses that SpanContext as the parent context of the new span. -// -// It returns ctx, with the new trace span added. 
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - md, _ := metadata.FromIncomingContext(ctx) - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - traceContext := md[traceContextKey] - var ( - parent trace.SpanContext - haveParent bool - ) - if len(traceContext) > 0 { - // Metadata with keys ending in -bin are actually binary. They are base64 - // encoded before being put on the wire, see: - // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata - traceContextBinary := []byte(traceContext[0]) - parent, haveParent = propagation.FromBinary(traceContextBinary) - if haveParent && !s.IsPublicEndpoint { - ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler), - ) - return ctx - } - } - ctx, span := trace.StartSpan(ctx, name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler)) - if haveParent { - span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) - } - return ctx -} - -func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { - span := trace.FromContext(ctx) - // TODO: compressed and uncompressed sizes are not populated in every message. - switch rs := rs.(type) { - case *stats.Begin: - span.AddAttributes( - trace.BoolAttribute("Client", rs.Client), - trace.BoolAttribute("FailFast", rs.FailFast)) - case *stats.InPayload: - span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) - case *stats.OutPayload: - span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) - case *stats.End: - if rs.Error != nil { - s, ok := status.FromError(rs.Error) - if ok { - span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) - } else { - span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) - } - } - span.End() - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go deleted file mode 100644 index da815b2a73..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "net/http" - "net/http/httptrace" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Transport is an http.RoundTripper that instruments all outgoing requests with -// OpenCensus stats and tracing. -// -// The zero value is intended to be a useful default, but for -// now it's recommended that you explicitly set Propagation, since the default -// for this may change. -type Transport struct { - // Base may be set to wrap another http.RoundTripper that does the actual - // requests. By default http.DefaultTransport is used. 
- // - // If base HTTP roundtripper implements CancelRequest, - // the returned round tripper will be cancelable. - Base http.RoundTripper - - // Propagation defines how traces are propagated. If unspecified, a default - // (currently B3 format) will be used. - Propagation propagation.HTTPFormat - - // StartOptions are applied to the span started by this Transport around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this transport. - StartOptions trace.StartOptions - - // GetStartOptions allows to set start options per request. If set, - // StartOptions is going to be ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // NameFromRequest holds the function to use for generating the span name - // from the information found in the outgoing HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string - - // NewClientTrace may be set to a function allowing the current *trace.Span - // to be annotated with HTTP request event information emitted by the - // httptrace package. - NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace - - // TODO: Implement tag propagation for HTTP. -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.base() - if isHealthEndpoint(req.URL.Path) { - return rt.RoundTrip(req) - } - // TODO: remove excessive nesting of http.RoundTrippers here. - format := t.Propagation - if format == nil { - format = defaultFormat - } - spanNameFormatter := t.FormatSpanName - if spanNameFormatter == nil { - spanNameFormatter = spanNameFromURL - } - - startOpts := t.StartOptions - if t.GetStartOptions != nil { - startOpts = t.GetStartOptions(req) - } - - rt = &traceTransport{ - base: rt, - format: format, - startOptions: trace.StartOptions{ - Sampler: startOpts.Sampler, - SpanKind: trace.SpanKindClient, - }, - formatSpanName: spanNameFormatter, - newClientTrace: t.NewClientTrace, - } - rt = statsTransport{base: rt} - return rt.RoundTrip(req) -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - cr.CancelRequest(req) - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go deleted file mode 100644 index 17142aabe0..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. -type statsTransport struct { - base http.RoundTripper -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. -func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { - ctx, _ := tag.New(req.Context(), - tag.Upsert(KeyClientHost, req.Host), - tag.Upsert(Host, req.Host), - tag.Upsert(KeyClientPath, req.URL.Path), - tag.Upsert(Path, req.URL.Path), - tag.Upsert(KeyClientMethod, req.Method), - tag.Upsert(Method, req.Method)) - req = req.WithContext(ctx) - track := &tracker{ - start: time.Now(), - ctx: ctx, - } - if req.Body == nil { - // TODO: Handle cases where ContentLength is not set. - track.reqSize = -1 - } else if req.ContentLength > 0 { - track.reqSize = req.ContentLength - } - stats.Record(ctx, ClientRequestCount.M(1)) - - // Perform request. - resp, err := t.base.RoundTrip(req) - - if err != nil { - track.statusCode = http.StatusInternalServerError - track.end() - } else { - track.statusCode = resp.StatusCode - if req.Method != "HEAD" { - track.respContentLength = resp.ContentLength - } - if resp.Body == nil { - track.end() - } else { - track.body = resp.Body - resp.Body = wrappedBody(track, resp.Body) - } - } - return resp, err -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t statsTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -type tracker struct { - ctx context.Context - respSize int64 - respContentLength int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once -} - -var _ io.ReadCloser = (*tracker)(nil) - -func (t *tracker) end() { - t.endOnce.Do(func() { - latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) - respSize := t.respSize - if t.respSize == 0 && t.respContentLength > 0 { - respSize = t.respContentLength - } - m := []stats.Measurement{ - ClientSentBytes.M(t.reqSize), - ClientReceivedBytes.M(respSize), - ClientRoundtripLatency.M(latencyMs), - ClientLatency.M(latencyMs), - ClientResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ClientRequestBytes.M(t.reqSize)) - } - - stats.RecordWithTags(t.ctx, []tag.Mutator{ - tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), - tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), - }, m...) - }) -} - -func (t *tracker) Read(b []byte) (int, error) { - n, err := t.body.Read(b) - t.respSize += int64(n) - switch err { - case nil: - return n, nil - case io.EOF: - t.end() - } - return n, err -} - -func (t *tracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. - t.end() - return t.body.Close() -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go deleted file mode 100644 index 10e626b16e..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ochttp provides OpenCensus instrumentation for net/http package. -// -// For server instrumentation, see Handler. For client-side instrumentation, -// see Transport. -package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go deleted file mode 100644 index 9ad8852198..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b3 contains a propagation.HTTPFormat implementation -// for B3 propagation. See https://github.com/openzipkin/b3-propagation -// for more details. -package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" - -import ( - "encoding/hex" - "net/http" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// B3 headers that OpenCensus understands. -const ( - TraceIDHeader = "X-B3-TraceId" - SpanIDHeader = "X-B3-SpanId" - SampledHeader = "X-B3-Sampled" -) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers in B3 propagation format. -// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers -// because there are additional fields not represented in the -// OpenCensus span context. Spans created from the incoming -// header will be the direct children of the client-side span. -// Similarly, receiver of the outgoing spans should use client-side -// span created by OpenCensus as the parent. -type HTTPFormat struct{} - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// SpanContextFromRequest extracts a B3 span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) - return trace.SpanContext{ - TraceID: tid, - SpanID: sid, - TraceOptions: sampled, - }, true -} - -// ParseTraceID parses the value of the X-B3-TraceId header. -func ParseTraceID(tid string) (trace.TraceID, bool) { - if tid == "" { - return trace.TraceID{}, false - } - b, err := hex.DecodeString(tid) - if err != nil || len(b) > 16 { - return trace.TraceID{}, false - } - var traceID trace.TraceID - if len(b) <= 8 { - // The lower 64-bits. 
- start := 8 + (8 - len(b)) - copy(traceID[start:], b) - } else { - start := 16 - len(b) - copy(traceID[start:], b) - } - - return traceID, true -} - -// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. -func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { - if sid == "" { - return trace.SpanID{}, false - } - b, err := hex.DecodeString(sid) - if err != nil || len(b) > 8 { - return trace.SpanID{}, false - } - start := 8 - len(b) - copy(spanID[start:], b) - return spanID, true -} - -// ParseSampled parses the value of the X-B3-Sampled header. -func ParseSampled(sampled string) (trace.TraceOptions, bool) { - switch sampled { - case "true", "1": - return trace.TraceOptions(1), true - default: - return trace.TraceOptions(0), false - } -} - -// SpanContextToRequest modifies the given request to include B3 headers. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) - req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) - - var sampled string - if sc.IsSampled() { - sampled = "1" - } else { - sampled = "0" - } - req.Header.Set(SampledHeader, sampled) -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go deleted file mode 100644 index 5e6a343076..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/route.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "net/http" - - "go.opencensus.io/tag" -) - -// SetRoute sets the http_server_route tag to the given value. -// It's useful when an HTTP framework does not support the http.Handler interface -// and using WithRouteTag is not an option, but provides a way to hook into the request flow. -func SetRoute(ctx context.Context, route string) { - if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) - } -} - -// WithRouteTag returns an http.Handler that records stats with the -// http_server_route tag set to the given value. -func WithRouteTag(handler http.Handler, route string) http.Handler { - return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { - addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} - ctx, _ := tag.New(r.Context(), addRoute...) - r = r.WithContext(ctx) - handler.ServeHTTP(w, r) - return addRoute - }) -} - -// taggedHandlerFunc is a http.Handler that returns tags describing the -// processing of the request. These tags will be recorded along with the -// measures in this package at the end of the request. -type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator - -func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { - tags := h(w, r) - if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tags...) 
- } -} - -type addedTagsKey struct{} - -type addedTags struct { - t []tag.Mutator -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go deleted file mode 100644 index f7c8434be0..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Handler is an http.Handler wrapper to instrument your HTTP server with -// OpenCensus. It supports both stats and tracing. -// -// # Tracing -// -// This handler is aware of the incoming request's span, reading it from request -// headers as configured using the Propagation field. -// The extracted span can be accessed from the incoming request's -// context. -// -// span := trace.FromContext(r.Context()) -// -// The server span will be automatically ended at the end of ServeHTTP. -type Handler struct { - // Propagation defines how traces are propagated. If unspecified, - // B3 propagation will be used. - Propagation propagation.HTTPFormat - - // Handler is the handler used to handle the incoming request. - Handler http.Handler - - // StartOptions are applied to the span started by this Handler around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this transport. - StartOptions trace.StartOptions - - // GetStartOptions allows to set start options per request. If set, - // StartOptions is going to be ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) - // servers. If true, any trace metadata set on the incoming request will - // be added as a linked trace instead of being added as a parent of the - // current trace. - IsPublicEndpoint bool - - // FormatSpanName holds the function to use for generating the span name - // from the information found in the incoming HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string - - // IsHealthEndpoint holds the function to use for determining if the - // incoming HTTP request should be considered a health check. This is in - // addition to the private isHealthEndpoint func which may also indicate - // tracing should be skipped. 
- IsHealthEndpoint func(*http.Request) bool -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var tags addedTags - r, traceEnd := h.startTrace(w, r) - defer traceEnd() - w, statsEnd := h.startStats(w, r) - defer statsEnd(&tags) - handler := h.Handler - if handler == nil { - handler = http.DefaultServeMux - } - r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) - handler.ServeHTTP(w, r) -} - -func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { - return r, func() {} - } - var name string - if h.FormatSpanName == nil { - name = spanNameFromURL(r) - } else { - name = h.FormatSpanName(r) - } - ctx := r.Context() - - startOpts := h.StartOptions - if h.GetStartOptions != nil { - startOpts = h.GetStartOptions(r) - } - - var span *trace.Span - sc, ok := h.extractSpanContext(r) - if ok && !h.IsPublicEndpoint { - ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer)) - } else { - ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer), - ) - if ok { - span.AddLink(trace.Link{ - TraceID: sc.TraceID, - SpanID: sc.SpanID, - Type: trace.LinkTypeParent, - Attributes: nil, - }) - } - } - span.AddAttributes(requestAttrs(r)...) - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. - } else if r.ContentLength > 0 { - span.AddMessageReceiveEvent(0, /* TODO: messageID */ - r.ContentLength, -1) - } - return r.WithContext(ctx), span.End -} - -func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { - if h.Propagation == nil { - return defaultFormat.SpanContextFromRequest(r) - } - return h.Propagation.SpanContextFromRequest(r) -} - -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { - ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.Host), - tag.Upsert(Path, r.URL.Path), - tag.Upsert(Method, r.Method)) - track := &trackingResponseWriter{ - start: time.Now(), - ctx: ctx, - writer: w, - } - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. 
- track.reqSize = -1 - } else if r.ContentLength > 0 { - track.reqSize = r.ContentLength - } - stats.Record(ctx, ServerRequestCount.M(1)) - return track.wrappedResponseWriter(), track.end -} - -type trackingResponseWriter struct { - ctx context.Context - reqSize int64 - respSize int64 - start time.Time - statusCode int - statusLine string - endOnce sync.Once - writer http.ResponseWriter -} - -// Compile time assertion for ResponseWriter interface -var _ http.ResponseWriter = (*trackingResponseWriter)(nil) - -func (t *trackingResponseWriter) end(tags *addedTags) { - t.endOnce.Do(func() { - if t.statusCode == 0 { - t.statusCode = 200 - } - - span := trace.FromContext(t.ctx) - span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) - span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) - - m := []stats.Measurement{ - ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), - ServerResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ServerRequestBytes.M(t.reqSize)) - } - allTags := make([]tag.Mutator, len(tags.t)+1) - allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) - copy(allTags[1:], tags.t) - stats.RecordWithTags(t.ctx, allTags, m...) - }) -} - -func (t *trackingResponseWriter) Header() http.Header { - return t.writer.Header() -} - -func (t *trackingResponseWriter) Write(data []byte) (int, error) { - n, err := t.writer.Write(data) - t.respSize += int64(n) - // Add message event for request bytes sent. - span := trace.FromContext(t.ctx) - span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) - return n, err -} - -func (t *trackingResponseWriter) WriteHeader(statusCode int) { - t.writer.WriteHeader(statusCode) - t.statusCode = statusCode - t.statusLine = http.StatusText(t.statusCode) -} - -// wrappedResponseWriter returns a wrapped version of the original -// -// ResponseWriter and only implements the same combination of additional -// -// interfaces as the original. -// This implementation is based on https://github.com/felixge/httpsnoop. 
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { - var ( - hj, i0 = t.writer.(http.Hijacker) - cn, i1 = t.writer.(http.CloseNotifier) - pu, i2 = t.writer.(http.Pusher) - fl, i3 = t.writer.(http.Flusher) - rf, i4 = t.writer.(io.ReaderFrom) - ) - - switch { - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{t} - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{t, rf} - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{t, fl} - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{t, fl, rf} - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - }{t, pu} - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - io.ReaderFrom - }{t, pu, rf} - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - }{t, pu, fl} - case !i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - io.ReaderFrom - }{t, pu, fl, rf} - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{t, cn} - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{t, cn, rf} - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - }{t, cn, fl} - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, cn, fl, rf} - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{t, cn, pu} - case !i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, cn, pu, rf} - case !i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - }{t, cn, pu, fl} - case !i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, cn, pu, fl, rf} - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{t, hj} - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{t, hj, rf} - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - }{t, hj, fl} - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - io.ReaderFrom - }{t, hj, fl, rf} - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - }{t, hj, pu} - case i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - io.ReaderFrom - }{t, hj, pu, rf} - case i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - }{t, hj, pu, fl} - case i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, pu, fl, rf} - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - }{t, hj, cn} - case i0 && i1 && !i2 && !i3 && i4: - return struct { - 
http.ResponseWriter - http.Hijacker - http.CloseNotifier - io.ReaderFrom - }{t, hj, cn, rf} - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - }{t, hj, cn, fl} - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, hj, cn, fl, rf} - case i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - }{t, hj, cn, pu} - case i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, hj, cn, pu, rf} - case i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - }{t, hj, cn, pu, fl} - case i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, cn, pu, fl, rf} - default: - return struct { - http.ResponseWriter - }{t} - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go deleted file mode 100644 index 05c6c56cc7..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "strings" - - "go.opencensus.io/trace" -) - -type spanAnnotator struct { - sp *trace.Span -} - -// TODO: Remove NewSpanAnnotator at the next release. - -// NewSpanAnnotator returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. -// Deprecated: Use NewSpanAnnotatingClientTrace instead -func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { - return NewSpanAnnotatingClientTrace(r, s) -} - -// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. 
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { - sa := spanAnnotator{sp: s} - - return &httptrace.ClientTrace{ - GetConn: sa.getConn, - GotConn: sa.gotConn, - PutIdleConn: sa.putIdleConn, - GotFirstResponseByte: sa.gotFirstResponseByte, - Got100Continue: sa.got100Continue, - DNSStart: sa.dnsStart, - DNSDone: sa.dnsDone, - ConnectStart: sa.connectStart, - ConnectDone: sa.connectDone, - TLSHandshakeStart: sa.tlsHandshakeStart, - TLSHandshakeDone: sa.tlsHandshakeDone, - WroteHeaders: sa.wroteHeaders, - Wait100Continue: sa.wait100Continue, - WroteRequest: sa.wroteRequest, - } -} - -func (s spanAnnotator) getConn(hostPort string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.get_connection.host_port", hostPort), - } - s.sp.Annotate(attrs, "GetConn") -} - -func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { - attrs := []trace.Attribute{ - trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), - trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), - } - if info.WasIdle { - attrs = append(attrs, - trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) - } - s.sp.Annotate(attrs, "GotConn") -} - -// PutIdleConn implements a httptrace.ClientTrace hook -func (s spanAnnotator) putIdleConn(err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) - } - s.sp.Annotate(attrs, "PutIdleConn") -} - -func (s spanAnnotator) gotFirstResponseByte() { - s.sp.Annotate(nil, "GotFirstResponseByte") -} - -func (s spanAnnotator) got100Continue() { - s.sp.Annotate(nil, "Got100Continue") -} - -func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_start.host", info.Host), - } - s.sp.Annotate(attrs, "DNSStart") -} - -func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { - var addrs []string - for _, addr := range info.Addrs { - addrs = append(addrs, addr.String()) - } - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), - } - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "DNSDone") -} - -func (s spanAnnotator) connectStart(network, addr string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_start.network", network), - trace.StringAttribute("httptrace.connect_start.addr", addr), - } - s.sp.Annotate(attrs, "ConnectStart") -} - -func (s spanAnnotator) connectDone(network, addr string, err error) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_done.network", network), - trace.StringAttribute("httptrace.connect_done.addr", addr), - } - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.connect_done.error", err.Error())) - } - s.sp.Annotate(attrs, "ConnectDone") -} - -func (s spanAnnotator) tlsHandshakeStart() { - s.sp.Annotate(nil, "TLSHandshakeStart") -} - -func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) - } - s.sp.Annotate(attrs, "TLSHandshakeDone") -} - -func (s spanAnnotator) wroteHeaders() { - s.sp.Annotate(nil, "WroteHeaders") -} - -func (s spanAnnotator) wait100Continue() { - s.sp.Annotate(nil, 
"Wait100Continue") -} - -func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { - var attrs []trace.Attribute - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "WroteRequest") -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go deleted file mode 100644 index ee3729040d..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// Deprecated: client HTTP measures. -var ( - // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. - ClientRequestCount = stats.Int64( - "opencensus.io/http/client/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - // Deprecated: Use ClientSentBytes. - ClientRequestBytes = stats.Int64( - "opencensus.io/http/client/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientReceivedBytes. - ClientResponseBytes = stats.Int64( - "opencensus.io/http/client/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientRoundtripLatency. - ClientLatency = stats.Float64( - "opencensus.io/http/client/latency", - "End-to-end latency", - stats.UnitMilliseconds) -) - -// The following client HTTP measures are supported for use in custom views. 
-var ( - ClientSentBytes = stats.Int64( - "opencensus.io/http/client/sent_bytes", - "Total bytes sent in request body (not including headers)", - stats.UnitBytes, - ) - ClientReceivedBytes = stats.Int64( - "opencensus.io/http/client/received_bytes", - "Total bytes received in response bodies (not including headers but including error responses with bodies)", - stats.UnitBytes, - ) - ClientRoundtripLatency = stats.Float64( - "opencensus.io/http/client/roundtrip_latency", - "Time between first byte of request headers sent to last byte of response received, or terminal error", - stats.UnitMilliseconds, - ) -) - -// The following server HTTP measures are supported for use in custom views: -var ( - ServerRequestCount = stats.Int64( - "opencensus.io/http/server/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - ServerRequestBytes = stats.Int64( - "opencensus.io/http/server/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - ServerResponseBytes = stats.Int64( - "opencensus.io/http/server/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - ServerLatency = stats.Float64( - "opencensus.io/http/server/latency", - "End-to-end latency", - stats.UnitMilliseconds) -) - -// The following tags are applied to stats recorded by this package. Host, Path -// and Method are applied to all measures. StatusCode is not applied to -// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. -var ( - // Host is the value of the HTTP Host header. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Host = tag.MustNewKey("http.host") - - // StatusCode is the numeric HTTP response status code, - // or "error" if a transport error occurred and no status code was read. - StatusCode = tag.MustNewKey("http.status") - - // Path is the URL path (not including query string) in the request. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Path = tag.MustNewKey("http.path") - - // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method = tag.MustNewKey("http.method") - - // KeyServerRoute is a low cardinality string representing the logical - // handler of the request. This is usually the pattern registered on the a - // ServeMux (or similar string). - KeyServerRoute = tag.MustNewKey("http_server_route") -) - -// Client tag keys. -var ( - // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). - KeyClientMethod = tag.MustNewKey("http_client_method") - // KeyClientPath is the URL path (not including query string). - KeyClientPath = tag.MustNewKey("http_client_path") - // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. - KeyClientStatus = tag.MustNewKey("http_client_status") - // KeyClientHost is the value of the request Host header. - KeyClientHost = tag.MustNewKey("http_client_host") -) - -// Default distributions used by views in this package. 
-var ( - DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -) - -// Package ochttp provides some convenience views for client measures. -// You still need to register these views for data to actually be collected. -var ( - ClientSentBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/sent_bytes", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientReceivedBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/received_bytes", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientRoundtripLatencyDistribution = &view.View{ - Name: "opencensus.io/http/client/roundtrip_latency", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - Description: "End-to-end latency, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientCompletedCount = &view.View{ - Name: "opencensus.io/http/client/completed_count", - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - Description: "Count of completed requests, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } -) - -// Deprecated: Old client Views. -var ( - // Deprecated: No direct replacement, but see ClientCompletedCount. - ClientRequestCountView = &view.View{ - Name: "opencensus.io/http/client/request_count", - Description: "Count of HTTP requests started", - Measure: ClientRequestCount, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientSentBytesDistribution. - ClientRequestBytesView = &view.View{ - Name: "opencensus.io/http/client/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientReceivedBytesDistribution instead. - ClientResponseBytesView = &view.View{ - Name: "opencensus.io/http/client/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientRoundtripLatencyDistribution instead. - ClientLatencyView = &view.View{ - Name: "opencensus.io/http/client/latency", - Description: "Latency distribution of HTTP requests", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - } - - // Deprecated: Use ClientCompletedCount instead. - ClientRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/client/request_count_by_method", - Description: "Client request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ClientSentBytes, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientCompletedCount instead. 
- ClientResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/client/response_count_by_status_code", - Description: "Client response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - } -) - -// Package ochttp provides some convenience views for server measures. -// You still need to register these views for data to actually be collected. -var ( - ServerRequestCountView = &view.View{ - Name: "opencensus.io/http/server/request_count", - Description: "Count of HTTP requests started", - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerRequestBytesView = &view.View{ - Name: "opencensus.io/http/server/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ServerRequestBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerResponseBytesView = &view.View{ - Name: "opencensus.io/http/server/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ServerResponseBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerLatencyView = &view.View{ - Name: "opencensus.io/http/server/latency", - Description: "Latency distribution of HTTP requests", - Measure: ServerLatency, - Aggregation: DefaultLatencyDistribution, - } - - ServerRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/server/request_count_by_method", - Description: "Server request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/server/response_count_by_status_code", - Description: "Server response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ServerLatency, - Aggregation: view.Count(), - } -) - -// DefaultClientViews are the default client views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultClientViews = []*view.View{ - ClientRequestCountView, - ClientRequestBytesView, - ClientResponseBytesView, - ClientLatencyView, - ClientRequestCountByMethod, - ClientResponseCountByStatusCode, -} - -// DefaultServerViews are the default server views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultServerViews = []*view.View{ - ServerRequestCountView, - ServerRequestBytesView, - ServerResponseBytesView, - ServerLatencyView, - ServerRequestCountByMethod, - ServerResponseCountByStatusCode, -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go deleted file mode 100644 index ed3a5db561..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
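For reference, the stats.go file being removed above defines convenience client and server views but leaves registration to the application. A minimal sketch, assuming the go.opencensus.io packages as they were vendored here; the particular choice of views is illustrative only:

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Registration is required before any of the ochttp measures produce data.
	// The deprecated Default*Views slices are skipped in favor of an explicit list.
	if err := view.Register(
		ochttp.ServerLatencyView,
		ochttp.ServerResponseBytesView,
		ochttp.ClientCompletedCount,
	); err != nil {
		log.Fatalf("registering ochttp views: %v", err)
	}
}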
- -package ochttp - -import ( - "io" - "net/http" - "net/http/httptrace" - - "go.opencensus.io/plugin/ochttp/propagation/b3" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// TODO(jbd): Add godoc examples. - -var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} - -// Attributes recorded on the span for the requests. -// Only trace exporters will need them. -const ( - HostAttribute = "http.host" - MethodAttribute = "http.method" - PathAttribute = "http.path" - URLAttribute = "http.url" - UserAgentAttribute = "http.user_agent" - StatusCodeAttribute = "http.status_code" -) - -type traceTransport struct { - base http.RoundTripper - startOptions trace.StartOptions - format propagation.HTTPFormat - formatSpanName func(*http.Request) string - newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace -} - -// TODO(jbd): Add message events for request and response size. - -// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. -// The created span can follow a parent span, if a parent is presented in -// the request's context. -func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := t.formatSpanName(req) - // TODO(jbd): Discuss whether we want to prefix - // outgoing requests with Sent. - ctx, span := trace.StartSpan(req.Context(), name, - trace.WithSampler(t.startOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) - - if t.newClientTrace != nil { - req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) - } else { - req = req.WithContext(ctx) - } - - if t.format != nil { - // SpanContextToRequest will modify its Request argument, which is - // contrary to the contract for http.RoundTripper, so we need to - // pass it a copy of the Request. - // However, the Request struct itself was already copied by - // the WithContext calls above and so we just need to copy the header. - header := make(http.Header) - for k, v := range req.Header { - header[k] = v - } - req.Header = header - t.format.SpanContextToRequest(span.SpanContext(), req) - } - - span.AddAttributes(requestAttrs(req)...) - resp, err := t.base.RoundTrip(req) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - span.End() - return resp, err - } - - span.AddAttributes(responseAttrs(resp)...) - span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) - - // span.End() will be invoked after - // a read from resp.Body returns io.EOF or when - // resp.Body.Close() is invoked. - bt := &bodyTracker{rc: resp.Body, span: span} - resp.Body = wrappedBody(bt, resp.Body) - return resp, err -} - -// bodyTracker wraps a response.Body and invokes -// trace.EndSpan on encountering io.EOF on reading -// the body of the original response. -type bodyTracker struct { - rc io.ReadCloser - span *trace.Span -} - -var _ io.ReadCloser = (*bodyTracker)(nil) - -func (bt *bodyTracker) Read(b []byte) (int, error) { - n, err := bt.rc.Read(b) - - switch err { - case nil: - return n, nil - case io.EOF: - bt.span.End() - default: - // For all other errors, set the span status - bt.span.SetStatus(trace.Status{ - // Code 2 is the error code for Internal server error. - Code: 2, - Message: err.Error(), - }) - } - return n, err -} - -func (bt *bodyTracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. 
- bt.span.End() - return bt.rc.Close() -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *traceTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -func spanNameFromURL(req *http.Request) string { - return req.URL.Path -} - -func requestAttrs(r *http.Request) []trace.Attribute { - userAgent := r.UserAgent() - - attrs := make([]trace.Attribute, 0, 5) - attrs = append(attrs, - trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(URLAttribute, r.URL.String()), - trace.StringAttribute(HostAttribute, r.Host), - trace.StringAttribute(MethodAttribute, r.Method), - ) - - if userAgent != "" { - attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) - } - - return attrs -} - -func responseAttrs(resp *http.Response) []trace.Attribute { - return []trace.Attribute{ - trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), - } -} - -// TraceStatus is a utility to convert the HTTP status code to a trace.Status that -// represents the outcome as closely as possible. -func TraceStatus(httpStatusCode int, statusLine string) trace.Status { - var code int32 - if httpStatusCode < 200 || httpStatusCode >= 400 { - code = trace.StatusCodeUnknown - } - switch httpStatusCode { - case 499: - code = trace.StatusCodeCancelled - case http.StatusBadRequest: - code = trace.StatusCodeInvalidArgument - case http.StatusUnprocessableEntity: - code = trace.StatusCodeInvalidArgument - case http.StatusGatewayTimeout: - code = trace.StatusCodeDeadlineExceeded - case http.StatusNotFound: - code = trace.StatusCodeNotFound - case http.StatusForbidden: - code = trace.StatusCodePermissionDenied - case http.StatusUnauthorized: // 401 is actually unauthenticated. - code = trace.StatusCodeUnauthenticated - case http.StatusTooManyRequests: - code = trace.StatusCodeResourceExhausted - case http.StatusNotImplemented: - code = trace.StatusCodeUnimplemented - case http.StatusServiceUnavailable: - code = trace.StatusCodeUnavailable - case http.StatusOK: - code = trace.StatusCodeOK - case http.StatusConflict: - code = trace.StatusCodeAlreadyExists - } - - return trace.Status{Code: code, Message: codeToStr[code]} -} - -var codeToStr = map[int32]string{ - trace.StatusCodeOK: `OK`, - trace.StatusCodeCancelled: `CANCELLED`, - trace.StatusCodeUnknown: `UNKNOWN`, - trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, - trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, - trace.StatusCodeNotFound: `NOT_FOUND`, - trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, - trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, - trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, - trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, - trace.StatusCodeAborted: `ABORTED`, - trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, - trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, - trace.StatusCodeInternal: `INTERNAL`, - trace.StatusCodeUnavailable: `UNAVAILABLE`, - trace.StatusCodeDataLoss: `DATA_LOSS`, - trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, -} - -func isHealthEndpoint(path string) bool { - // Health checking is pretty frequent and - // traces collected for health endpoints - // can be extremely noisy and expensive. - // Disable canonical health checking endpoints - // like /healthz and /_ah/health for now. 
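For reference, a hedged sketch of the HTTP-to-trace status mapping implemented by the TraceStatus helper shown above; the expected results in the comments follow from the switch statement and the codeToStr table in the removed code:

package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	// 404 translates to NOT_FOUND.
	st := ochttp.TraceStatus(http.StatusNotFound, "Not Found")
	fmt.Println(st.Code == trace.StatusCodeNotFound, st.Message) // true NOT_FOUND

	// 2xx codes without an explicit case keep the zero status code, which is OK.
	fmt.Println(ochttp.TraceStatus(http.StatusAccepted, "Accepted").Message) // OK
}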
- if path == "/healthz" || path == "/_ah/health" { - return true - } - return false -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go deleted file mode 100644 index 7d75cae2b1..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "io" -) - -// wrappedBody returns a wrapped version of the original -// Body and only implements the same combination of additional -// interfaces as the original. -func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { - var ( - wr, i0 = body.(io.Writer) - ) - switch { - case !i0: - return struct { - io.ReadCloser - }{wrapper} - - case i0: - return struct { - io.ReadCloser - io.Writer - }{wrapper, wr} - default: - return struct { - io.ReadCloser - }{wrapper} - } -} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go deleted file mode 100644 index b1764e1d3b..0000000000 --- a/vendor/go.opencensus.io/resource/resource.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package resource provides functionality for resource, which capture -// identifying information about the entities for which signals are exported. -package resource - -import ( - "context" - "fmt" - "os" - "regexp" - "sort" - "strconv" - "strings" -) - -// Environment variables used by FromEnv to decode a resource. -const ( - EnvVarType = "OC_RESOURCE_TYPE" - EnvVarLabels = "OC_RESOURCE_LABELS" -) - -// Resource describes an entity about which identifying information and metadata is exposed. -// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. -type Resource struct { - Type string - Labels map[string]string -} - -// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. 
-func EncodeLabels(labels map[string]string) string { - sortedKeys := make([]string, 0, len(labels)) - for k := range labels { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - s := "" - for i, k := range sortedKeys { - if i > 0 { - s += "," - } - s += k + "=" + strconv.Quote(labels[k]) - } - return s -} - -var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) - -// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. -// A list of labels of the form `="",="",...` is accepted. -// Domain names and paths are accepted as label keys. -// Most users will want to use FromEnv instead. -func DecodeLabels(s string) (map[string]string, error) { - m := map[string]string{} - // Ensure a trailing comma, which allows us to keep the regex simpler - s = strings.TrimRight(strings.TrimSpace(s), ",") + "," - - for len(s) > 0 { - match := labelRegex.FindStringSubmatch(s) - if len(match) == 0 { - return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) - } - v := match[2] - if v == "" { - v = match[3] - } else { - var err error - if v, err = strconv.Unquote(v); err != nil { - return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) - } - } - m[match[1]] = v - - s = s[len(match[0]):] - } - return m, nil -} - -// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE -// and OC_RESOURCE_labelS environment variables. -func FromEnv(context.Context) (*Resource, error) { - res := &Resource{ - Type: strings.TrimSpace(os.Getenv(EnvVarType)), - } - labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) - if labels == "" { - return res, nil - } - var err error - if res.Labels, err = DecodeLabels(labels); err != nil { - return nil, err - } - return res, nil -} - -var _ Detector = FromEnv - -// merge resource information from b into a. In case of a collision, a takes precedence. -func merge(a, b *Resource) *Resource { - if a == nil { - return b - } - if b == nil { - return a - } - res := &Resource{ - Type: a.Type, - Labels: map[string]string{}, - } - if res.Type == "" { - res.Type = b.Type - } - for k, v := range b.Labels { - res.Labels[k] = v - } - // Labels from resource a overwrite labels from resource b. - for k, v := range a.Labels { - res.Labels[k] = v - } - return res -} - -// Detector attempts to detect resource information. -// If the detector cannot find resource information, the returned resource is nil but no -// error is returned. -// An error is only returned on unexpected failures. -type Detector func(context.Context) (*Resource, error) - -// MultiDetector returns a Detector that calls all input detectors in order and -// merges each result with the previous one. In case a type of label key is already set, -// the first set value is takes precedence. -// It returns on the first error that a sub-detector encounters. -func MultiDetector(detectors ...Detector) Detector { - return func(ctx context.Context) (*Resource, error) { - return detectAll(ctx, detectors...) - } -} - -// detectall calls all input detectors sequentially an merges each result with the previous one. -// It returns on the first error that a sub-detector encounters. 
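For reference, a sketch of the OC_RESOURCE_TYPE / OC_RESOURCE_LABELS decoding performed by the resource package removed above; the resource type and label values are invented for illustration:

package main

import (
	"context"
	"fmt"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	os.Setenv("OC_RESOURCE_TYPE", "k8s.io/container")
	os.Setenv("OC_RESOURCE_LABELS", `k8s.io/namespace="default",k8s.io/pod="web-0"`)

	// FromEnv trims the type and runs DecodeLabels over the label list.
	res, err := resource.FromEnv(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Type, res.Labels["k8s.io/pod"]) // k8s.io/container web-0
}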
-func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { - var res *Resource - for _, d := range detectors { - r, err := d(ctx) - if err != nil { - return nil, err - } - res = merge(res, r) - } - return res, nil -} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go deleted file mode 100644 index 31477a464f..0000000000 --- a/vendor/go.opencensus.io/stats/doc.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package stats contains support for OpenCensus stats recording. - -OpenCensus allows users to create typed measures, record measurements, -aggregate the collected data, and export the aggregated data. - -# Measures - -A measure represents a type of data point to be tracked and recorded. -For example, latency, request Mb/s, and response Mb/s are measures -to collect from a server. - -Measure constructors such as Int64 and Float64 automatically -register the measure by the given name. Each registered measure needs -to be unique by name. Measures also have a description and a unit. - -Libraries can define and export measures. Application authors can then -create views and collect and break down measures by the tags they are -interested in. - -# Recording measurements - -Measurement is a data point to be collected for a measure. For example, -for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Measurements are created from measures with -the current context. Tags from the current context are recorded with the -measurements if they are any. - -Recorded measurements are dropped immediately if no views are registered for them. -There is usually no need to conditionally enable and disable -recording to reduce cost. Recording of measurements is cheap. - -Libraries can always record measurements, and applications can later decide -on which measurements they want to collect by registering views. This allows -libraries to turn on the instrumentation by default. - -# Exemplars - -For a given recorded measurement, the associated exemplar is a diagnostic map -that gives more information about the measurement. - -When aggregated using a Distribution aggregation, an exemplar is kept for each -bucket in the Distribution. This allows you to easily find an example of a -measurement that fell into each bucket. - -For example, if you also use the OpenCensus trace package and you -record a measurement with a context that contains a sampled trace span, -then the trace span will be added to the exemplar associated with the measurement. - -When exported to a supporting back end, you should be able to easily navigate -to example traces that fell into each bucket in the Distribution. 
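For reference, the define-measure, register-view, record workflow that the stats package documentation above describes, sketched with an invented measure name ("example.com/request_bytes"):

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var reqBytes = stats.Int64("example.com/request_bytes", "Request size in bytes", stats.UnitBytes)

func main() {
	// Recorded measurements are dropped unless at least one view subscribes to the measure.
	if err := view.Register(&view.View{
		Name:        "example.com/request_bytes_sum",
		Description: "Cumulative request bytes",
		Measure:     reqBytes,
		Aggregation: view.Sum(),
	}); err != nil {
		log.Fatal(err)
	}
	stats.Record(context.Background(), reqBytes.M(1024))
}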
-*/ -package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go deleted file mode 100644 index 436dc791f8..0000000000 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "go.opencensus.io/tag" -) - -// DefaultRecorder will be called for each Record call. -var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) - -// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but -// avoids interface{} conversion. -// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, -// but is interface{} here to avoid import loops -var MeasurementRecorder interface{} - -// SubscriptionReporter reports when a view subscribed with a measure. -var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go deleted file mode 100644 index 1ffd3cefc7..0000000000 --- a/vendor/go.opencensus.io/stats/measure.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "sync" - "sync/atomic" -) - -// Measure represents a single numeric value to be tracked and recorded. -// For example, latency, request bytes, and response bytes could be measures -// to collect from a server. -// -// Measures by themselves have no outside effects. In order to be exported, -// the measure needs to be used in a View. If no Views are defined over a -// measure, there is very little cost in recording it. -type Measure interface { - // Name returns the name of this measure. - // - // Measure names are globally unique (among all libraries linked into your program). - // We recommend prefixing the measure name with a domain name relevant to your - // project or application. - // - // Measure names are never sent over the wire or exported to backends. - // They are only used to create Views. - Name() string - - // Description returns the human-readable description of this measure. - Description() string - - // Unit returns the units for the values this measure takes on. 
- // - // Units are encoded according to the case-sensitive abbreviations from the - // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html - Unit() string -} - -// measureDescriptor is the untyped descriptor associated with each measure. -// Int64Measure and Float64Measure wrap measureDescriptor to provide typed -// recording APIs. -// Two Measures with the same name will have the same measureDescriptor. -type measureDescriptor struct { - subs int32 // access atomically - - name string - description string - unit string -} - -func (m *measureDescriptor) subscribe() { - atomic.StoreInt32(&m.subs, 1) -} - -func (m *measureDescriptor) subscribed() bool { - return atomic.LoadInt32(&m.subs) == 1 -} - -var ( - mu sync.RWMutex - measures = make(map[string]*measureDescriptor) -) - -func registerMeasureHandle(name, desc, unit string) *measureDescriptor { - mu.Lock() - defer mu.Unlock() - - if stored, ok := measures[name]; ok { - return stored - } - m := &measureDescriptor{ - name: name, - description: desc, - unit: unit, - } - measures[name] = m - return m -} - -// Measurement is the numeric value measured when recording stats. Each measure -// provides methods to create measurements of their kind. For example, Int64Measure -// provides M to convert an int64 into a measurement. -type Measurement struct { - v float64 - m Measure - desc *measureDescriptor -} - -// Value returns the value of the Measurement as a float64. -func (m Measurement) Value() float64 { - return m.v -} - -// Measure returns the Measure from which this Measurement was created. -func (m Measurement) Measure() Measure { - return m.m -} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go deleted file mode 100644 index f02c1eda84..0000000000 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Float64Measure is a measure for float64 values. -type Float64Measure struct { - desc *measureDescriptor -} - -// M creates a new float64 measurement. -// Use Record to record measurements. -func (m *Float64Measure) M(v float64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: v, - } -} - -// Float64 creates a new measure for float64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Float64(name, description, unit string) *Float64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Float64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Float64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. 
-func (m *Float64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go deleted file mode 100644 index d101d79735..0000000000 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Int64Measure is a measure for int64 values. -type Int64Measure struct { - desc *measureDescriptor -} - -// M creates a new int64 measurement. -// Use Record to record measurements. -func (m *Int64Measure) M(v int64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: float64(v), - } -} - -// Int64 creates a new measure for int64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Int64(name, description, unit string) *Int64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Int64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. -func (m *Int64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go deleted file mode 100644 index 8b5b99803c..0000000000 --- a/vendor/go.opencensus.io/stats/record.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "context" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - internal.SubscriptionReporter = func(measure string) { - mu.Lock() - measures[measure].subscribe() - mu.Unlock() - } -} - -// Recorder provides an interface for exporting measurement information from -// the static Record method by using the WithRecorder option. -type Recorder interface { - // Record records a set of measurements associated with the given tags and attachments. - // The second argument is a `[]Measurement`. 
- Record(*tag.Map, interface{}, map[string]interface{}) -} - -type recordOptions struct { - attachments metricdata.Attachments - mutators []tag.Mutator - measurements []Measurement - recorder Recorder -} - -// WithAttachments applies provided exemplar attachments. -func WithAttachments(attachments metricdata.Attachments) Options { - return func(ro *recordOptions) { - ro.attachments = attachments - } -} - -// WithTags applies provided tag mutators. -func WithTags(mutators ...tag.Mutator) Options { - return func(ro *recordOptions) { - ro.mutators = mutators - } -} - -// WithMeasurements applies provided measurements. -func WithMeasurements(measurements ...Measurement) Options { - return func(ro *recordOptions) { - ro.measurements = measurements - } -} - -// WithRecorder records the measurements to the specified `Recorder`, rather -// than to the global metrics recorder. -func WithRecorder(meter Recorder) Options { - return func(ro *recordOptions) { - ro.recorder = meter - } -} - -// Options apply changes to recordOptions. -type Options func(*recordOptions) - -func createRecordOption(ros ...Options) *recordOptions { - o := &recordOptions{} - for _, ro := range ros { - ro(o) - } - return o -} - -type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) - -// Record records one or multiple measurements with the same context at once. -// If there are any tags in the context, measurements will be tagged with them. -func Record(ctx context.Context, ms ...Measurement) { - // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality - // (RecordOptions) we can reduce some allocations to speed up this hot path - if len(ms) == 0 { - return - } - recorder := internal.MeasurementRecorder.(measurementRecorder) - record := false - for _, m := range ms { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return - } - recorder(tag.FromContext(ctx), ms, nil) - return -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. -func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) -} - -// RecordWithOptions records measurements from the given options (if any) against context -// and tags and attachments in the options (if any). -// If there are any tags in the context, measurements will be tagged with them. -func RecordWithOptions(ctx context.Context, ros ...Options) error { - o := createRecordOption(ros...) 
- if len(o.measurements) == 0 { - return nil - } - recorder := internal.DefaultRecorder - if o.recorder != nil { - recorder = o.recorder.Record - } - if recorder == nil { - return nil - } - record := false - for _, m := range o.measurements { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return nil - } - if len(o.mutators) > 0 { - var err error - if ctx, err = tag.New(ctx, o.mutators...); err != nil { - return err - } - } - recorder(tag.FromContext(ctx), o.measurements, o.attachments) - return nil -} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go deleted file mode 100644 index 736399652c..0000000000 --- a/vendor/go.opencensus.io/stats/units.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Units are encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -const ( - UnitNone = "1" // Deprecated: Use UnitDimensionless. - UnitDimensionless = "1" - UnitBytes = "By" - UnitMilliseconds = "ms" - UnitSeconds = "s" -) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go deleted file mode 100644 index 61f72d20da..0000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import "time" - -// AggType represents the type of aggregation function used on a View. -type AggType int - -// All available aggregation types. -const ( - AggTypeNone AggType = iota // no aggregation; reserved for future use. - AggTypeCount // the count aggregation, see Count. - AggTypeSum // the sum aggregation, see Sum. - AggTypeDistribution // the distribution aggregation, see Distribution. - AggTypeLastValue // the last value aggregation, see LastValue. -) - -func (t AggType) String() string { - return aggTypeName[t] -} - -var aggTypeName = map[AggType]string{ - AggTypeNone: "None", - AggTypeCount: "Count", - AggTypeSum: "Sum", - AggTypeDistribution: "Distribution", - AggTypeLastValue: "LastValue", -} - -// Aggregation represents a data aggregation method. Use one of the functions: -// Count, Sum, or Distribution to construct an Aggregation. -type Aggregation struct { - Type AggType // Type is the AggType of this Aggregation. 
- Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. - - newData func(time.Time) AggregationData -} - -var ( - aggCount = &Aggregation{ - Type: AggTypeCount, - newData: func(t time.Time) AggregationData { - return &CountData{Start: t} - }, - } - aggSum = &Aggregation{ - Type: AggTypeSum, - newData: func(t time.Time) AggregationData { - return &SumData{Start: t} - }, - } -) - -// Count indicates that data collected and aggregated -// with this method will be turned into a count value. -// For example, total number of accepted requests can be -// aggregated by using Count. -func Count() *Aggregation { - return aggCount -} - -// Sum indicates that data collected and aggregated -// with this method will be summed up. -// For example, accumulated request bytes can be aggregated by using -// Sum. -func Sum() *Aggregation { - return aggSum -} - -// Distribution indicates that the desired aggregation is -// a histogram distribution. -// -// A distribution aggregation may contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described -// by the bounds. This defines len(bounds)+1 buckets. -// -// If len(bounds) >= 2 then the boundaries for bucket index i are: -// -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length -// -// If len(bounds) is 0 then there is no histogram associated with the -// distribution. There will be a single bucket with boundaries -// (-infinity, +infinity). -// -// If len(bounds) is 1 then there is no finite buckets, and that single -// element is the common boundary of the overflow and underflow buckets. -func Distribution(bounds ...float64) *Aggregation { - agg := &Aggregation{ - Type: AggTypeDistribution, - Buckets: bounds, - } - agg.newData = func(t time.Time) AggregationData { - return newDistributionData(agg, t) - } - return agg -} - -// LastValue only reports the last value recorded using this -// aggregation. All other measurements will be dropped. -func LastValue() *Aggregation { - return &Aggregation{ - Type: AggTypeLastValue, - newData: func(_ time.Time) AggregationData { - return &LastValueData{} - }, - } -} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go deleted file mode 100644 index d93b520662..0000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "math" - "time" - - "go.opencensus.io/metric/metricdata" -) - -// AggregationData represents an aggregated value from a collection. -// They are reported on the view data during exporting. -// Mosts users won't directly access aggregration data. 
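For reference, a worked example of the bucket layout documented on the Distribution constructor in aggregation.go above: three finite bounds define four buckets.

package main

import (
	"fmt"

	"go.opencensus.io/stats/view"
)

func main() {
	// Buckets: [-inf, 10), [10, 100), [100, 1000), [1000, +inf)
	agg := view.Distribution(10, 100, 1000)
	fmt.Println(agg.Type, agg.Buckets) // Distribution [10 100 1000]
}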
-type AggregationData interface { - isAggregationData() bool - addSample(v float64, attachments map[string]interface{}, t time.Time) - clone() AggregationData - equal(other AggregationData) bool - toPoint(t metricdata.Type, time time.Time) metricdata.Point - StartTime() time.Time -} - -const epsilon = 1e-9 - -// CountData is the aggregated data for the Count aggregation. -// A count aggregation processes data and counts the recordings. -// -// Most users won't directly access count data. -type CountData struct { - Start time.Time - Value int64 -} - -func (a *CountData) isAggregationData() bool { return true } - -func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { - a.Value = a.Value + 1 -} - -func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value, Start: a.Start} -} - -func (a *CountData) equal(other AggregationData) bool { - a2, ok := other.(*CountData) - if !ok { - return false - } - - return a.Start.Equal(a2.Start) && a.Value == a2.Value -} - -func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by CountData. -func (a *CountData) StartTime() time.Time { - return a.Start -} - -// SumData is the aggregated data for the Sum aggregation. -// A sum aggregation processes data and sums up the recordings. -// -// Most users won't directly access sum data. -type SumData struct { - Start time.Time - Value float64 -} - -func (a *SumData) isAggregationData() bool { return true } - -func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - a.Value += v -} - -func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value, Start: a.Start} -} - -func (a *SumData) equal(other AggregationData) bool { - a2, ok := other.(*SumData) - if !ok { - return false - } - return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon -} - -func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, int64(a.Value)) - case metricdata.TypeCumulativeFloat64: - return metricdata.NewFloat64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by SumData. -func (a *SumData) StartTime() time.Time { - return a.Start -} - -// DistributionData is the aggregated data for the -// Distribution aggregation. -// -// Most users won't directly access distribution data. -// -// For a distribution with N bounds, the associated DistributionData will have -// N+1 buckets. -type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - // ExemplarsPerBucket is slice the same length as CountPerBucket containing - // an exemplar for the associated bucket, or nil. 
- ExemplarsPerBucket []*metricdata.Exemplar - bounds []float64 // histogram distribution of the values - Start time.Time -} - -func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { - bucketCount := len(agg.Buckets) + 1 - return &DistributionData{ - CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: agg.Buckets, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, - Start: t, - } -} - -// Sum returns the sum of all samples collected. -func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } - -func (a *DistributionData) variance() float64 { - if a.Count <= 1 { - return 0 - } - return a.SumOfSquaredDev / float64(a.Count-1) -} - -func (a *DistributionData) isAggregationData() bool { return true } - -// TODO(songy23): support exemplar attachments. -func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { - if v < a.Min { - a.Min = v - } - if v > a.Max { - a.Max = v - } - a.Count++ - a.addToBucket(v, attachments, t) - - if a.Count == 1 { - a.Mean = v - return - } - - oldMean := a.Mean - a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) -} - -func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { - var count *int64 - var i int - var b float64 - for i, b = range a.bounds { - if v < b { - count = &a.CountPerBucket[i] - break - } - } - if count == nil { // Last bucket. - i = len(a.bounds) - count = &a.CountPerBucket[i] - } - *count++ - if exemplar := getExemplar(v, attachments, t); exemplar != nil { - a.ExemplarsPerBucket[i] = exemplar - } -} - -func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { - if len(attachments) == 0 { - return nil - } - return &metricdata.Exemplar{ - Value: v, - Timestamp: t, - Attachments: attachments, - } -} - -func (a *DistributionData) clone() AggregationData { - c := *a - c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) - return &c -} - -func (a *DistributionData) equal(other AggregationData) bool { - a2, ok := other.(*DistributionData) - if !ok { - return false - } - if a2 == nil { - return false - } - if len(a.CountPerBucket) != len(a2.CountPerBucket) { - return false - } - for i := range a.CountPerBucket { - if a.CountPerBucket[i] != a2.CountPerBucket[i] { - return false - } - } - return a.Start.Equal(a2.Start) && - a.Count == a2.Count && - a.Min == a2.Min && - a.Max == a2.Max && - math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon -} - -func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeDistribution: - buckets := []metricdata.Bucket{} - for i := 0; i < len(a.CountPerBucket); i++ { - buckets = append(buckets, metricdata.Bucket{ - Count: a.CountPerBucket[i], - Exemplar: a.ExemplarsPerBucket[i], - }) - } - bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} - - val := &metricdata.Distribution{ - Count: a.Count, - Sum: a.Sum(), - SumOfSquaredDeviation: a.SumOfSquaredDev, - BucketOptions: bucketOptions, - Buckets: buckets, - } - return metricdata.NewDistributionPoint(t, val) - - default: - // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. 
- panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by DistributionData. -func (a *DistributionData) StartTime() time.Time { - return a.Start -} - -// LastValueData returns the last value recorded for LastValue aggregation. -type LastValueData struct { - Value float64 -} - -func (l *LastValueData) isAggregationData() bool { - return true -} - -func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - l.Value = v -} - -func (l *LastValueData) clone() AggregationData { - return &LastValueData{l.Value} -} - -func (l *LastValueData) equal(other AggregationData) bool { - a2, ok := other.(*LastValueData) - if !ok { - return false - } - return l.Value == a2.Value -} - -func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeGaugeInt64: - return metricdata.NewInt64Point(t, int64(l.Value)) - case metricdata.TypeGaugeFloat64: - return metricdata.NewFloat64Point(t, l.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns an empty time value as start time is not recorded when using last value -// aggregation. -func (l *LastValueData) StartTime() time.Time { - return time.Time{} -} - -// ClearStart clears the Start field from data if present. Useful for testing in cases where the -// start time will be nondeterministic. -func ClearStart(data AggregationData) { - switch data := data.(type) { - case *CountData: - data.Start = time.Time{} - case *SumData: - data.Start = time.Time{} - case *DistributionData: - data.Start = time.Time{} - } -} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go deleted file mode 100644 index bcd6e08c74..0000000000 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "sort" - "time" - - "go.opencensus.io/internal/tagencoding" - "go.opencensus.io/tag" -) - -type collector struct { - // signatures holds the aggregations values for each unique tag signature - // (values for all keys) to its aggregator. - signatures map[string]AggregationData - // Aggregation is the description of the aggregation to perform for this - // view. - a *Aggregation -} - -func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { - aggregator, ok := c.signatures[s] - if !ok { - aggregator = c.a.newData(t) - c.signatures[s] = aggregator - } - aggregator.addSample(v, attachments, t) -} - -// collectRows returns a snapshot of the collected Row values. 
-func (c *collector) collectedRows(keys []tag.Key) []*Row { - rows := make([]*Row, 0, len(c.signatures)) - for sig, aggregator := range c.signatures { - tags := decodeTags([]byte(sig), keys) - row := &Row{Tags: tags, Data: aggregator.clone()} - rows = append(rows, row) - } - return rows -} - -func (c *collector) clearRows() { - c.signatures = make(map[string]AggregationData) -} - -// encodeWithKeys encodes the map by using values -// only associated with the keys provided. -func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { - // Compute the buffer length we will need ahead of time to avoid resizing later - reqLen := 0 - for _, k := range keys { - s, _ := m.Value(k) - // We will store each key + its length - reqLen += len(s) + 1 - } - vb := &tagencoding.Values{ - Buffer: make([]byte, reqLen), - } - for _, k := range keys { - v, _ := m.Value(k) - vb.WriteValue([]byte(v)) - } - return vb.Bytes() -} - -// decodeTags decodes tags from the buffer and -// orders them by the keys. -func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { - vb := &tagencoding.Values{Buffer: buf} - var tags []tag.Tag - for _, k := range keys { - v := vb.ReadValue() - if v != nil { - tags = append(tags, tag.Tag{Key: k, Value: string(v)}) - } - } - vb.ReadIndex = 0 - sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) - return tags -} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go deleted file mode 100644 index 60bf0e3925..0000000000 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package view contains support for collecting and exposing aggregates over stats. -// -// In order to collect measurements, views need to be defined and registered. -// A view allows recorded measurements to be filtered and aggregated. -// -// All recorded measurements can be grouped by a list of tags. -// -// OpenCensus provides several aggregation methods: Count, Distribution and Sum. -// -// Count only counts the number of measurement points recorded. -// Distribution provides statistical summary of the aggregated data by counting -// how many recorded measurements fall into each bucket. -// Sum adds up the measurement values. -// LastValue just keeps track of the most recently recorded measurement value. -// All aggregations are cumulative. -// -// Views can be registered and unregistered at any time during program execution. -// -// Libraries can define views but it is recommended that in most cases registering -// views be left up to applications. -// -// # Exporting -// -// Collected and aggregated data can be exported to a metric collection -// backend by registering its exporter. -// -// Multiple exporters can be registered to upload the data to various -// different back ends. 
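
The package documentation above describes defining and registering views. For orientation, a typical caller-side registration looks roughly like the sketch below; the measure name, tag key and bucket bounds are illustrative, not taken from this repository.

    package main

    import (
        "log"

        "go.opencensus.io/stats"
        "go.opencensus.io/stats/view"
        "go.opencensus.io/tag"
    )

    var (
        latencyMs = stats.Float64("example.com/latency", "request latency", stats.UnitMilliseconds)
        keyMethod = tag.MustNewKey("method")
    )

    func main() {
        v := &view.View{
            Name:        "example.com/latency_distribution",
            Description: "latency distribution by method",
            Measure:     latencyMs,
            TagKeys:     []tag.Key{keyMethod},
            // Negative bounds are rejected and zero bounds are silently dropped
            // during canonicalization.
            Aggregation: view.Distribution(25, 100, 400, 1600),
        }
        if err := view.Register(v); err != nil {
            log.Fatalf("failed to register view: %v", err)
        }
    }
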
-package view // import "go.opencensus.io/stats/view" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go deleted file mode 100644 index 73ba11f5b6..0000000000 --- a/vendor/go.opencensus.io/stats/view/export.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package view - -// Exporter exports the collected records as view data. -// -// The ExportView method should return quickly; if an -// Exporter takes a significant amount of time to -// process a Data, that work should be done on another goroutine. -// -// It is safe to assume that ExportView will not be called concurrently from -// multiple goroutines. -// -// The Data should not be modified. -type Exporter interface { - ExportView(viewData *Data) -} - -// RegisterExporter registers an exporter. -// Collected data will be reported via all the -// registered exporters. Once you no longer -// want data to be exported, invoke UnregisterExporter -// with the previously registered exporter. -// -// Binaries can register exporters, libraries shouldn't register exporters. -func RegisterExporter(e Exporter) { - defaultWorker.RegisterExporter(e) -} - -// UnregisterExporter unregisters an exporter. -func UnregisterExporter(e Exporter) { - defaultWorker.UnregisterExporter(e) -} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go deleted file mode 100644 index 293b54ecbe..0000000000 --- a/vendor/go.opencensus.io/stats/view/view.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "sort" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function before data will be -// collected and sent to Exporters. -type View struct { - Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. - Description string // Description is a human-readable description for this view. - - // TagKeys are the tag keys describing the grouping of this view. 
- // A single Row will be produced for each combination of associated tag values. - TagKeys []tag.Key - - // Measure is a stats.Measure to aggregate in this view. - Measure stats.Measure - - // Aggregation is the aggregation function to apply to the set of Measurements. - Aggregation *Aggregation -} - -// WithName returns a copy of the View with a new name. This is useful for -// renaming views to cope with limitations placed on metric names by various -// backends. -func (v *View) WithName(name string) *View { - vNew := *v - vNew.Name = name - return &vNew -} - -// same compares two views and returns true if they represent the same aggregation. -func (v *View) same(other *View) bool { - if v == other { - return true - } - if v == nil { - return false - } - return reflect.DeepEqual(v.Aggregation, other.Aggregation) && - v.Measure.Name() == other.Measure.Name() -} - -// ErrNegativeBucketBounds error returned if histogram contains negative bounds. -// -// Deprecated: this should not be public. -var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") - -// canonicalize canonicalizes v by setting explicit -// defaults for Name and Description and sorting the TagKeys -func (v *View) canonicalize() error { - if v.Measure == nil { - return fmt.Errorf("cannot register view %q: measure not set", v.Name) - } - if v.Aggregation == nil { - return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) - } - if v.Name == "" { - v.Name = v.Measure.Name() - } - if v.Description == "" { - v.Description = v.Measure.Description() - } - if err := checkViewName(v.Name); err != nil { - return err - } - sort.Slice(v.TagKeys, func(i, j int) bool { - return v.TagKeys[i].Name() < v.TagKeys[j].Name() - }) - sort.Float64s(v.Aggregation.Buckets) - for _, b := range v.Aggregation.Buckets { - if b < 0 { - return ErrNegativeBucketBounds - } - } - // drop 0 bucket silently. - v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) - - return nil -} - -func dropZeroBounds(bounds ...float64) []float64 { - for i, bound := range bounds { - if bound > 0 { - return bounds[i:] - } - } - return []float64{} -} - -// viewInternal is the internal representation of a View. -type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector - metricDescriptor *metricdata.Descriptor -} - -func newViewInternal(v *View) (*viewInternal, error) { - return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, - metricDescriptor: viewToMetricDescriptor(v), - }, nil -} - -func (v *viewInternal) subscribe() { - atomic.StoreUint32(&v.subscribed, 1) -} - -func (v *viewInternal) unsubscribe() { - atomic.StoreUint32(&v.subscribed, 0) -} - -// isSubscribed returns true if the view is exporting -// data by subscription. 
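
WithName above exists to rename a view for backends with stricter metric-name rules, and canonicalize fills in Name and Description from the measure when they are left empty. A short illustration, reusing the latencyMs measure from the earlier sketch (the renamed form is only an example):

    // base inherits its name and description from the measure during canonicalization.
    base := &view.View{Measure: latencyMs, Aggregation: view.Distribution(25, 100, 400)}

    // Rename for a backend that disallows '/' and '.' in metric names.
    renamed := base.WithName("example_com_latency")
    _ = view.Register(renamed)
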
-func (v *viewInternal) isSubscribed() bool { - return atomic.LoadUint32(&v.subscribed) == 1 -} - -func (v *viewInternal) clearRows() { - v.collector.clearRows() -} - -func (v *viewInternal) collectedRows() []*Row { - return v.collector.collectedRows(v.view.TagKeys) -} - -func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { - if !v.isSubscribed() { - return - } - sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val, attachments, t) -} - -// A Data is a set of rows about usage of the single measure associated -// with the given view. Each row is specific to a unique set of tags. -type Data struct { - View *View - Start, End time.Time - Rows []*Row -} - -// Row is the collected value for a specific set of key value pairs a.k.a tags. -type Row struct { - Tags []tag.Tag - Data AggregationData -} - -func (r *Row) String() string { - var buffer bytes.Buffer - buffer.WriteString("{ ") - buffer.WriteString("{ ") - for _, t := range r.Tags { - buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) - } - buffer.WriteString(" }") - buffer.WriteString(fmt.Sprintf("%v", r.Data)) - buffer.WriteString(" }") - return buffer.String() -} - -// Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even if both rows have the same tags but the tags appear in -// different orders it will return false. -func (r *Row) Equal(other *Row) bool { - if r == other { - return true - } - return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) -} - -const maxNameLength = 255 - -// Returns true if the given string contains only printable characters. -func isPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} - -func checkViewName(name string) error { - if len(name) > maxNameLength { - return fmt.Errorf("view name cannot be larger than %v", maxNameLength) - } - if !isPrintable(name) { - return fmt.Errorf("view name needs to be an ASCII string") - } - return nil -} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go deleted file mode 100644 index 57d615ec7e..0000000000 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
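
Exporters receive one Data per view, with a Row per unique tag combination, and each Row carries one of the AggregationData implementations shown earlier. A minimal logging exporter is a reasonable sketch of how those types are typically consumed:

    package main

    import (
        "log"

        "go.opencensus.io/stats/view"
    )

    type logExporter struct{}

    func (logExporter) ExportView(d *view.Data) {
        for _, row := range d.Rows {
            switch data := row.Data.(type) {
            case *view.DistributionData:
                log.Printf("%s%v count=%d mean=%.2f", d.View.Name, row.Tags, data.Count, data.Mean)
            case *view.CountData:
                log.Printf("%s%v count=%d", d.View.Name, row.Tags, data.Value)
            default:
                log.Printf("%s%v %v", d.View.Name, row.Tags, row.Data)
            }
        }
    }

    func main() {
        view.RegisterExporter(logExporter{})
        // view.SetReportingPeriod(...) is optional; 10s is already the default period.
    }
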
-// - -package view - -import ( - "time" - - "go.opencensus.io/resource" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" -) - -func getUnit(unit string) metricdata.Unit { - switch unit { - case "1": - return metricdata.UnitDimensionless - case "ms": - return metricdata.UnitMilliseconds - case "By": - return metricdata.UnitBytes - } - return metricdata.UnitDimensionless -} - -func getType(v *View) metricdata.Type { - m := v.Measure - agg := v.Aggregation - - switch agg.Type { - case AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeDistribution: - return metricdata.TypeCumulativeDistribution - case AggTypeLastValue: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeGaugeInt64 - case *stats.Float64Measure: - return metricdata.TypeGaugeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeCount: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeInt64 - default: - panic("unexpected measure type") - } - default: - panic("unexpected aggregation type") - } -} - -func getLabelKeys(v *View) []metricdata.LabelKey { - labelKeys := []metricdata.LabelKey{} - for _, k := range v.TagKeys { - labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) - } - return labelKeys -} - -func viewToMetricDescriptor(v *View) *metricdata.Descriptor { - return &metricdata.Descriptor{ - Name: v.Name, - Description: v.Description, - Unit: convertUnit(v), - Type: getType(v), - LabelKeys: getLabelKeys(v), - } -} - -func convertUnit(v *View) metricdata.Unit { - switch v.Aggregation.Type { - case AggTypeCount: - return metricdata.UnitDimensionless - default: - return getUnit(v.Measure.Unit()) - } -} - -func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { - labelValues := []metricdata.LabelValue{} - tagMap := make(map[string]string) - for _, tag := range row.Tags { - tagMap[tag.Key.Name()] = tag.Value - } - - for _, key := range expectedKeys { - if val, ok := tagMap[key.Key]; ok { - labelValues = append(labelValues, metricdata.NewLabelValue(val)) - } else { - labelValues = append(labelValues, metricdata.LabelValue{}) - } - } - return labelValues -} - -func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { - return &metricdata.TimeSeries{ - Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, - LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: row.Data.StartTime(), - } -} - -func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { - rows := v.collectedRows() - if len(rows) == 0 { - return nil - } - - ts := []*metricdata.TimeSeries{} - for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now)) - } - - m := &metricdata.Metric{ - Descriptor: *v.metricDescriptor, - TimeSeries: ts, - Resource: r, - } - return m -} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go deleted file mode 100644 index 6a79cd8a34..0000000000 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "fmt" - "sync" - "time" - - "go.opencensus.io/resource" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricproducer" - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - defaultWorker = NewMeter().(*worker) - go defaultWorker.start() - internal.DefaultRecorder = record - internal.MeasurementRecorder = recordMeasurement -} - -type measureRef struct { - measure string - views map[*viewInternal]struct{} -} - -type worker struct { - measures map[string]*measureRef - views map[string]*viewInternal - viewStartTimes map[*viewInternal]time.Time - - timer *time.Ticker - c chan command - quit, done chan bool - mu sync.RWMutex - r *resource.Resource - - exportersMu sync.RWMutex - exporters map[Exporter]struct{} -} - -// Meter defines an interface which allows a single process to maintain -// multiple sets of metrics exports (intended for the advanced case where a -// single process wants to report metrics about multiple objects, such as -// multiple databases or HTTP services). -// -// Note that this is an advanced use case, and the static functions in this -// module should cover the common use cases. -type Meter interface { - stats.Recorder - // Find returns a registered view associated with this name. - // If no registered view is found, nil is returned. - Find(name string) *View - // Register begins collecting data for the given views. - // Once a view is registered, it reports data to the registered exporters. - Register(views ...*View) error - // Unregister the given views. Data will not longer be exported for these views - // after Unregister returns. - // It is not necessary to unregister from views you expect to collect for the - // duration of your program execution. - Unregister(views ...*View) - // SetReportingPeriod sets the interval between reporting aggregated views in - // the program. If duration is less than or equal to zero, it enables the - // default behavior. - // - // Note: each exporter makes different promises about what the lowest supported - // duration is. For example, the Stackdriver exporter recommends a value no - // lower than 1 minute. Consult each exporter per your needs. - SetReportingPeriod(time.Duration) - - // RegisterExporter registers an exporter. - // Collected data will be reported via all the - // registered exporters. Once you no longer - // want data to be exported, invoke UnregisterExporter - // with the previously registered exporter. - // - // Binaries can register exporters, libraries shouldn't register exporters. - RegisterExporter(Exporter) - // UnregisterExporter unregisters an exporter. - UnregisterExporter(Exporter) - // SetResource may be used to set the Resource associated with this registry. - // This is intended to be used in cases where a single process exports metrics - // for multiple Resources, typically in a multi-tenant situation. - SetResource(*resource.Resource) - - // Start causes the Meter to start processing Record calls and aggregating - // statistics as well as exporting data. 
- Start() - // Stop causes the Meter to stop processing calls and terminate data export. - Stop() - - // RetrieveData gets a snapshot of the data collected for the the view registered - // with the given name. It is intended for testing only. - RetrieveData(viewName string) ([]*Row, error) -} - -var _ Meter = (*worker)(nil) - -var defaultWorker *worker - -var defaultReportingDuration = 10 * time.Second - -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. -func Find(name string) (v *View) { - return defaultWorker.Find(name) -} - -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. -func (w *worker) Find(name string) (v *View) { - req := &getViewByNameReq{ - name: name, - c: make(chan *getViewByNameResp), - } - w.c <- req - resp := <-req.c - return resp.v -} - -// Register begins collecting data for the given views. -// Once a view is registered, it reports data to the registered exporters. -func Register(views ...*View) error { - return defaultWorker.Register(views...) -} - -// Register begins collecting data for the given views. -// Once a view is registered, it reports data to the registered exporters. -func (w *worker) Register(views ...*View) error { - req := ®isterViewReq{ - views: views, - err: make(chan error), - } - w.c <- req - return <-req.err -} - -// Unregister the given views. Data will not longer be exported for these views -// after Unregister returns. -// It is not necessary to unregister from views you expect to collect for the -// duration of your program execution. -func Unregister(views ...*View) { - defaultWorker.Unregister(views...) -} - -// Unregister the given views. Data will not longer be exported for these views -// after Unregister returns. -// It is not necessary to unregister from views you expect to collect for the -// duration of your program execution. -func (w *worker) Unregister(views ...*View) { - names := make([]string, len(views)) - for i := range views { - names[i] = views[i].Name - } - req := &unregisterFromViewReq{ - views: names, - done: make(chan struct{}), - } - w.c <- req - <-req.done -} - -// RetrieveData gets a snapshot of the data collected for the the view registered -// with the given name. It is intended for testing only. -func RetrieveData(viewName string) ([]*Row, error) { - return defaultWorker.RetrieveData(viewName) -} - -// RetrieveData gets a snapshot of the data collected for the the view registered -// with the given name. It is intended for testing only. -func (w *worker) RetrieveData(viewName string) ([]*Row, error) { - req := &retrieveDataReq{ - now: time.Now(), - v: viewName, - c: make(chan *retrieveDataResp), - } - w.c <- req - resp := <-req.c - return resp.rows, resp.err -} - -func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { - defaultWorker.Record(tags, ms, attachments) -} - -func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { - defaultWorker.recordMeasurement(tags, ms, attachments) -} - -// Record records a set of measurements ms associated with the given tags and attachments. -func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { - w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) -} - -// recordMeasurement records a set of measurements ms associated with the given tags and attachments. 
-// This is the same as Record but without an interface{} type to avoid allocations -func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { - req := &recordReq{ - tm: tags, - ms: ms, - attachments: attachments, - t: time.Now(), - } - w.c <- req -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func SetReportingPeriod(d time.Duration) { - defaultWorker.SetReportingPeriod(d) -} - -// Stop stops the default worker. -func Stop() { - defaultWorker.Stop() -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func (w *worker) SetReportingPeriod(d time.Duration) { - // TODO(acetechnologist): ensure that the duration d is more than a certain - // value. e.g. 1s - req := &setReportingPeriodReq{ - d: d, - c: make(chan bool), - } - w.c <- req - <-req.c // don't return until the timer is set to the new duration. -} - -// NewMeter constructs a Meter instance. You should only need to use this if -// you need to separate out Measurement recordings and View aggregations within -// a single process. -func NewMeter() Meter { - return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - viewStartTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), - - exporters: make(map[Exporter]struct{}), - } -} - -// SetResource associates all data collected by this Meter with the specified -// resource. This resource is reported when using metricexport.ReadAndExport; -// it is not provided when used with ExportView/RegisterExporter, because that -// interface does not provide a means for reporting the Resource. 
-func (w *worker) SetResource(r *resource.Resource) { - w.r = r -} - -func (w *worker) Start() { - go w.start() -} - -func (w *worker) start() { - prodMgr := metricproducer.GlobalManager() - prodMgr.AddProducer(w) - - for { - select { - case cmd := <-w.c: - cmd.handleCommand(w) - case <-w.timer.C: - w.reportUsage() - case <-w.quit: - w.timer.Stop() - close(w.c) - close(w.done) - return - } - } -} - -func (w *worker) Stop() { - prodMgr := metricproducer.GlobalManager() - prodMgr.DeleteProducer(w) - select { - case <-w.quit: - default: - close(w.quit) - } - <-w.done -} - -func (w *worker) getMeasureRef(name string) *measureRef { - if mr, ok := w.measures[name]; ok { - return mr - } - mr := &measureRef{ - measure: name, - views: make(map[*viewInternal]struct{}), - } - w.measures[name] = mr - return mr -} - -func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { - w.mu.Lock() - defer w.mu.Unlock() - vi, err := newViewInternal(v) - if err != nil { - return nil, err - } - if x, ok := w.views[vi.view.Name]; ok { - if !x.view.same(vi.view) { - return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) - } - - // the view is already registered so there is nothing to do and the - // command is considered successful. - return x, nil - } - w.views[vi.view.Name] = vi - w.viewStartTimes[vi] = time.Now() - ref := w.getMeasureRef(vi.view.Measure.Name()) - ref.views[vi] = struct{}{} - return vi, nil -} - -func (w *worker) unregisterView(v *viewInternal) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.views, v.view.Name) - delete(w.viewStartTimes, v) - if measure := w.measures[v.view.Measure.Name()]; measure != nil { - delete(measure.views, v) - } -} - -func (w *worker) reportView(v *viewInternal) { - if !v.isSubscribed() { - return - } - rows := v.collectedRows() - viewData := &Data{ - View: v.view, - Start: w.viewStartTimes[v], - End: time.Now(), - Rows: rows, - } - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - for e := range w.exporters { - e.ExportView(viewData) - } -} - -func (w *worker) reportUsage() { - w.mu.Lock() - defer w.mu.Unlock() - for _, v := range w.views { - w.reportView(v) - } -} - -func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { - if !v.isSubscribed() { - return nil - } - - return viewToMetric(v, w.r, now) -} - -// Read reads all view data and returns them as metrics. -// It is typically invoked by metric reader to export stats in metric format. -func (w *worker) Read() []*metricdata.Metric { - w.mu.Lock() - defer w.mu.Unlock() - now := time.Now() - metrics := make([]*metricdata.Metric, 0, len(w.views)) - for _, v := range w.views { - metric := w.toMetric(v, now) - if metric != nil { - metrics = append(metrics, metric) - } - } - return metrics -} - -func (w *worker) RegisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - w.exporters[e] = struct{}{} -} - -func (w *worker) UnregisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - delete(w.exporters, e) -} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go deleted file mode 100644 index 9ac4cc0599..0000000000 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
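
Besides the push path through exporters, the worker registers itself with the global producer manager and implements Read, which is how pull-based readers obtain data. A sketch of that path, assuming the Reader and Exporter API in go.opencensus.io/metric/metricexport:

    package main

    import (
        "context"
        "log"

        "go.opencensus.io/metric/metricdata"
        "go.opencensus.io/metric/metricexport"
    )

    type printMetricsExporter struct{}

    func (printMetricsExporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error {
        for _, m := range data {
            log.Printf("%s: %d timeseries", m.Descriptor.Name, len(m.TimeSeries))
        }
        return nil
    }

    func main() {
        // Reads every registered producer (including the view worker) once and
        // hands the resulting []*metricdata.Metric to the exporter.
        metricexport.NewReader().ReadAndExport(printMetricsExporter{})
    }
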
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "errors" - "fmt" - "strings" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -type command interface { - handleCommand(w *worker) -} - -// getViewByNameReq is the command to get a view given its name. -type getViewByNameReq struct { - name string - c chan *getViewByNameResp -} - -type getViewByNameResp struct { - v *View -} - -func (cmd *getViewByNameReq) handleCommand(w *worker) { - v := w.views[cmd.name] - if v == nil { - cmd.c <- &getViewByNameResp{nil} - return - } - cmd.c <- &getViewByNameResp{v.view} -} - -// registerViewReq is the command to register a view. -type registerViewReq struct { - views []*View - err chan error -} - -func (cmd *registerViewReq) handleCommand(w *worker) { - for _, v := range cmd.views { - if err := v.canonicalize(); err != nil { - cmd.err <- err - return - } - } - var errstr []string - for _, view := range cmd.views { - vi, err := w.tryRegisterView(view) - if err != nil { - errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) - continue - } - internal.SubscriptionReporter(view.Measure.Name()) - vi.subscribe() - } - if len(errstr) > 0 { - cmd.err <- errors.New(strings.Join(errstr, "\n")) - } else { - cmd.err <- nil - } -} - -// unregisterFromViewReq is the command to unregister to a view. Has no -// impact on the data collection for client that are pulling data from the -// library. -type unregisterFromViewReq struct { - views []string - done chan struct{} -} - -func (cmd *unregisterFromViewReq) handleCommand(w *worker) { - for _, name := range cmd.views { - vi, ok := w.views[name] - if !ok { - continue - } - - // Report pending data for this view before removing it. - w.reportView(vi) - - vi.unsubscribe() - if !vi.isSubscribed() { - // this was the last subscription and view is not collecting anymore. - // The collected data can be cleared. - vi.clearRows() - } - w.unregisterView(vi) - } - cmd.done <- struct{}{} -} - -// retrieveDataReq is the command to retrieve data for a view. -type retrieveDataReq struct { - now time.Time - v string - c chan *retrieveDataResp -} - -type retrieveDataResp struct { - rows []*Row - err error -} - -func (cmd *retrieveDataReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() - vi, ok := w.views[cmd.v] - if !ok { - cmd.c <- &retrieveDataResp{ - nil, - fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), - } - return - } - - if !vi.isSubscribed() { - cmd.c <- &retrieveDataResp{ - nil, - fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), - } - return - } - cmd.c <- &retrieveDataResp{ - vi.collectedRows(), - nil, - } -} - -// recordReq is the command to record data related to multiple measures -// at once. 
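
All of these request types follow the same pattern: mutable state lives on the worker goroutine, callers send a command over a channel and block on a reply. Stripped of the view-specific details, the shape is roughly as follows (names here are illustrative, not the package's own):

    type command interface{ handleCommand(w *worker) }

    type getReq struct {
        name string
        resp chan string
    }

    func (c *getReq) handleCommand(w *worker) { c.resp <- w.state[c.name] }

    func (w *worker) loop() {
        for cmd := range w.c { // w.c is a chan command; only this goroutine touches w.state
            cmd.handleCommand(w)
        }
    }

This channel-confinement design is why most worker methods need no locking of their own: serialization happens at the command queue.
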
-type recordReq struct { - tm *tag.Map - ms []stats.Measurement - attachments map[string]interface{} - t time.Time -} - -func (cmd *recordReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() - for _, m := range cmd.ms { - if (m == stats.Measurement{}) { // not registered - continue - } - ref := w.getMeasureRef(m.Measure().Name()) - for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) - } - } -} - -// setReportingPeriodReq is the command to modify the duration between -// reporting the collected data to the registered clients. -type setReportingPeriodReq struct { - d time.Duration - c chan bool -} - -func (cmd *setReportingPeriodReq) handleCommand(w *worker) { - w.timer.Stop() - if cmd.d <= 0 { - w.timer = time.NewTicker(defaultReportingDuration) - } else { - w.timer = time.NewTicker(cmd.d) - } - cmd.c <- true -} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go deleted file mode 100644 index b27d1b26b1..0000000000 --- a/vendor/go.opencensus.io/tag/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "context" -) - -// FromContext returns the tag map stored in the context. -func FromContext(ctx context.Context) *Map { - // The returned tag map shouldn't be mutated. - ts := ctx.Value(mapCtxKey) - if ts == nil { - return nil - } - return ts.(*Map) -} - -// NewContext creates a new context with the given tag map. -// To propagate a tag map to downstream methods and downstream RPCs, add a tag map -// to the current context. NewContext will return a copy of the current context, -// and put the tag map into the returned one. -// If there is already a tag map in the current context, it will be replaced with m. -func NewContext(ctx context.Context, m *Map) context.Context { - return context.WithValue(ctx, mapCtxKey, m) -} - -type ctxKey struct{} - -var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go deleted file mode 100644 index da16b74e4d..0000000000 --- a/vendor/go.opencensus.io/tag/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package tag contains OpenCensus tags. - -Tags are key-value pairs. Tags provide additional cardinality to -the OpenCensus instrumentation data. 
- -Tags can be propagated on the wire and in the same -process via context.Context. Encode and Decode should be -used to represent tags into their binary propagation form. -*/ -package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go deleted file mode 100644 index 71ec913657..0000000000 --- a/vendor/go.opencensus.io/tag/key.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -// Key represents a tag key. -type Key struct { - name string -} - -// NewKey creates or retrieves a string key identified by name. -// Calling NewKey more than once with the same name returns the same key. -func NewKey(name string) (Key, error) { - if !checkKeyName(name) { - return Key{}, errInvalidKeyName - } - return Key{name: name}, nil -} - -// MustNewKey returns a key with the given name, and panics if name is an invalid key name. -func MustNewKey(name string) Key { - k, err := NewKey(name) - if err != nil { - panic(err) - } - return k -} - -// Name returns the name of the key. -func (k Key) Name() string { - return k.name -} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go deleted file mode 100644 index 0272ef85a4..0000000000 --- a/vendor/go.opencensus.io/tag/map.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "bytes" - "context" - "fmt" - "sort" -) - -// Tag is a key value pair that can be propagated on wire. -type Tag struct { - Key Key - Value string -} - -type tagContent struct { - value string - m metadatas -} - -// Map is a map of tags. Use New to create a context containing -// a new Map. -type Map struct { - m map[Key]tagContent -} - -// Value returns the value for the key if a value for the key exists. 
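
Put together, the key, map and context pieces above compose as in the sketch below; the key name and value are illustrative.

    package main

    import (
        "context"
        "fmt"
        "log"

        "go.opencensus.io/tag"
    )

    var keyMethod = tag.MustNewKey("method")

    func main() {
        // tag.New copies any map already in ctx, applies the mutators and stores
        // the result in a derived context.
        ctx, err := tag.New(context.Background(), tag.Upsert(keyMethod, "GET"))
        if err != nil {
            log.Fatal(err)
        }

        m := tag.FromContext(ctx)
        v, ok := m.Value(keyMethod)
        fmt.Println(v, ok) // GET true
    }
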
-func (m *Map) Value(k Key) (string, bool) { - if m == nil { - return "", false - } - v, ok := m.m[k] - return v.value, ok -} - -func (m *Map) String() string { - if m == nil { - return "nil" - } - keys := make([]Key, 0, len(m.m)) - for k := range m.m { - keys = append(keys, k) - } - sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) - - var buffer bytes.Buffer - buffer.WriteString("{ ") - for _, k := range keys { - buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) - } - buffer.WriteString(" }") - return buffer.String() -} - -func (m *Map) insert(k Key, v string, md metadatas) { - if _, ok := m.m[k]; ok { - return - } - m.m[k] = tagContent{value: v, m: md} -} - -func (m *Map) update(k Key, v string, md metadatas) { - if _, ok := m.m[k]; ok { - m.m[k] = tagContent{value: v, m: md} - } -} - -func (m *Map) upsert(k Key, v string, md metadatas) { - m.m[k] = tagContent{value: v, m: md} -} - -func (m *Map) delete(k Key) { - delete(m.m, k) -} - -func newMap() *Map { - return &Map{m: make(map[Key]tagContent)} -} - -// Mutator modifies a tag map. -type Mutator interface { - Mutate(t *Map) (*Map, error) -} - -// Insert returns a mutator that inserts a -// value associated with k. If k already exists in the tag map, -// mutator doesn't update the value. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which it is provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Insert(k Key, v string, mds ...Metadata) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.insert(k, v, createMetadatas(mds...)) - return m, nil - }, - } -} - -// Update returns a mutator that updates the -// value of the tag associated with k with v. If k doesn't -// exists in the tag map, the mutator doesn't insert the value. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which it is provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Update(k Key, v string, mds ...Metadata) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.update(k, v, createMetadatas(mds...)) - return m, nil - }, - } -} - -// Upsert returns a mutator that upserts the -// value of the tag associated with k with v. It inserts the -// value if k doesn't exist already. It mutates the value -// if k already exists. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which it is provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Upsert(k Key, v string, mds ...Metadata) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.upsert(k, v, createMetadatas(mds...)) - return m, nil - }, - } -} - -func createMetadatas(mds ...Metadata) metadatas { - var metas metadatas - if len(mds) > 0 { - for _, md := range mds { - if md != nil { - md(&metas) - } - } - } else { - WithTTL(TTLUnlimitedPropagation)(&metas) - } - return metas - -} - -// Delete returns a mutator that deletes -// the value associated with k. 
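
The three mutators differ only in how they treat a key that is (or is not) already present. A compact comparison, assuming keyMethod, keyUser and keyRegion were created with tag.MustNewKey:

    ctx, err := tag.New(ctx,
        tag.Insert(keyMethod, "GET"),   // set only if "method" is not present yet
        tag.Update(keyUser, "alice"),   // set only if "user" is already present
        tag.Upsert(keyRegion, "us"),    // always set, inserting or overwriting
    )
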
-func Delete(k Key) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - m.delete(k) - return m, nil - }, - } -} - -// New returns a new context that contains a tag map -// originated from the incoming context and modified -// with the provided mutators. -func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { - m := newMap() - orig := FromContext(ctx) - if orig != nil { - for k, v := range orig.m { - if !checkKeyName(k.Name()) { - return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) - } - if !checkValue(v.value) { - return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) - } - m.insert(k, v.value, v.m) - } - } - var err error - for _, mod := range mutator { - m, err = mod.Mutate(m) - if err != nil { - return ctx, err - } - } - return NewContext(ctx, m), nil -} - -// Do is similar to pprof.Do: a convenience for installing the tags -// from the context as Go profiler labels. This allows you to -// correlated runtime profiling with stats. -// -// It converts the key/values from the given map to Go profiler labels -// and calls pprof.Do. -// -// Do is going to do nothing if your Go version is below 1.9. -func Do(ctx context.Context, f func(ctx context.Context)) { - do(ctx, f) -} - -type mutator struct { - fn func(t *Map) (*Map, error) -} - -func (m *mutator) Mutate(t *Map) (*Map, error) { - return m.fn(t) -} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go deleted file mode 100644 index c242e695c8..0000000000 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "encoding/binary" - "fmt" -) - -// KeyType defines the types of keys allowed. Currently only keyTypeString is -// supported. -type keyType byte - -const ( - keyTypeString keyType = iota - keyTypeInt64 - keyTypeTrue - keyTypeFalse - - tagsVersionID = byte(0) -) - -type encoderGRPC struct { - buf []byte - writeIdx, readIdx int -} - -// writeKeyString writes the fieldID '0' followed by the key string and value -// string. 
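
Do (above) bridges tags and the runtime profiler; wrapping a request handler is the usual shape. handleRequest here is a hypothetical handler, not part of the package:

    tag.Do(ctx, func(ctx context.Context) {
        // CPU profiles captured while this function runs carry the tags in ctx
        // as pprof labels, so profiling data can be sliced the same way as stats.
        handleRequest(ctx)
    })
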
-func (eg *encoderGRPC) writeTagString(k, v string) { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k) - eg.writeStringWithVarintLen(v) -} - -func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { - eg.writeByte(byte(keyTypeInt64)) - eg.writeStringWithVarintLen(k) - eg.writeUint64(i) -} - -func (eg *encoderGRPC) writeTagTrue(k string) { - eg.writeByte(byte(keyTypeTrue)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeTagFalse(k string) { - eg.writeByte(byte(keyTypeFalse)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { - length := len(bytes) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], bytes) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeStringWithVarintLen(s string) { - length := len(s) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], s) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeByte(v byte) { - eg.growIfRequired(1) - eg.buf[eg.writeIdx] = v - eg.writeIdx++ -} - -func (eg *encoderGRPC) writeUint32(i uint32) { - eg.growIfRequired(4) - binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 4 -} - -func (eg *encoderGRPC) writeUint64(i uint64) { - eg.growIfRequired(8) - binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 8 -} - -func (eg *encoderGRPC) readByte() byte { - b := eg.buf[eg.readIdx] - eg.readIdx++ - return b -} - -func (eg *encoderGRPC) readUint32() uint32 { - i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) - eg.readIdx += 4 - return i -} - -func (eg *encoderGRPC) readUint64() uint64 { - i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) - eg.readIdx += 8 - return i -} - -func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { - if eg.readEnded() { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) - if valueStart <= 0 { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - - valueStart += eg.readIdx - valueEnd := valueStart + int(length) - if valueEnd > len(eg.buf) { - return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) - } - - eg.readIdx = valueEnd - return eg.buf[valueStart:valueEnd], nil -} - -func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { - bytes, err := eg.readBytesWithVarintLen() - if err != nil { - return "", err - } - return string(bytes), nil -} - -func (eg *encoderGRPC) growIfRequired(expected int) { - if len(eg.buf)-eg.writeIdx < expected { - tmp := make([]byte, 2*(len(eg.buf)+1)+expected) - copy(tmp, eg.buf) - eg.buf = tmp - } -} - -func (eg *encoderGRPC) readEnded() bool { - return eg.readIdx >= len(eg.buf) -} - -func (eg *encoderGRPC) bytes() []byte { - return eg.buf[:eg.writeIdx] -} - -// Encode encodes the tag map into a []byte. It is useful to propagate -// the tag maps on wire in binary format. 
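
Encode and Decode (defined below) are the entry points built on this encoder; they exist so a tag map can ride along with an RPC in binary metadata. A fragment-level sketch of both ends, assuming ctx already carries tags (the "grpc-tags-bin" header name mentioned in the comment is an assumption about the ocgrpc plugin, not something defined here):

    // Sender side: serialize the current tag map, e.g. into "grpc-tags-bin" metadata.
    buf := tag.Encode(tag.FromContext(ctx))

    // Receiver side: rebuild the map and attach it to the incoming context.
    m, err := tag.Decode(buf)
    if err == nil {
        ctx = tag.NewContext(ctx, m)
    }
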
-func Encode(m *Map) []byte { - if m == nil { - return nil - } - eg := &encoderGRPC{ - buf: make([]byte, len(m.m)), - } - eg.writeByte(tagsVersionID) - for k, v := range m.m { - if v.m.ttl.ttl == valueTTLUnlimitedPropagation { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v.value)) - } - } - return eg.bytes() -} - -// Decode decodes the given []byte into a tag map. -func Decode(bytes []byte) (*Map, error) { - ts := newMap() - err := DecodeEach(bytes, ts.upsert) - if err != nil { - // no partial failures - return nil, err - } - return ts, nil -} - -// DecodeEach decodes the given serialized tag map, calling handler for each -// tag key and value decoded. -func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { - eg := &encoderGRPC{ - buf: bytes, - } - if len(eg.buf) == 0 { - return nil - } - - version := eg.readByte() - if version > tagsVersionID { - return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) - } - - for !eg.readEnded() { - typ := keyType(eg.readByte()) - - if typ != keyTypeString { - return fmt.Errorf("cannot decode: invalid key type: %q", typ) - } - - k, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - v, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - key, err := NewKey(string(k)) - if err != nil { - return err - } - val := string(v) - if !checkValue(val) { - return errInvalidValue - } - fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go deleted file mode 100644 index 6571a583ea..0000000000 --- a/vendor/go.opencensus.io/tag/metadata.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -const ( - // valueTTLNoPropagation prevents tag from propagating. - valueTTLNoPropagation = 0 - - // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. - valueTTLUnlimitedPropagation = -1 -) - -// TTL is metadata that specifies number of hops a tag can propagate. -// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata -type TTL struct { - ttl int -} - -var ( - // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. - TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} - - // TTLNoPropagation is TTL metadata that prevents tag from propagating. - TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} -) - -type metadatas struct { - ttl TTL -} - -// Metadata applies metadatas specified by the function. -type Metadata func(*metadatas) - -// WithTTL applies metadata with provided ttl. 
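
As the Encode loop above shows, only tags whose TTL metadata is TTLUnlimitedPropagation are serialized; TTLNoPropagation keeps a tag strictly in-process. For example (keyMethod and keyDebugID are illustrative keys):

    ctx, err := tag.New(ctx,
        // Propagates across process boundaries; this is also the default when no
        // metadata is supplied.
        tag.Upsert(keyMethod, "GET", tag.WithTTL(tag.TTLUnlimitedPropagation)),
        // Recorded locally but skipped by Encode, so it never leaves the process.
        tag.Upsert(keyDebugID, "42", tag.WithTTL(tag.TTLNoPropagation)),
    )
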
-func WithTTL(ttl TTL) Metadata { - return func(m *metadatas) { - m.ttl = ttl - } -} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go deleted file mode 100644 index 8fb17226fe..0000000000 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package tag - -import ( - "context" - "runtime/pprof" -) - -func do(ctx context.Context, f func(ctx context.Context)) { - m := FromContext(ctx) - keyvals := make([]string, 0, 2*len(m.m)) - for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v.value) - } - pprof.Do(ctx, pprof.Labels(keyvals...), f) -} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go deleted file mode 100644 index e28cf13cde..0000000000 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package tag - -import "context" - -func do(ctx context.Context, f func(ctx context.Context)) { - f(ctx) -} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go deleted file mode 100644 index 0939fc6748..0000000000 --- a/vendor/go.opencensus.io/tag/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tag - -import "errors" - -const ( - maxKeyLength = 255 - - // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
- validKeyValueMin = 32 - validKeyValueMax = 126 -) - -var ( - errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") - errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") -) - -func checkKeyName(name string) bool { - if len(name) == 0 { - return false - } - if len(name) > maxKeyLength { - return false - } - return isASCII(name) -} - -func isASCII(s string) bool { - for _, c := range s { - if (c < validKeyValueMin) || (c > validKeyValueMax) { - return false - } - } - return true -} - -func checkValue(v string) bool { - if len(v) > maxKeyLength { - return false - } - return isASCII(v) -} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go deleted file mode 100644 index c8e26ed635..0000000000 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "fmt" - "time" -) - -type ( - // TraceID is a 16-byte identifier for a set of spans. - TraceID [16]byte - - // SpanID is an 8-byte identifier for a single span. - SpanID [8]byte -) - -func (t TraceID) String() string { - return fmt.Sprintf("%02x", t[:]) -} - -func (s SpanID) String() string { - return fmt.Sprintf("%02x", s[:]) -} - -// Annotation represents a text annotation with a set of attributes and a timestamp. -type Annotation struct { - Time time.Time - Message string - Attributes map[string]interface{} -} - -// Attribute represents a key-value pair on a span, link or annotation. -// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. -type Attribute struct { - key string - value interface{} -} - -// Key returns the attribute's key -func (a *Attribute) Key() string { - return a.key -} - -// Value returns the attribute's value -func (a *Attribute) Value() interface{} { - return a.value -} - -// BoolAttribute returns a bool-valued attribute. -func BoolAttribute(key string, value bool) Attribute { - return Attribute{key: key, value: value} -} - -// Int64Attribute returns an int64-valued attribute. -func Int64Attribute(key string, value int64) Attribute { - return Attribute{key: key, value: value} -} - -// Float64Attribute returns a float64-valued attribute. -func Float64Attribute(key string, value float64) Attribute { - return Attribute{key: key, value: value} -} - -// StringAttribute returns a string-valued attribute. -func StringAttribute(key string, value string) Attribute { - return Attribute{key: key, value: value} -} - -// LinkType specifies the relationship between the span that had the link -// added, and the linked span. -type LinkType int32 - -// LinkType values. -const ( - LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The linked span is a child of the current span. - LinkTypeParent // The linked span is the parent of the current span. 
-) - -// Link represents a reference from one span to another span. -type Link struct { - TraceID TraceID - SpanID SpanID - Type LinkType - // Attributes is a set of attributes on the link. - Attributes map[string]interface{} -} - -// MessageEventType specifies the type of message event. -type MessageEventType int32 - -// MessageEventType values. -const ( - MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. - MessageEventTypeSent // Indicates a sent RPC message. - MessageEventTypeRecv // Indicates a received RPC message. -) - -// MessageEvent represents an event describing a message sent or received on the network. -type MessageEvent struct { - Time time.Time - EventType MessageEventType - MessageID int64 - UncompressedByteSize int64 - CompressedByteSize int64 -} - -// Status is the status of a Span. -type Status struct { - // Code is a status code. Zero indicates success. - // - // If Code will be propagated to Google APIs, it ideally should be a value from - // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . - Code int32 - Message string -} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go deleted file mode 100644 index 775f8274fa..0000000000 --- a/vendor/go.opencensus.io/trace/config.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - - "go.opencensus.io/trace/internal" -) - -// Config represents the global tracing configuration. -type Config struct { - // DefaultSampler is the default sampler used when creating new spans. - DefaultSampler Sampler - - // IDGenerator is for internal use only. - IDGenerator internal.IDGenerator - - // MaxAnnotationEventsPerSpan is max number of annotation events per span - MaxAnnotationEventsPerSpan int - - // MaxMessageEventsPerSpan is max number of message events per span - MaxMessageEventsPerSpan int - - // MaxAnnotationEventsPerSpan is max number of attributes per span - MaxAttributesPerSpan int - - // MaxLinksPerSpan is max number of links per span - MaxLinksPerSpan int -} - -var configWriteMu sync.Mutex - -const ( - // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span - DefaultMaxAnnotationEventsPerSpan = 32 - - // DefaultMaxMessageEventsPerSpan is default max number of message events per span - DefaultMaxMessageEventsPerSpan = 128 - - // DefaultMaxAttributesPerSpan is default max number of attributes per span - DefaultMaxAttributesPerSpan = 32 - - // DefaultMaxLinksPerSpan is default max number of links per span - DefaultMaxLinksPerSpan = 32 -) - -// ApplyConfig applies changes to the global tracing configuration. -// -// Fields not provided in the given config are going to be preserved. 
-func ApplyConfig(cfg Config) { - configWriteMu.Lock() - defer configWriteMu.Unlock() - c := *config.Load().(*Config) - if cfg.DefaultSampler != nil { - c.DefaultSampler = cfg.DefaultSampler - } - if cfg.IDGenerator != nil { - c.IDGenerator = cfg.IDGenerator - } - if cfg.MaxAnnotationEventsPerSpan > 0 { - c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan - } - if cfg.MaxMessageEventsPerSpan > 0 { - c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan - } - if cfg.MaxAttributesPerSpan > 0 { - c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan - } - if cfg.MaxLinksPerSpan > 0 { - c.MaxLinksPerSpan = cfg.MaxLinksPerSpan - } - config.Store(&c) -} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go deleted file mode 100644 index 7a1616a55c..0000000000 --- a/vendor/go.opencensus.io/trace/doc.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package trace contains support for OpenCensus distributed tracing. - -The following assumes a basic familiarity with OpenCensus concepts. -See http://opencensus.io - -# Exporting Traces - -To export collected tracing data, register at least one exporter. You can use -one of the provided exporters or write your own. - - trace.RegisterExporter(exporter) - -By default, traces will be sampled relatively rarely. To change the sampling -frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler -to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - -Be careful about using trace.AlwaysSample in a production application with -significant traffic: a new trace will be started and exported for every request. - -# Adding Spans to a Trace - -A trace consists of a tree of spans. In Go, the current span is carried in a -context.Context. - -It is common to want to capture all the activity of a function call in a span. For -this to work, the function must take a context.Context as a parameter. Add these two -lines to the top of the function: - - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() - -StartSpan will create a new top-level span if the context -doesn't contain another span, otherwise it will create a child span. -*/ -package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go deleted file mode 100644 index ffc264f23d..0000000000 --- a/vendor/go.opencensus.io/trace/evictedqueue.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -type evictedQueue struct { - queue []interface{} - capacity int - droppedCount int -} - -func newEvictedQueue(capacity int) *evictedQueue { - eq := &evictedQueue{ - capacity: capacity, - queue: make([]interface{}, 0), - } - - return eq -} - -func (eq *evictedQueue) add(value interface{}) { - if len(eq.queue) == eq.capacity { - eq.queue = eq.queue[1:] - eq.droppedCount++ - } - eq.queue = append(eq.queue, value) -} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go deleted file mode 100644 index e0d9a4b99e..0000000000 --- a/vendor/go.opencensus.io/trace/export.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - "sync/atomic" - "time" -) - -// Exporter is a type for functions that receive sampled trace spans. -// -// The ExportSpan method should be safe for concurrent use and should return -// quickly; if an Exporter takes a significant amount of time to process a -// SpanData, that work should be done on another goroutine. -// -// The SpanData should not be modified, but a pointer to it can be kept. -type Exporter interface { - ExportSpan(s *SpanData) -} - -type exportersMap map[Exporter]struct{} - -var ( - exporterMu sync.Mutex - exporters atomic.Value -) - -// RegisterExporter adds to the list of Exporters that will receive sampled -// trace spans. -// -// Binaries can register exporters, libraries shouldn't register exporters. -func RegisterExporter(e Exporter) { - exporterMu.Lock() - new := make(exportersMap) - if old, ok := exporters.Load().(exportersMap); ok { - for k, v := range old { - new[k] = v - } - } - new[e] = struct{}{} - exporters.Store(new) - exporterMu.Unlock() -} - -// UnregisterExporter removes from the list of Exporters the Exporter that was -// registered with the given name. -func UnregisterExporter(e Exporter) { - exporterMu.Lock() - new := make(exportersMap) - if old, ok := exporters.Load().(exportersMap); ok { - for k, v := range old { - new[k] = v - } - } - delete(new, e) - exporters.Store(new) - exporterMu.Unlock() -} - -// SpanData contains all the information collected by a Span. -type SpanData struct { - SpanContext - ParentSpanID SpanID - SpanKind int - Name string - StartTime time.Time - // The wall clock time of EndTime will be adjusted to always be offset - // from StartTime by the duration of the span. - EndTime time.Time - // The values of Attributes each have type string, bool, or int64. 
- Attributes map[string]interface{} - Annotations []Annotation - MessageEvents []MessageEvent - Status - Links []Link - HasRemoteParent bool - DroppedAttributeCount int - DroppedAnnotationCount int - DroppedMessageEventCount int - DroppedLinkCount int - - // ChildSpanCount holds the number of child span created for this span. - ChildSpanCount int -} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go deleted file mode 100644 index 7e808d8f30..0000000000 --- a/vendor/go.opencensus.io/trace/internal/internal.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides trace internals. -package internal - -// IDGenerator allows custom generators for TraceId and SpanId. -type IDGenerator interface { - NewTraceID() [16]byte - NewSpanID() [8]byte -} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go deleted file mode 100644 index 80095a5f6c..0000000000 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "github.com/golang/groupcache/lru" -) - -// A simple lru.Cache wrapper that tracks the keys of the current contents and -// the cumulative number of evicted items. 
-type lruMap struct { - cacheKeys map[lru.Key]bool - cache *lru.Cache - droppedCount int -} - -func newLruMap(size int) *lruMap { - lm := &lruMap{ - cacheKeys: make(map[lru.Key]bool), - cache: lru.New(size), - droppedCount: 0, - } - lm.cache.OnEvicted = func(key lru.Key, value interface{}) { - delete(lm.cacheKeys, key) - lm.droppedCount++ - } - return lm -} - -func (lm lruMap) len() int { - return lm.cache.Len() -} - -func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, 0, len(lm.cacheKeys)) - for k := range lm.cacheKeys { - keys = append(keys, k) - } - return keys -} - -func (lm *lruMap) add(key, value interface{}) { - lm.cacheKeys[lru.Key(key)] = true - lm.cache.Add(lru.Key(key), value) -} - -func (lm *lruMap) get(key interface{}) (interface{}, bool) { - return lm.cache.Get(key) -} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go deleted file mode 100644 index 1eb190a96a..0000000000 --- a/vendor/go.opencensus.io/trace/propagation/propagation.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package propagation implements the binary trace context format. -package propagation // import "go.opencensus.io/trace/propagation" - -// TODO: link to external spec document. - -// BinaryFormat format: -// -// Binary value: -// version_id: 1 byte representing the version id. -// -// For version_id = 0: -// -// version_format: -// field_format: -// -// Fields: -// -// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. -// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. -// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. -// -// Fields MUST be encoded using the field id order (smaller to higher). -// -// Valid value example: -// -// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, -// 98, 99, 100, 101, 102, 103, 104, 2, 1} -// -// version_id = 0; -// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} -// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; -// trace_options = {1}; - -import ( - "net/http" - - "go.opencensus.io/trace" -) - -// Binary returns the binary format representation of a SpanContext. -// -// If sc is the zero value, Binary returns nil. -func Binary(sc trace.SpanContext) []byte { - if sc == (trace.SpanContext{}) { - return nil - } - var b [29]byte - copy(b[2:18], sc.TraceID[:]) - b[18] = 1 - copy(b[19:27], sc.SpanID[:]) - b[27] = 2 - b[28] = uint8(sc.TraceOptions) - return b[:] -} - -// FromBinary returns the SpanContext represented by b. -// -// If b has an unsupported version ID or contains no TraceID, FromBinary -// returns with ok==false. 
-func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { - if len(b) == 0 || b[0] != 0 { - return trace.SpanContext{}, false - } - b = b[1:] - if len(b) >= 17 && b[0] == 0 { - copy(sc.TraceID[:], b[1:17]) - b = b[17:] - } else { - return trace.SpanContext{}, false - } - if len(b) >= 9 && b[0] == 1 { - copy(sc.SpanID[:], b[1:9]) - b = b[9:] - } - if len(b) >= 2 && b[0] == 2 { - sc.TraceOptions = trace.TraceOptions(b[1]) - } - return sc, true -} - -// HTTPFormat implementations propagate span contexts -// in HTTP requests. -// -// SpanContextFromRequest extracts a span context from incoming -// requests. -// -// SpanContextToRequest modifies the given request to include the given -// span context. -type HTTPFormat interface { - SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) - SpanContextToRequest(sc trace.SpanContext, req *http.Request) -} - -// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go deleted file mode 100644 index 71c10f9e3b..0000000000 --- a/vendor/go.opencensus.io/trace/sampling.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "encoding/binary" -) - -const defaultSamplingProbability = 1e-4 - -// Sampler decides whether a trace should be sampled and exported. -type Sampler func(SamplingParameters) SamplingDecision - -// SamplingParameters contains the values passed to a Sampler. -type SamplingParameters struct { - ParentContext SpanContext - TraceID TraceID - SpanID SpanID - Name string - HasRemoteParent bool -} - -// SamplingDecision is the value returned by a Sampler. -type SamplingDecision struct { - Sample bool -} - -// ProbabilitySampler returns a Sampler that samples a given fraction of traces. -// -// It also samples spans whose parents are sampled. -func ProbabilitySampler(fraction float64) Sampler { - if !(fraction >= 0) { - fraction = 0 - } else if fraction >= 1 { - return AlwaysSample() - } - - traceIDUpperBound := uint64(fraction * (1 << 63)) - return Sampler(func(p SamplingParameters) SamplingDecision { - if p.ParentContext.IsSampled() { - return SamplingDecision{Sample: true} - } - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 - return SamplingDecision{Sample: x < traceIDUpperBound} - }) -} - -// AlwaysSample returns a Sampler that samples every trace. -// Be careful about using this sampler in a production application with -// significant traffic: a new trace will be started and exported for every -// request. -func AlwaysSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: true} - } -} - -// NeverSample returns a Sampler that samples no traces. 
-func NeverSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: false} - } -} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go deleted file mode 100644 index fbabad34c0..0000000000 --- a/vendor/go.opencensus.io/trace/spanbucket.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "time" -) - -// samplePeriod is the minimum time between accepting spans in a single bucket. -const samplePeriod = time.Second - -// defaultLatencies contains the default latency bucket bounds. -// TODO: consider defaults, make configurable -var defaultLatencies = [...]time.Duration{ - 10 * time.Microsecond, - 100 * time.Microsecond, - time.Millisecond, - 10 * time.Millisecond, - 100 * time.Millisecond, - time.Second, - 10 * time.Second, - time.Minute, -} - -// bucket is a container for a set of spans for a particular error code or latency range. -type bucket struct { - nextTime time.Time // next time we can accept a span - buffer []*SpanData // circular buffer of spans - nextIndex int // location next SpanData should be placed in buffer - overflow bool // whether the circular buffer has wrapped around -} - -func makeBucket(bufferSize int) bucket { - return bucket{ - buffer: make([]*SpanData, bufferSize), - } -} - -// add adds a span to the bucket, if nextTime has been reached. -func (b *bucket) add(s *SpanData) { - if s.EndTime.Before(b.nextTime) { - return - } - if len(b.buffer) == 0 { - return - } - b.nextTime = s.EndTime.Add(samplePeriod) - b.buffer[b.nextIndex] = s - b.nextIndex++ - if b.nextIndex == len(b.buffer) { - b.nextIndex = 0 - b.overflow = true - } -} - -// size returns the number of spans in the bucket. -func (b *bucket) size() int { - if b.overflow { - return len(b.buffer) - } - return b.nextIndex -} - -// span returns the ith span in the bucket. -func (b *bucket) span(i int) *SpanData { - if !b.overflow { - return b.buffer[i] - } - if i < len(b.buffer)-b.nextIndex { - return b.buffer[b.nextIndex+i] - } - return b.buffer[b.nextIndex+i-len(b.buffer)] -} - -// resize changes the size of the bucket to n, keeping up to n existing spans. -func (b *bucket) resize(n int) { - cur := b.size() - newBuffer := make([]*SpanData, n) - if cur < n { - for i := 0; i < cur; i++ { - newBuffer[i] = b.span(i) - } - b.buffer = newBuffer - b.nextIndex = cur - b.overflow = false - return - } - for i := 0; i < n; i++ { - newBuffer[i] = b.span(i + cur - n) - } - b.buffer = newBuffer - b.nextIndex = 0 - b.overflow = true -} - -// latencyBucket returns the appropriate bucket number for a given latency. -func latencyBucket(latency time.Duration) int { - i := 0 - for i < len(defaultLatencies) && latency >= defaultLatencies[i] { - i++ - } - return i -} - -// latencyBucketBounds returns the lower and upper bounds for a latency bucket -// number. 
-// -// The lower bound is inclusive, the upper bound is exclusive (except for the -// last bucket.) -func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { - if index == 0 { - return 0, defaultLatencies[index] - } - if index == len(defaultLatencies) { - return defaultLatencies[index-1], 1<<63 - 1 - } - return defaultLatencies[index-1], defaultLatencies[index] -} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go deleted file mode 100644 index e601f76f2c..0000000000 --- a/vendor/go.opencensus.io/trace/spanstore.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - "time" - - "go.opencensus.io/internal" -) - -const ( - maxBucketSize = 100000 - defaultBucketSize = 10 -) - -var ( - ssmu sync.RWMutex // protects spanStores - spanStores = make(map[string]*spanStore) -) - -// This exists purely to avoid exposing internal methods used by z-Pages externally. -type internalOnly struct{} - -func init() { - //TODO(#412): remove - internal.Trace = &internalOnly{} -} - -// ReportActiveSpans returns the active spans for the given name. -func (i internalOnly) ReportActiveSpans(name string) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for activeSpan := range s.active { - if s, ok := activeSpan.(*span); ok { - out = append(out, s.makeSpanData()) - } - } - return out -} - -// ReportSpansByError returns a sample of error spans. -// -// If code is nonzero, only spans with that status code are returned. -func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - if code != 0 { - if b, ok := s.errors[code]; ok { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } else { - for _, b := range s.errors { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } - return out -} - -// ConfigureBucketSizes sets the number of spans to keep per latency and error -// bucket for different span names. -func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { - for _, bc := range bcs { - latencyBucketSize := bc.MaxRequestsSucceeded - if latencyBucketSize < 0 { - latencyBucketSize = 0 - } - if latencyBucketSize > maxBucketSize { - latencyBucketSize = maxBucketSize - } - errorBucketSize := bc.MaxRequestsErrors - if errorBucketSize < 0 { - errorBucketSize = 0 - } - if errorBucketSize > maxBucketSize { - errorBucketSize = maxBucketSize - } - spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) - } -} - -// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. 
-func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { - out := make(map[string]internal.PerMethodSummary) - ssmu.RLock() - defer ssmu.RUnlock() - for name, s := range spanStores { - s.mu.Lock() - p := internal.PerMethodSummary{ - Active: len(s.active), - } - for code, b := range s.errors { - p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ - ErrorCode: code, - Size: b.size(), - }) - } - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ - MinLatency: min, - MaxLatency: max, - Size: b.size(), - }) - } - s.mu.Unlock() - out[name] = p - } - return out -} - -// ReportSpansByLatency returns a sample of successful spans. -// -// minLatency is the minimum latency of spans to be returned. -// maxLatency, if nonzero, is the maximum latency of spans to be returned. -func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - if i+1 != len(s.latency) && max <= minLatency { - continue - } - if maxLatency != 0 && maxLatency < min { - continue - } - for _, sd := range b.buffer { - if sd == nil { - break - } - if minLatency != 0 || maxLatency != 0 { - d := sd.EndTime.Sub(sd.StartTime) - if d < minLatency { - continue - } - if maxLatency != 0 && d > maxLatency { - continue - } - } - out = append(out, sd) - } - } - return out -} - -// spanStore keeps track of spans stored for a particular span name. -// -// It contains all active spans; a sample of spans for failed requests, -// categorized by error code; and a sample of spans for successful requests, -// bucketed by latency. -type spanStore struct { - mu sync.Mutex // protects everything below. - active map[SpanInterface]struct{} - errors map[int32]*bucket - latency []bucket - maxSpansPerErrorBucket int -} - -// newSpanStore creates a span store. -func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { - s := &spanStore{ - active: make(map[SpanInterface]struct{}), - latency: make([]bucket, len(defaultLatencies)+1), - maxSpansPerErrorBucket: errorBucketSize, - } - for i := range s.latency { - s.latency[i] = makeBucket(latencyBucketSize) - } - return s -} - -// spanStoreForName returns the spanStore for the given name. -// -// It returns nil if it doesn't exist. -func spanStoreForName(name string) *spanStore { - var s *spanStore - ssmu.RLock() - s, _ = spanStores[name] - ssmu.RUnlock() - return s -} - -// spanStoreForNameCreateIfNew returns the spanStore for the given name. -// -// It creates it if it didn't exist. -func spanStoreForNameCreateIfNew(name string) *spanStore { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - return s - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - return s - } - s = newSpanStore(name, defaultBucketSize, defaultBucketSize) - spanStores[name] = s - return s -} - -// spanStoreSetSize resizes the spanStore for the given name. -// -// It creates it if it didn't exist. 
-func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - s = newSpanStore(name, latencyBucketSize, errorBucketSize) - spanStores[name] = s -} - -func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { - s.mu.Lock() - for i := range s.latency { - s.latency[i].resize(latencyBucketSize) - } - for _, b := range s.errors { - b.resize(errorBucketSize) - } - s.maxSpansPerErrorBucket = errorBucketSize - s.mu.Unlock() -} - -// add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span SpanInterface) { - s.mu.Lock() - s.active[span] = struct{}{} - s.mu.Unlock() -} - -// finished removes a span from the active set, and adds a corresponding -// SpanData to a latency or error bucket. -func (s *spanStore) finished(span SpanInterface, sd *SpanData) { - latency := sd.EndTime.Sub(sd.StartTime) - if latency < 0 { - latency = 0 - } - code := sd.Status.Code - - s.mu.Lock() - delete(s.active, span) - if code == 0 { - s.latency[latencyBucket(latency)].add(sd) - } else { - if s.errors == nil { - s.errors = make(map[int32]*bucket) - } - if b := s.errors[code]; b != nil { - b.add(sd) - } else { - b := makeBucket(s.maxSpansPerErrorBucket) - s.errors[code] = &b - b.add(sd) - } - } - s.mu.Unlock() -} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go deleted file mode 100644 index ec60effd10..0000000000 --- a/vendor/go.opencensus.io/trace/status_codes.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -// Status codes for use with Span.SetStatus. These correspond to the status -// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -const ( - StatusCodeOK = 0 - StatusCodeCancelled = 1 - StatusCodeUnknown = 2 - StatusCodeInvalidArgument = 3 - StatusCodeDeadlineExceeded = 4 - StatusCodeNotFound = 5 - StatusCodeAlreadyExists = 6 - StatusCodePermissionDenied = 7 - StatusCodeResourceExhausted = 8 - StatusCodeFailedPrecondition = 9 - StatusCodeAborted = 10 - StatusCodeOutOfRange = 11 - StatusCodeUnimplemented = 12 - StatusCodeInternal = 13 - StatusCodeUnavailable = 14 - StatusCodeDataLoss = 15 - StatusCodeUnauthenticated = 16 -) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go deleted file mode 100644 index 861df9d391..0000000000 --- a/vendor/go.opencensus.io/trace/trace.go +++ /dev/null @@ -1,595 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "go.opencensus.io/internal" - "go.opencensus.io/trace/tracestate" -) - -type tracer struct{} - -var _ Tracer = &tracer{} - -// Span represents a span of a trace. It has an associated SpanContext, and -// stores data accumulated while the span is active. -// -// Ideally users should interact with Spans by calling the functions in this -// package that take a Context parameter. -type span struct { - // data contains information recorded about the span. - // - // It will be non-nil if we are exporting the span or recording events for it. - // Otherwise, data is nil, and the Span is simply a carrier for the - // SpanContext, so that the trace ID is propagated. - data *SpanData - mu sync.Mutex // protects the contents of *data (but not the pointer value.) - spanContext SpanContext - - // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry - // is removed to create room for a new entry. - lruAttributes *lruMap - - // annotations are stored in FIFO queue capped by configured limit. - annotations *evictedQueue - - // messageEvents are stored in FIFO queue capped by configured limit. - messageEvents *evictedQueue - - // links are stored in FIFO queue capped by configured limit. - links *evictedQueue - - // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. - *spanStore - endOnce sync.Once - - executionTracerTaskEnd func() // ends the execution tracer span -} - -// IsRecordingEvents returns true if events are being recorded for this span. -// Use this check to avoid computing expensive annotations when they will never -// be used. -func (s *span) IsRecordingEvents() bool { - if s == nil { - return false - } - return s.data != nil -} - -// TraceOptions contains options associated with a trace span. -type TraceOptions uint32 - -// IsSampled returns true if the span will be exported. -func (sc SpanContext) IsSampled() bool { - return sc.TraceOptions.IsSampled() -} - -// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. -func (sc *SpanContext) setIsSampled(sampled bool) { - if sampled { - sc.TraceOptions |= 1 - } else { - sc.TraceOptions &= ^TraceOptions(1) - } -} - -// IsSampled returns true if the span will be exported. -func (t TraceOptions) IsSampled() bool { - return t&1 == 1 -} - -// SpanContext contains the state that must propagate across process boundaries. -// -// SpanContext is not an implementation of context.Context. -// TODO: add reference to external Census docs for SpanContext. -type SpanContext struct { - TraceID TraceID - SpanID SpanID - TraceOptions TraceOptions - Tracestate *tracestate.Tracestate -} - -type contextKey struct{} - -// FromContext returns the Span stored in a context, or nil if there isn't one. -func (t *tracer) FromContext(ctx context.Context) *Span { - s, _ := ctx.Value(contextKey{}).(*Span) - return s -} - -// NewContext returns a new context with the given Span attached. 
-func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { - return context.WithValue(parent, contextKey{}, s) -} - -// All available span kinds. Span kind must be either one of these values. -const ( - SpanKindUnspecified = iota - SpanKindServer - SpanKindClient -) - -// StartOptions contains options concerning how a span is started. -type StartOptions struct { - // Sampler to consult for this Span. If provided, it is always consulted. - // - // If not provided, then the behavior differs based on whether - // the parent of this Span is remote, local, or there is no parent. - // In the case of a remote parent or no parent, the - // default sampler (see Config) will be consulted. Otherwise, - // when there is a non-remote parent, no new sampling decision will be made: - // we will preserve the sampling of the parent. - Sampler Sampler - - // SpanKind represents the kind of a span. If none is set, - // SpanKindUnspecified is used. - SpanKind int -} - -// StartOption apply changes to StartOptions. -type StartOption func(*StartOptions) - -// WithSpanKind makes new spans to be created with the given kind. -func WithSpanKind(spanKind int) StartOption { - return func(o *StartOptions) { - o.SpanKind = spanKind - } -} - -// WithSampler makes new spans to be be created with a custom sampler. -// Otherwise, the global sampler is used. -func WithSampler(sampler Sampler) StartOption { - return func(o *StartOptions) { - o.Sampler = sampler - } -} - -// StartSpan starts a new child span of the current span in the context. If -// there is no span in the context, creates a new trace and span. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. -func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - var parent SpanContext - if p := t.FromContext(ctx); p != nil { - if ps, ok := p.internal.(*span); ok { - ps.addChild() - } - parent = p.SpanContext() - } - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) - - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - extSpan := NewSpan(span) - return t.NewContext(ctx, extSpan), extSpan -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -// -// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is -// preferred for cases where the parent is propagated via an incoming request. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. 
-func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - extSpan := NewSpan(span) - return t.NewContext(ctx, extSpan), extSpan -} - -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { - s := &span{} - s.spanContext = parent - - cfg := config.Load().(*Config) - if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { - // lazy initialization - gen.init() - } - - if !hasParent { - s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() - } - s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() - sampler := cfg.DefaultSampler - - if !hasParent || remoteParent || o.Sampler != nil { - // If this span is the child of a local span and no Sampler is set in the - // options, keep the parent's TraceOptions. - // - // Otherwise, consult the Sampler in the options if it is non-nil, otherwise - // the default sampler. - if o.Sampler != nil { - sampler = o.Sampler - } - s.spanContext.setIsSampled(sampler(SamplingParameters{ - ParentContext: parent, - TraceID: s.spanContext.TraceID, - SpanID: s.spanContext.SpanID, - Name: name, - HasRemoteParent: remoteParent}).Sample) - } - - if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { - return s - } - - s.data = &SpanData{ - SpanContext: s.spanContext, - StartTime: time.Now(), - SpanKind: o.SpanKind, - Name: name, - HasRemoteParent: remoteParent, - } - s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - s.links = newEvictedQueue(cfg.MaxLinksPerSpan) - - if hasParent { - s.data.ParentSpanID = parent.SpanID - } - if internal.LocalSpanStoreEnabled { - var ss *spanStore - ss = spanStoreForNameCreateIfNew(name) - if ss != nil { - s.spanStore = ss - ss.add(s) - } - } - - return s -} - -// End ends the span. -func (s *span) End() { - if s == nil { - return - } - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } - if !s.IsRecordingEvents() { - return - } - s.endOnce.Do(func() { - exp, _ := exporters.Load().(exportersMap) - mustExport := s.spanContext.IsSampled() && len(exp) > 0 - if s.spanStore != nil || mustExport { - sd := s.makeSpanData() - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - if s.spanStore != nil { - s.spanStore.finished(s, sd) - } - if mustExport { - for e := range exp { - e.ExportSpan(sd) - } - } - } - }) -} - -// makeSpanData produces a SpanData representing the current state of the Span. -// It requires that s.data is non-nil. 
-func (s *span) makeSpanData() *SpanData { - var sd SpanData - s.mu.Lock() - sd = *s.data - if s.lruAttributes.len() > 0 { - sd.Attributes = s.lruAttributesToAttributeMap() - sd.DroppedAttributeCount = s.lruAttributes.droppedCount - } - if len(s.annotations.queue) > 0 { - sd.Annotations = s.interfaceArrayToAnnotationArray() - sd.DroppedAnnotationCount = s.annotations.droppedCount - } - if len(s.messageEvents.queue) > 0 { - sd.MessageEvents = s.interfaceArrayToMessageEventArray() - sd.DroppedMessageEventCount = s.messageEvents.droppedCount - } - if len(s.links.queue) > 0 { - sd.Links = s.interfaceArrayToLinksArray() - sd.DroppedLinkCount = s.links.droppedCount - } - s.mu.Unlock() - return &sd -} - -// SpanContext returns the SpanContext of the span. -func (s *span) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - return s.spanContext -} - -// SetName sets the name of the span, if it is recording events. -func (s *span) SetName(name string) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Name = name - s.mu.Unlock() -} - -// SetStatus sets the status of the span, if it is recording events. -func (s *span) SetStatus(status Status) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Status = status - s.mu.Unlock() -} - -func (s *span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0, len(s.links.queue)) - for _, value := range s.links.queue { - linksArr = append(linksArr, value.(Link)) - } - return linksArr -} - -func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) - for _, value := range s.messageEvents.queue { - messageEventArr = append(messageEventArr, value.(MessageEvent)) - } - return messageEventArr -} - -func (s *span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0, len(s.annotations.queue)) - for _, value := range s.annotations.queue { - annotationArr = append(annotationArr, value.(Annotation)) - } - return annotationArr -} - -func (s *span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}, s.lruAttributes.len()) - for _, key := range s.lruAttributes.keys() { - value, ok := s.lruAttributes.get(key) - if ok { - keyStr := key.(string) - attributes[keyStr] = value - } - } - return attributes -} - -func (s *span) copyToCappedAttributes(attributes []Attribute) { - for _, a := range attributes { - s.lruAttributes.add(a.key, a.value) - } -} - -func (s *span) addChild() { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.ChildSpanCount++ - s.mu.Unlock() -} - -// AddAttributes sets attributes in the span. -// -// Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *span) AddAttributes(attributes ...Attribute) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.copyToCappedAttributes(attributes) - s.mu.Unlock() -} - -func (s *span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var am map[string]interface{} - if len(attributes) != 0 { - am = make(map[string]interface{}, len(attributes)) - for _, attr := range attributes { - am[attr.key] = attr.value - } - } - s.mu.Lock() - s.annotations.add(Annotation{ - Time: now, - Message: str, - Attributes: am, - }) - s.mu.Unlock() -} - -// Annotate adds an annotation with attributes. -// Attributes can be nil. 
-func (s *span) Annotate(attributes []Attribute, str string) { - if !s.IsRecordingEvents() { - return - } - s.printStringInternal(attributes, str) -} - -// Annotatef adds an annotation with attributes. -func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { - if !s.IsRecordingEvents() { - return - } - s.printStringInternal(attributes, fmt.Sprintf(format, a...)) -} - -// AddMessageSendEvent adds a message send event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.messageEvents.add(MessageEvent{ - Time: now, - EventType: MessageEventTypeSent, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddMessageReceiveEvent adds a message receive event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.messageEvents.add(MessageEvent{ - Time: now, - EventType: MessageEventTypeRecv, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddLink adds a link to the span. -func (s *span) AddLink(l Link) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.links.add(l) - s.mu.Unlock() -} - -func (s *span) String() string { - if s == nil { - return "" - } - if s.data == nil { - return fmt.Sprintf("span %s", s.spanContext.SpanID) - } - s.mu.Lock() - str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) - s.mu.Unlock() - return str -} - -var config atomic.Value // access atomically - -func init() { - config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: &defaultIDGenerator{}, - MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, - MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, - MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, - MaxLinksPerSpan: DefaultMaxLinksPerSpan, - }) -} - -type defaultIDGenerator struct { - sync.Mutex - - // Please keep these as the first fields - // so that these 8 byte fields will be aligned on addresses - // divisible by 8, on both 32-bit and 64-bit machines when - // performing atomic increments and accesses. - // See: - // * https://github.com/census-instrumentation/opencensus-go/issues/587 - // * https://github.com/census-instrumentation/opencensus-go/issues/865 - // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG - nextSpanID uint64 - spanIDInc uint64 - - traceIDAdd [2]uint64 - traceIDRand *rand.Rand - - initOnce sync.Once -} - -// init initializes the generator on the first call to avoid consuming entropy -// unnecessarily. -func (gen *defaultIDGenerator) init() { - gen.initOnce.Do(func() { - // initialize traceID and spanID generators. 
- var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - }) -} - -// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *defaultIDGenerator) NewSpanID() [8]byte { - var id uint64 - for id == 0 { - id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) - } - var sid [8]byte - binary.LittleEndian.PutUint64(sid[:], id) - return sid -} - -// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. -// mu should be held while this function is called. -func (gen *defaultIDGenerator) NewTraceID() [16]byte { - var tid [16]byte - // Construct the trace ID from two outputs of traceIDRand, with a constant - // added to each half for additional entropy. - gen.Lock() - binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) - binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) - gen.Unlock() - return tid -} diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go deleted file mode 100644 index 9e2c3a9992..0000000000 --- a/vendor/go.opencensus.io/trace/trace_api.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" -) - -// DefaultTracer is the tracer used when package-level exported functions are invoked. -var DefaultTracer Tracer = &tracer{} - -// Tracer can start spans and access context functions. -type Tracer interface { - - // StartSpan starts a new child span of the current span in the context. If - // there is no span in the context, creates a new trace and span. - // - // Returned context contains the newly created span. You can use it to - // propagate the returned span in process. - StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) - - // StartSpanWithRemoteParent starts a new child span of the span from the given parent. - // - // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is - // preferred for cases where the parent is propagated via an incoming request. - // - // Returned context contains the newly created span. You can use it to - // propagate the returned span in process. - StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) - - // FromContext returns the Span stored in a context, or nil if there isn't one. - FromContext(ctx context.Context) *Span - - // NewContext returns a new context with the given Span attached. - NewContext(parent context.Context, s *Span) context.Context -} - -// StartSpan starts a new child span of the current span in the context. If -// there is no span in the context, creates a new trace and span. -// -// Returned context contains the newly created span. 
You can use it to -// propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { - return DefaultTracer.StartSpan(ctx, name, o...) -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -// -// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is -// preferred for cases where the parent is propagated via an incoming request. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. -func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { - return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) -} - -// FromContext returns the Span stored in a context, or a Span that is not -// recording events if there isn't one. -func FromContext(ctx context.Context) *Span { - return DefaultTracer.FromContext(ctx) -} - -// NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { - return DefaultTracer.NewContext(parent, s) -} - -// SpanInterface represents a span of a trace. It has an associated SpanContext, and -// stores data accumulated while the span is active. -// -// Ideally users should interact with Spans by calling the functions in this -// package that take a Context parameter. -type SpanInterface interface { - - // IsRecordingEvents returns true if events are being recorded for this span. - // Use this check to avoid computing expensive annotations when they will never - // be used. - IsRecordingEvents() bool - - // End ends the span. - End() - - // SpanContext returns the SpanContext of the span. - SpanContext() SpanContext - - // SetName sets the name of the span, if it is recording events. - SetName(name string) - - // SetStatus sets the status of the span, if it is recording events. - SetStatus(status Status) - - // AddAttributes sets attributes in the span. - // - // Existing attributes whose keys appear in the attributes parameter are overwritten. - AddAttributes(attributes ...Attribute) - - // Annotate adds an annotation with attributes. - // Attributes can be nil. - Annotate(attributes []Attribute, str string) - - // Annotatef adds an annotation with attributes. - Annotatef(attributes []Attribute, format string, a ...interface{}) - - // AddMessageSendEvent adds a message send event to the span. - // - // messageID is an identifier for the message, which is recommended to be - // unique in this span and the same between the send event and the receive - // event (this allows to identify a message between the sender and receiver). - // For example, this could be a sequence id. - AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) - - // AddMessageReceiveEvent adds a message receive event to the span. - // - // messageID is an identifier for the message, which is recommended to be - // unique in this span and the same between the send event and the receive - // event (this allows to identify a message between the sender and receiver). - // For example, this could be a sequence id. - AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) - - // AddLink adds a link to the span. - AddLink(l Link) - - // String prints a string representation of a span. 
- String() string -} - -// NewSpan is a convenience function for creating a *Span out of a *span -func NewSpan(s SpanInterface) *Span { - return &Span{internal: s} -} - -// Span is a struct wrapper around the SpanInt interface, which allows correctly handling -// nil spans, while also allowing the SpanInterface implementation to be swapped out. -type Span struct { - internal SpanInterface -} - -// Internal returns the underlying implementation of the Span -func (s *Span) Internal() SpanInterface { - return s.internal -} - -// IsRecordingEvents returns true if events are being recorded for this span. -// Use this check to avoid computing expensive annotations when they will never -// be used. -func (s *Span) IsRecordingEvents() bool { - if s == nil { - return false - } - return s.internal.IsRecordingEvents() -} - -// End ends the span. -func (s *Span) End() { - if s == nil { - return - } - s.internal.End() -} - -// SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - return s.internal.SpanContext() -} - -// SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { - if !s.IsRecordingEvents() { - return - } - s.internal.SetName(name) -} - -// SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { - if !s.IsRecordingEvents() { - return - } - s.internal.SetStatus(status) -} - -// AddAttributes sets attributes in the span. -// -// Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) AddAttributes(attributes ...Attribute) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddAttributes(attributes...) -} - -// Annotate adds an annotation with attributes. -// Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { - if !s.IsRecordingEvents() { - return - } - s.internal.Annotate(attributes, str) -} - -// Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { - if !s.IsRecordingEvents() { - return - } - s.internal.Annotatef(attributes, format, a...) -} - -// AddMessageSendEvent adds a message send event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) -} - -// AddMessageReceiveEvent adds a message receive event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) -} - -// AddLink adds a link to the span. 
-func (s *Span) AddLink(l Link) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddLink(l) -} - -// String prints a string representation of a span. -func (s *Span) String() string { - if s == nil { - return "" - } - return s.internal.String() -} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go deleted file mode 100644 index b8fc1e495a..0000000000 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.11 -// +build go1.11 - -package trace - -import ( - "context" - t "runtime/trace" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - if !t.IsEnabled() { - // Avoid additional overhead if - // runtime/trace is not enabled. - return ctx, func() {} - } - nctx, task := t.NewTask(ctx, name) - return nctx, task.End -} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go deleted file mode 100644 index da488fc874..0000000000 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.11 -// +build !go1.11 - -package trace - -import ( - "context" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - return ctx, func() {} -} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go deleted file mode 100644 index 2d6c713eb3..0000000000 --- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tracestate implements support for the Tracestate header of the -// W3C TraceContext propagation format. 
-package tracestate - -import ( - "fmt" - "regexp" -) - -const ( - keyMaxSize = 256 - valueMaxSize = 256 - maxKeyValuePairs = 32 -) - -const ( - keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` -) - -var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) -var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) - -// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different -// vendors propagate additional information and inter-operate with their legacy Id formats. -type Tracestate struct { - entries []Entry -} - -// Entry represents one key-value pair in a list of key-value pair of Tracestate. -type Entry struct { - // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, - // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and - // forward slashes /. - Key string - - // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the - // range 0x20 to 0x7E) except comma , and =. - Value string -} - -// Entries returns a slice of Entry. -func (ts *Tracestate) Entries() []Entry { - if ts == nil { - return nil - } - return ts.entries -} - -func (ts *Tracestate) remove(key string) *Entry { - for index, entry := range ts.entries { - if entry.Key == key { - ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) - return &entry - } - } - return nil -} - -func (ts *Tracestate) add(entries []Entry) error { - for _, entry := range entries { - ts.remove(entry.Key) - } - if len(ts.entries)+len(entries) > maxKeyValuePairs { - return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", - len(entries), len(ts.entries), maxKeyValuePairs) - } - ts.entries = append(entries, ts.entries...) - return nil -} - -func isValid(entry Entry) bool { - return keyValidationRegExp.MatchString(entry.Key) && - valueValidationRegExp.MatchString(entry.Value) -} - -func containsDuplicateKey(entries ...Entry) (string, bool) { - keyMap := make(map[string]int) - for _, entry := range entries { - if _, ok := keyMap[entry.Key]; ok { - return entry.Key, true - } - keyMap[entry.Key] = 1 - } - return "", false -} - -func areEntriesValid(entries ...Entry) (*Entry, bool) { - for _, entry := range entries { - if !isValid(entry) { - return &entry, false - } - } - return nil, true -} - -// New creates a Tracestate object from a parent and/or entries (key-value pair). -// Entries from the parent are copied if present. The entries passed to this function -// are inserted in front of those copied from the parent. If an entry copied from the -// parent contains the same key as one of the entry in entries then the entry copied -// from the parent is removed. See add func. -// -// An error is returned with nil Tracestate if -// 1. one or more entry in entries is invalid. -// 2. two or more entries in the input entries have the same key. -// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. -// (duplicate entry is counted only once). 
-func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { - if parent == nil && len(entries) == 0 { - return nil, nil - } - if entry, ok := areEntriesValid(entries...); !ok { - return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) - } - - if key, duplicate := containsDuplicateKey(entries...); duplicate { - return nil, fmt.Errorf("contains duplicate keys (%s)", key) - } - - tracestate := Tracestate{} - - if parent != nil && len(parent.entries) > 0 { - tracestate.entries = append([]Entry{}, parent.entries...) - } - - err := tracestate.add(entries) - if err != nil { - return nil, err - } - return &tracestate, nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 0000000000..773c9b6431 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. + +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 0000000000..088d19a6ce --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 0000000000..ad73d8cb9d --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 0000000000..af6ef171f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. 
+func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal returns if a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 0000000000..949e2165c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides a lightweight representations of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 0000000000..e854d7e84e --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns false if id contains at least one non-zero byte. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID contains at least one non-zero byte. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 0000000000..29e629d667 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Int64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 0000000000..cecad8bae3 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. 
+func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 0000000000..b6f2e28d40 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 0000000000..a13a6b733d --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. 
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. 
A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+ Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 0000000000..1217776ead --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. 
+ Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 0000000000..69a348f0f0 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 0000000000..0dd01b063a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + +package telemetry + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. 
+ bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: ValueKindInt64} +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. 
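Value above is a small tagged union, much like log/slog's slog.Value: scalar values are packed into the num word, while any carries either the kind marker or an unsafe data pointer with the length kept in num, so storing an attribute needs no extra allocation. A stripped-down standalone sketch of the same layout; every name below is illustrative rather than part of the vendored package:

package main

import (
	"fmt"
	"math"
	"unsafe"
)

type kind int

const (
	kindInt64 kind = iota + 1
	kindFloat64
)

// val packs every variant into one uint64 plus an interface, like telemetry.Value:
// ints and floats live in num, strings keep their data pointer in any and their
// length in num.
type val struct {
	num uint64
	any any // a kind for scalars, or a *byte for strings
}

func intVal(v int64) val     { return val{num: uint64(v), any: kindInt64} }
func floatVal(v float64) val { return val{num: math.Float64bits(v), any: kindFloat64} }
func strVal(s string) val    { return val{num: uint64(len(s)), any: unsafe.StringData(s)} }

func (v val) String() string {
	switch x := v.any.(type) {
	case *byte:
		return unsafe.String(x, v.num) // rebuild the string header, no copy
	case kind:
		if x == kindFloat64 {
			return fmt.Sprint(math.Float64frombits(v.num))
		}
		return fmt.Sprint(int64(v.num))
	}
	return ""
}

func main() {
	fmt.Println(intVal(-3), floatVal(1.5), strVal("hello")) // -3 1.5 hello
}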
+func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). 
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case ValueKindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case ValueKindBool: + return strconv.FormatBool(v.asBool()) + case ValueKindBytes: + return fmt.Sprint(v.asBytes()) + case ValueKindMap: + return fmt.Sprint(v.asMap()) + case ValueKindSlice: + return fmt.Sprint(v.asSlice()) + case ValueKindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is is not handled. It is + // preferable to have user's open issue asking why their attributes + // have a "unhandled: " prefix than say that their code is panicking. + return fmt.Sprintf("", v.Kind()) + } +} + +// MarshalJSON encodes v into OTLP formatted JSON. +func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. 
Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 0000000000..86babf1a88 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. 
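The limits above are resolved by firstEnv, defined next: the first listed environment variable whose value parses as an integer wins, otherwise the default applies. A short sketch of that precedence, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// pick mirrors the documented resolution rule: the first key whose value parses
// as an integer wins, otherwise the default is returned.
func pick(def int, keys ...string) int {
	for _, k := range keys {
		if v, err := strconv.Atoi(os.Getenv(k)); err == nil {
			return v
		}
	}
	return def
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "32")

	// The span-specific variable shadows the generic one, as in newSpanLimits above.
	fmt.Println(pick(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")) // 32
}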
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 0000000000..6ebea12a9e --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 0000000000..cbcfabde3b --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
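Both addEvent and AddLink above enforce their limits by shifting the slice left and overwriting the oldest entry instead of growing the backing array. A standalone sketch of that bounded append; the generic helper is an illustration, not the vendored code:

package main

import "fmt"

// appendCapped keeps at most limit elements, dropping the oldest (head) when
// full, reusing the existing backing array just like addEvent and AddLink above.
// A negative limit means unlimited; a zero limit drops everything.
func appendCapped[T any](s []T, v T, limit int) (out []T, dropped bool) {
	if limit == 0 {
		return s, true
	}
	if limit > 0 && len(s) == limit {
		copy(s[:limit-1], s[1:]) // shift left in place
		s = s[:limit-1]
		dropped = true
	}
	return append(s, v), dropped
}

func main() {
	var evs []string
	var drops int
	for _, e := range []string{"a", "b", "c", "d"} {
		var d bool
		evs, d = appendCapped(evs, e, 3)
		if d {
			drops++
		}
	}
	fmt.Println(evs, drops) // [b c d] 1
}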
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 0000000000..dbc477a59a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 67f8d73399..18436eaedf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -1,23 +1,15 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( + "google.golang.org/grpc/stats" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/propagation" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" @@ -30,18 +22,28 @@ const ( GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) -// Filter is a predicate used to determine whether a given request in -// interceptor info should be traced. A Filter must return true if +// InterceptorFilter is a predicate used to determine whether a given request in +// interceptor info should be instrumented. A InterceptorFilter must return true if // the request should be traced. -type Filter func(*InterceptorInfo) bool +// +// Deprecated: Use stats handlers instead. +type InterceptorFilter func(*InterceptorInfo) bool + +// Filter is a predicate used to determine whether a given request in +// should be instrumented by the attached RPC tag info. +// A Filter must return true if the request should be instrumented. +type Filter func(*stats.RPCTagInfo) bool // config is a group of options for this instrumentation. 
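With the tracer, span, and provider types in place, here is a brief usage sketch of the auto SDK added above. The import path matches the vendored module; the tracer and span names are purely illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/auto/sdk"
)

func main() {
	// TracerProvider() is the package's public entry point (see tracer_provider.go
	// above). If no auto-instrumentation attaches to the process, it produces no
	// telemetry, so wiring it up unconditionally is safe.
	tracer := sdk.TracerProvider().Tracer("example.com/demo")

	_, span := tracer.Start(context.Background(), "do-work")
	defer span.End()
	// ... work ...
}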
type config struct { - Filter Filter - Propagators propagation.TextMapPropagator - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider - SpanStartOptions []trace.SpanStartOption + Filter Filter + InterceptorFilter InterceptorFilter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -89,6 +91,9 @@ func newConfig(opts []Option, role string) *config { metric.WithUnit("ms")) if err != nil { otel.Handle(err) + if c.rpcDuration == nil { + c.rpcDuration = noop.Float64Histogram{} + } } c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", @@ -96,6 +101,9 @@ func newConfig(opts []Option, role string) *config { metric.WithUnit("By")) if err != nil { otel.Handle(err) + if c.rpcRequestSize == nil { + c.rpcRequestSize = noop.Int64Histogram{} + } } c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", @@ -103,6 +111,9 @@ func newConfig(opts []Option, role string) *config { metric.WithUnit("By")) if err != nil { otel.Handle(err) + if c.rpcResponseSize == nil { + c.rpcResponseSize = noop.Int64Histogram{} + } } c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", @@ -110,6 +121,9 @@ func newConfig(opts []Option, role string) *config { metric.WithUnit("{count}")) if err != nil { otel.Handle(err) + if c.rpcRequestsPerRPC == nil { + c.rpcRequestsPerRPC = noop.Int64Histogram{} + } } c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", @@ -117,6 +131,9 @@ func newConfig(opts []Option, role string) *config { metric.WithUnit("{count}")) if err != nil { otel.Handle(err) + if c.rpcResponsesPerRPC == nil { + c.rpcResponsesPerRPC = noop.Int64Histogram{} + } } return c @@ -147,15 +164,30 @@ func (o tracerProviderOption) apply(c *config) { // WithInterceptorFilter returns an Option to use the request filter. // // Deprecated: Use stats handlers instead. -func WithInterceptorFilter(f Filter) Option { +func WithInterceptorFilter(f InterceptorFilter) Option { return interceptorFilterOption{f: f} } type interceptorFilterOption struct { - f Filter + f InterceptorFilter } func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.InterceptorFilter = o.f + } +} + +// WithFilter returns an Option to use the request filter. +func WithFilter(f Filter) Option { + return filterOption{f: f} +} + +type filterOption struct { + f Filter +} + +func (o filterOption) apply(c *config) { if o.f != nil { c.Filter = o.f } @@ -227,3 +259,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. 
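newConfig above now guards against failed instrument creation: the error is reported through otel.Handle and a noop instrument is substituted, so later Record calls cannot hit a nil histogram. A hedged sketch of the same pattern in isolation, with an illustrative instrument name:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// newDuration mirrors the fallback in newConfig above: report the error via
// otel.Handle, then substitute a noop instrument so callers can Record
// unconditionally.
func newDuration(meter metric.Meter) metric.Float64Histogram {
	h, err := meter.Float64Histogram("rpc.server.duration", metric.WithUnit("ms"))
	if err != nil {
		otel.Handle(err)
		if h == nil {
			h = noop.Float64Histogram{}
		}
	}
	return h
}

func main() {
	h := newDuration(otel.Meter("demo"))
	h.Record(context.Background(), 12.5) // safe even if instrument creation had failed
	fmt.Println("recorded")
}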
+func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go index 958dcd87a4..b8b836b00f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 3b487a9362..7d5ed05808 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -18,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -59,7 +49,7 @@ var ( ) // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { @@ -81,7 +71,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { Method: method, Type: UnaryClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return invoker(ctx, method, req, reply, cc, callOpts...) 
} @@ -147,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -196,7 +186,7 @@ func (w *clientStream) CloseSend() error { return err } -func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { +func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { return &clientStream{ ClientStream: s, span: span, @@ -219,7 +209,7 @@ func (w *clientStream) endSpan(err error) { } // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { @@ -241,7 +231,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { Method: method, Type: StreamClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return streamer(ctx, desc, cc, method, callOpts...) } @@ -270,7 +260,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { span.End() return s, err } - stream := wrapClientStream(ctx, s, desc, span, cfg) + stream := wrapClientStream(s, desc, span, cfg) return stream, nil } } @@ -296,7 +286,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { UnaryServerInfo: info, Type: UnaryServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(ctx, req) } @@ -344,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } @@ -422,7 +412,7 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { StreamServerInfo: info, Type: StreamServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(srv, wrapServerStream(ctx, ss, cfg)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go index f6116946bf..b62f7cd7c4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index cf32a9e978..bef07b7a3c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index f585fb6ae0..3aa37915df 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go index b65fab308f..409c621b74 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index bbe5d65858..fbcbfb84e0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -20,6 +9,7 @@ import ( "time" grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -37,6 +27,7 @@ type gRPCContext struct { messagesReceived int64 messagesSent int64 metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -71,11 +62,15 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return context.WithValue(ctx, gRPCContextKey{}, &gctx) } @@ -107,11 +102,15 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) @@ -140,6 +139,9 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) if gctx != nil { + if !gctx.record { + return + } metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) metricAttrs = 
append(metricAttrs, gctx.metricAttrs...) } @@ -149,7 +151,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -165,7 +167,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -179,6 +181,10 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool ) } case *stats.OutTrailer: + case *stats.OutHeader: + if p, ok := peer.FromContext(ctx); ok { + span.SetAttributes(peerAttr(p.Addr.String())...) + } case *stats.End: var rpcStatusAttr attribute.KeyValue @@ -198,14 +204,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index ae1f8ea5e7..04f425edfe 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
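Taken together, the otelgrpc changes above add a stats-handler-level Filter plus per-span and per-metric attribute options, and switch metric recording to a pre-built attribute.Set. A server-side usage sketch; the health-check exclusion and the attribute values are illustrative choices, not defaults:

package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel/attribute"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

func newServer() *grpc.Server {
	// The Filter runs per RPC: when it returns false the handler records neither
	// spans nor metrics for that call (the gctx.record flag above).
	h := otelgrpc.NewServerHandler(
		otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
			return info.FullMethodName != "/grpc.health.v1.Health/Check"
		}),
		otelgrpc.WithSpanAttributes(attribute.String("service.tier", "backend")),
		otelgrpc.WithMetricAttributes(attribute.String("region", "europe-west1")),
	)
	return grpc.NewServer(grpc.StatsHandler(h))
}

func main() { _ = newServer() }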
+// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.47.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 92b8cf73c9..b25641c55d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -23,13 +12,13 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -38,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -47,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. 
func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 9509014e87..a83a026274 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -29,14 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - RequestCount = "http.server.request_count" // Incoming request count total - RequestContentLength = "http.server.request_content_length" // Incoming request bytes total - ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total - ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a1b5b5e5aa..a01bfafbe0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -19,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -44,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -111,7 +103,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. @@ -205,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. +func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 38c7f01c71..56b24b982a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended // to be used to add tracing by wrapping existing handlers (with Handler) and diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index af84f0e4bb..e555a475f1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -1,32 +1,18 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
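Editorial note on the config.go hunk above: it adds the public WithMetricAttributesFn option. Below is a minimal sketch of how it might be combined with otelhttp.NewHandler to attach per-request metric attributes; the mux, the operation name, and the "tenant" attribute/header are illustrative assumptions, not part of this change.

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("pong"))
	})

	// Every metric recorded for a request also carries the attributes
	// returned by this callback (illustrative key and header name).
	handler := otelhttp.NewHandler(mux, "http.server",
		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
			return []attribute.KeyValue{attribute.String("tenant", r.Header.Get("X-Tenant"))}
		}),
	)

	_ = http.ListenAndServe(":8080", handler)
}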
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( - "io" "net/http" "time" "github.com/felixge/httpsnoop" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -36,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -46,9 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -76,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) 
h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -87,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -97,36 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName -} - -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - RequestContentLength, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - ResponseContentLength, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - ServerLatency, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of HTTP request handling"), - ) - handleErr(err) + h.semconv = semconv.NewHTTPServer(c.Meter) } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -143,12 +95,9 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), - } - if h.server != "" { - hostAttr := semconv.NetHostName(h.server) - opts = append(opts, trace.WithAttributes(hostAttr)) + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } + opts = append(opts, h.spanStartOptions...) if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { opts = append(opts, trace.WithNewRoot()) @@ -168,6 +117,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -178,14 +132,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -195,13 +147,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,61 +163,52 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) - - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read, o) - h.responseBytesCounter.Add(ctx, rww.written, o) + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), + })...) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) -} - -func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { - attributes := []attribute.KeyValue{} - - // TODO: Consider adding an event after each read and write, possibly as an - // option (defaulting to off), so as to not create needlessly verbose spans. - if read > 0 { - attributes = append(attributes, ReadBytesKey.Int64(read)) - } - if rerr != nil && rerr != io.EOF { - attributes = append(attributes, ReadErrorKey.String(rerr.Error())) - } - if wrote > 0 { - attributes = append(attributes, WroteBytesKey.Int64(wrote)) - } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) - - if werr != nil && werr != io.EOF { - attributes = append(attributes, WriteErrorKey.String(werr.Error())) - } - span.SetAttributes(attributes...) + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + }, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
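Editorial note on the handler.go hunk above: the middleware now reuses a Labeler already present in the request context (LabelerFromContext) instead of always injecting a fresh one, and the labeler's values are forwarded to the semconv metric recording as AdditionalAttributes. A minimal sketch of an inner handler adding such an attribute; the "cache.hit" key is illustrative only.

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func newInstrumentedHandler() http.Handler {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The otelhttp middleware places a Labeler in the request context;
		// attributes added here end up on the request/response size and
		// duration metrics for this request.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("cache.hit", "false"))
		w.WriteHeader(http.StatusOK)
	})
	return otelhttp.NewHandler(inner, "http.server")
}

func main() {
	_ = http.ListenAndServe(":8080", newInstrumentedHandler())
}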
func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - attr := semconv.HTTPRouteKey.String(route) - span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 0000000000..a945f55661 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 0000000000..fbc344cbdd --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. 
+type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. 
+func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 0000000000..3b036f8a37 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,237 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return OldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return OldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return OldHTTPServer{}.Route(route) +} + +// Status returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. 
+func (s HTTPServer) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} + +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue +} + +type MetricData struct { + RequestSize int64 + ElapsedTime float64 +} + +var metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. + return + } + + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram +} + +func NewHTTPClient(meter metric.Meter) HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + client := HTTPClient{ + duplicate: env == "http/dup", + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + return client +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) + } + return OldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) 
+ } + + return OldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return CurrentHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + // TODO: Duplicate Metrics + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + return MetricOpts{ + measurement: set, + addOptions: set, + } +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + + // TODO: Duplicate Metrics +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go new file mode 100644 index 0000000000..dc9ec7bc39 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -0,0 +1,348 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type CurrentHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. 
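Editorial note on the internal/semconv env.go file above: duplicated semantic conventions are gated on the OTEL_SEMCONV_STABILITY_OPT_IN environment variable. With the value "http/dup", trace attributes are emitted under both the old (v1.20.0) and new (v1.26.0) conventions, while metric duplication is still marked TODO in this version. A sketch of opting in, assuming the variable is set before the instrumentation is constructed (normally via the process environment rather than in code).

package main

import (
	"net/http"
	"os"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// NewHTTPServer/NewHTTPClient read this variable when the handler or
	// transport is constructed, so it must be set beforehand.
	_ = os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")

	handler := otelhttp.NewHandler(http.NotFoundHandler(), "http.server")
	_ = http.ListenAndServe(":8080", handler)
}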
+func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
+func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} + +type CurrentHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. 
+func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go new file mode 100644 index 0000000000..93e8d0f94c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// SplitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func SplitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) // nolint: gosec // Byte size checked 16 above. 
+} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 0000000000..c042249dd7 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,274 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + "slices" + "strings" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type OldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
+func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o OldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. +func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type OldHTTPClient struct{} + +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} + +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
+const ( + clientRequestSize = "http.client.request.size" // Incoming request bytes total + clientResponseSize = "http.client.response.size" // Incoming response bytes total + clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds +) + +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} + +func standardizeHTTPMethodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go index edf4ce3d31..7aa5f99e81 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index 794d4c26a4..a73bb06e90 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -2,18 +2,7 @@ // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" @@ -43,14 +32,22 @@ func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { } // HTTPClientRequest returns trace attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.url", "http.flavor", -// "http.method", "net.peer.name". The following attributes are returned if the -// related values are defined in req: "net.peer.port", "http.user_agent", -// "http.request_content_length", "enduser.id". +// The following attributes are always returned: "http.url", "http.method", +// "net.peer.name". The following attributes are returned if the related values +// are defined in req: "net.peer.port", "user_agent.original", +// "http.request_content_length". func HTTPClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } +// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.method", "net.peer.name". +// The following attributes are returned if the +// related values are defined in req: "net.peer.port". +func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue { + return hc.ClientRequestMetrics(req) +} + // HTTPClientStatus returns a span status code and message for an HTTP status code // value received by a client. func HTTPClientStatus(code int) (codes.Code, string) { @@ -75,10 +72,9 @@ func HTTPClientStatus(code int) (codes.Code, string) { // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "http.target", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port", -// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", -// "http.client_ip". +// "http.target", "net.host.name". The following attributes are returned if +// they related values are defined in req: "net.host.port", "net.sock.peer.addr", +// "net.sock.peer.port", "user_agent.original", "http.client_ip". func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } @@ -101,8 +97,8 @@ func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port". +// "net.host.name". The following attributes are returned if they related +// values are defined in req: "net.host.port". 
func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequestMetrics(server, req) } @@ -114,36 +110,6 @@ func HTTPServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } -// HTTPRequestHeader returns the contents of h as attributes. -// -// Instrumentation should require an explicit configuration of which headers to -// captured and then prune what they pass here. Including all headers can be a -// security risk - explicit configuration helps avoid leaking sensitive -// information. -// -// The User-Agent header is already captured in the http.user_agent attribute -// from ClientRequest and ServerRequest. Instrumentation may provide an option -// to capture that header here even though it is not recommended. Otherwise, -// instrumentation should filter that out of what is passed. -func HTTPRequestHeader(h http.Header) []attribute.KeyValue { - return hc.RequestHeader(h) -} - -// HTTPResponseHeader returns the contents of h as attributes. -// -// Instrumentation should require an explicit configuration of which headers to -// captured and then prune what they pass here. Including all headers can be a -// security risk - explicit configuration helps avoid leaking sensitive -// information. -// -// The User-Agent header is already captured in the http.user_agent attribute -// from ClientRequest and ServerRequest. Instrumentation may provide an option -// to capture that header here even though it is not recommended. Otherwise, -// instrumentation should filter that out of what is passed. -func HTTPResponseHeader(h http.Header) []attribute.KeyValue { - return hc.ResponseHeader(h) -} - // httpConv are the HTTP semantic convention attributes defined for a version // of the OpenTelemetry specification. type httpConv struct { @@ -214,7 +180,7 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { // ClientRequest returns attributes for an HTTP request made by a client. The // following attributes are always returned: "http.url", "http.method", // "net.peer.name". The following attributes are returned if the related values -// are defined in req: "net.peer.port", "http.user_agent", +// are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length", "user_agent.original". func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { /* The following semantic conventions are returned if present: @@ -286,6 +252,38 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { return attrs } +// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.method", "net.peer.name". +// The following attributes are returned if the related values +// are defined in req: "net.peer.port". +func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. 
+ var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer)) + + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + return attrs +} + // ServerRequest returns attributes for an HTTP request received by a server. // // The server must be the primary server name if it is known. For example this @@ -439,8 +437,8 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port". +// "net.host.name". The following attributes are returned if they related +// values are defined in req: "net.host.port". func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.scheme string @@ -551,31 +549,6 @@ func firstHostPort(source ...string) (host string, port int) { return } -// RequestHeader returns the contents of h as OpenTelemetry attributes. -func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { - return c.header("http.request.header", h) -} - -// ResponseHeader returns the contents of h as OpenTelemetry attributes. -func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { - return c.header("http.response.header", h) -} - -func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { - key := func(k string) attribute.Key { - k = strings.ToLower(k) - k = strings.ReplaceAll(k, "-", "_") - k = fmt.Sprintf("%s.%s", prefix, k) - return attribute.Key(k) - } - - attrs := make([]attribute.KeyValue, 0, len(h)) - for k, v := range h { - attrs = append(attrs, key(k).StringSlice(v)) - } - return attrs -} - // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func (c *httpConv) ClientStatus(code int) (codes.Code, string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index cb4cb9355d..b80a1db61f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -2,17 +2,7 @@ // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" @@ -32,24 +22,6 @@ func NetTransport(network string) attribute.KeyValue { return nc.Transport(network) } -// NetClient returns trace attributes for a client network connection to address. -// See net.Dial for information about acceptable address values, address should -// be the same as the one used to create conn. If conn is nil, only network -// peer attributes will be returned that describe address. Otherwise, the -// socket level information about conn will also be included. -func NetClient(address string, conn net.Conn) []attribute.KeyValue { - return nc.Client(address, conn) -} - -// NetServer returns trace attributes for a network listener listening at address. -// See net.Listen for information about acceptable address values, address -// should be the same as the one used to create ln. If ln is nil, only network -// host attributes will be returned that describe address. Otherwise, the -// socket level information about ln will also be included. -func NetServer(address string, ln net.Listener) []attribute.KeyValue { - return nc.Server(address, ln) -} - // netConv are the network semantic convention attributes defined for a version // of the OpenTelemetry specification. type netConv struct { @@ -120,57 +92,11 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } -// Server returns attributes for a network listener listening at address. See -// net.Listen for information about acceptable address values, address should -// be the same as the one used to create ln. If ln is nil, only network host -// attributes will be returned that describe address. Otherwise, the socket -// level information about ln will also be included. -func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { - if ln == nil { - return c.Host(address) - } - - lAddr := ln.Addr() - if lAddr == nil { - return c.Host(address) - } - - hostName, hostPort := splitHostPort(address) - sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) - network := lAddr.Network() - sockFamily := family(network, sockHostAddr) - - n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) - n += positiveInt(hostPort, sockHostPort) - attr := make([]attribute.KeyValue, 0, n) - if hostName != "" { - attr = append(attr, c.HostName(hostName)) - if hostPort > 0 { - // Only if net.host.name is set should net.host.port be. - attr = append(attr, c.HostPort(hostPort)) - } - } - if network != "" { - attr = append(attr, c.Transport(network)) - } - if sockFamily != "" { - attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) - } - if sockHostAddr != "" { - attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) - if sockHostPort > 0 { - // Only if net.sock.host.addr is set should net.sock.host.port be. - attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) - } - } - return attr -} - func (c *netConv) HostName(name string) attribute.KeyValue { return c.NetHostNameKey.String(name) } @@ -179,85 +105,6 @@ func (c *netConv) HostPort(port int) attribute.KeyValue { return c.NetHostPortKey.Int(port) } -// Client returns attributes for a client network connection to address. 
See -// net.Dial for information about acceptable address values, address should be -// the same as the one used to create conn. If conn is nil, only network peer -// attributes will be returned that describe address. Otherwise, the socket -// level information about conn will also be included. -func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { - if conn == nil { - return c.Peer(address) - } - - lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() - - var network string - switch { - case lAddr != nil: - network = lAddr.Network() - case rAddr != nil: - network = rAddr.Network() - default: - return c.Peer(address) - } - - peerName, peerPort := splitHostPort(address) - var ( - sockFamily string - sockPeerAddr string - sockPeerPort int - sockHostAddr string - sockHostPort int - ) - - if lAddr != nil { - sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) - } - - if rAddr != nil { - sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) - } - - switch { - case sockHostAddr != "": - sockFamily = family(network, sockHostAddr) - case sockPeerAddr != "": - sockFamily = family(network, sockPeerAddr) - } - - n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) - n += positiveInt(peerPort, sockPeerPort, sockHostPort) - attr := make([]attribute.KeyValue, 0, n) - if peerName != "" { - attr = append(attr, c.PeerName(peerName)) - if peerPort > 0 { - // Only if net.peer.name is set should net.peer.port be. - attr = append(attr, c.PeerPort(peerPort)) - } - } - if network != "" { - attr = append(attr, c.Transport(network)) - } - if sockFamily != "" { - attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) - } - if sockPeerAddr != "" { - attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) - if sockPeerPort > 0 { - // Only if net.sock.peer.addr is set should net.sock.peer.port be. - attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) - } - } - if sockHostAddr != "" { - attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) - if sockHostPort > 0 { - // Only if net.sock.host.addr is set should net.sock.host.port be. - attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) - } - } - return attr -} - func family(network, address string) string { switch network { case "unix", "unixgram", "unixpacket": @@ -273,26 +120,6 @@ func family(network, address string) string { return "" } -func nonZeroStr(strs ...string) int { - var n int - for _, str := range strs { - if str != "" { - n++ - } - } - return n -} - -func positiveInt(ints ...int) int { - var n int - for _, i := range ints { - if i > 0 { - n++ - } - } - return n -} - // Peer returns attributes for a network peer address. func (c *netConv) Peer(address string) []attribute.KeyValue { h, p := splitHostPort(address) @@ -311,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } @@ -368,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. 
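For reference, a minimal standalone sketch of the attribute derivation that netConv.Peer and splitHostPort perform above: split a "host:port" address and emit net.peer.name / net.peer.port. The peerAttrs helper below is hypothetical and deliberately simplified (it does not cover bracketed IPv6 literals or the other edge cases the vendored code handles); the 16-bit ParseUint mirrors the gosec note in the hunk above.

package main

import (
	"fmt"
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
)

// peerAttrs derives net.peer.name / net.peer.port attributes from an address.
func peerAttrs(address string) []attribute.KeyValue {
	host, portStr, err := net.SplitHostPort(address)
	if err != nil {
		// No port component; treat the whole address as the host name.
		return []attribute.KeyValue{attribute.String("net.peer.name", address)}
	}
	attrs := []attribute.KeyValue{attribute.String("net.peer.name", host)}
	// Bitsize 16 guarantees the int conversion below cannot overflow.
	if p, err := strconv.ParseUint(portStr, 10, 16); err == nil && p > 0 {
		attrs = append(attrs, attribute.Int("net.peer.port", int(p)))
	}
	return attrs
}

func main() {
	fmt.Println(peerAttrs("example.com:8443"))
}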
} func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 26a51a1805..ea504e396f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -48,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go new file mode 100644 index 0000000000..9476ef01b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "time" +) + +type startTimeContextKeyType int + +const startTimeContextKey startTimeContextKeyType = 0 + +// ContextWithStartTime returns a new context with the provided start time. The +// start time will be used for metrics and traces emitted by the +// instrumentation. Only one labeller can be injected into the context. +// Injecting it multiple times will override the previous calls. +func ContextWithStartTime(parent context.Context, start time.Time) context.Context { + return context.WithValue(parent, startTimeContextKey, start) +} + +// StartTimeFromContext retrieves a time.Time from the provided context if one +// is available. If no start time was found in the provided context, a new, +// zero start time is returned and the second return value is false. 
+func StartTimeFromContext(ctx context.Context) time.Time { + t, _ := ctx.Value(startTimeContextKey).(time.Time) + return t +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index e835cac12e..39681ad4b0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -19,31 +8,40 @@ import ( "io" "net/http" "net/http/httptrace" + "sync/atomic" + "time" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps -// outbound HTTP(S) requests with a span. +// outbound HTTP(S) requests with a span and enriches it with metrics. type Transport struct { rt http.RoundTripper - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} // NewTransport wraps the provided http.RoundTripper with one that -// starts a span and injects the span context into the outbound request headers. +// starts a span, injects the span context into the outbound request headers, +// and enriches it with metrics. // // If the provided http.RoundTripper is nil, http.DefaultTransport will be used // as the base http.RoundTripper. @@ -74,6 +72,8 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) + t.metricAttributesFn = c.MetricAttributesFn } func defaultTransportFormatter(_ string, r *http.Request) string { @@ -84,6 +84,7 @@ func defaultTransportFormatter(_ string, r *http.Request) string { // before handing the request to the configured base RoundTripper. 
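The exported ContextWithLabeler and ContextWithStartTime helpers introduced above replace the internal injectLabeler plumbing. A minimal usage sketch, assuming an application wraps its client with otelhttp.NewTransport; the "tenant" attribute and the URL are illustrative only, and, per the doc comments above, the labeler attributes and start time feed the metrics and traces emitted by the instrumentation.

package main

import (
	"context"
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Wrap the default transport so outbound requests get spans and metrics.
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	// Attach extra metric attributes for this request via a Labeler.
	// "tenant"/"acme" is an illustrative attribute, not one otelhttp defines.
	labeler := &otelhttp.Labeler{}
	labeler.Add(attribute.String("tenant", "acme"))
	ctx := otelhttp.ContextWithLabeler(context.Background(), labeler)

	// Optionally supply an earlier start time (e.g. when the request was
	// queued before being sent).
	ctx = otelhttp.ContextWithStartTime(ctx, time.Now().Add(-10*time.Millisecond))

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	if resp, err := client.Do(req); err == nil {
		resp.Body.Close()
	}
}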
The created span will // end when the response body is closed or when a read from the body returns io.EOF. func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + requestStartTime := time.Now() for _, f := range t.filters { if !f(r) { // Simply pass through to the base RoundTripper if a filter rejects the request @@ -109,39 +110,91 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } + r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) + if r.Body != nil && r.Body != http.NoBody { + r.Body = bw + } + + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) - res.Body = newWrappedBody(span, res.Body) + // metrics + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - return res, err + // For handling response bytes we leverage a callback when the client reads the http response + readRecordFunc := func(n int64) { + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) + } + + // traces + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) + + res.Body = newWrappedBody(span, readRecordFunc, res.Body) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) + + return res, nil +} + +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest } // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. -func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser { +func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser { // The successful protocol switch responses will have a body that // implement an io.ReadWriteCloser. Ensure this interface type continues // to be satisfied if that is the case. 
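The reworked RoundTrip above records request duration using floating-point division by time.Millisecond rather than Duration.Milliseconds, which preserves sub-millisecond precision. A stripped-down sketch of that measurement pattern, assuming a hypothetical timingTransport and an illustrative instrument name ("example.request.duration"), not the instrument otelhttp itself registers:

package main

import (
	"net/http"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// timingTransport records request duration with sub-millisecond precision
// using the same float division shown in the hunk above.
type timingTransport struct {
	base http.RoundTripper
	hist metric.Float64Histogram
}

func newTimingTransport(base http.RoundTripper) (*timingTransport, error) {
	hist, err := otel.Meter("example").Float64Histogram(
		"example.request.duration", metric.WithUnit("ms"))
	if err != nil {
		return nil, err
	}
	return &timingTransport{base: base, hist: hist}, nil
}

func (t *timingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	start := time.Now()
	res, err := t.base.RoundTrip(r)

	elapsedMs := float64(time.Since(start)) / float64(time.Millisecond)
	attrs := []attribute.KeyValue{attribute.String("http.request.method", r.Method)}
	if res != nil {
		attrs = append(attrs, attribute.Int("http.response.status_code", res.StatusCode))
	}
	t.hist.Record(r.Context(), elapsedMs, metric.WithAttributes(attrs...))

	return res, err
}

func main() {
	t, err := newTimingTransport(http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: t, Timeout: 5 * time.Second}
	if resp, err := client.Get("https://example.com"); err == nil {
		resp.Body.Close()
	}
}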
if _, ok := body.(io.ReadWriteCloser); ok { - return &wrappedBody{span: span, body: body} + return &wrappedBody{span: span, record: record, body: body} } // Remove the implementation of the io.ReadWriteCloser and only implement // the io.ReadCloser. - return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}} + return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}} } // wrappedBody is the response body type returned by the transport @@ -153,8 +206,11 @@ func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser { // If the response body implements the io.Writer interface (i.e. for // successful protocol switches), the wrapped body also will. type wrappedBody struct { - span trace.Span - body io.ReadCloser + span trace.Span + recorded atomic.Bool + record func(n int64) + body io.ReadCloser + read atomic.Int64 } var _ io.ReadWriteCloser = &wrappedBody{} @@ -171,11 +227,14 @@ func (wb *wrappedBody) Write(p []byte) (int, error) { func (wb *wrappedBody) Read(b []byte) (int, error) { n, err := wb.body.Read(b) + // Record the number of bytes read + wb.read.Add(int64(n)) switch err { case nil: // nothing to do here but fall through to the return case io.EOF: + wb.recordBytesRead() wb.span.End() default: wb.span.RecordError(err) @@ -184,7 +243,20 @@ func (wb *wrappedBody) Read(b []byte) (int, error) { return n, err } +// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once. +func (wb *wrappedBody) recordBytesRead() { + // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that + // two goroutines are racing to call this method, the number of bytes read will no longer increase. Using + // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish + // calling wb.record(wb.read.Load()). + if wb.recorded.CompareAndSwap(false, true) { + // Record the total number of bytes read + wb.record(wb.read.Load()) + } +} + func (wb *wrappedBody) Close() error { + wb.recordBytesRead() wb.span.End() if wb.body != nil { return wb.body.Close() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 9a4a02143d..353e43b91f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. 
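The wrappedBody changes above tally bytes read with an atomic counter and use CompareAndSwap so the total is reported exactly once, whether the body reaches io.EOF first or is closed first. A self-contained sketch of that record-once pattern, using a hypothetical countingBody type rather than the vendored implementation:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// countingBody counts bytes on every Read but fires the record callback
// exactly once, on io.EOF or Close, whichever comes first.
type countingBody struct {
	body     io.ReadCloser
	read     atomic.Int64
	recorded atomic.Bool
	record   func(n int64)
}

func (b *countingBody) Read(p []byte) (int, error) {
	n, err := b.body.Read(p)
	b.read.Add(int64(n))
	if err == io.EOF {
		b.recordOnce()
	}
	return n, err
}

func (b *countingBody) Close() error {
	b.recordOnce()
	return b.body.Close()
}

// recordOnce uses CompareAndSwap so a racing EOF and Close cannot both report.
func (b *countingBody) recordOnce() {
	if b.recorded.CompareAndSwap(false, true) {
		b.record(b.read.Load())
	}
}

func main() {
	cb := &countingBody{
		body:   io.NopCloser(strings.NewReader("hello")),
		record: func(n int64) { fmt.Println("response bytes:", n) },
	}
	io.Copy(io.Discard, cb)
	cb.Close() // no-op for the callback; it already fired at EOF
}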
func Version() string { - return "0.47.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 11a35ed167..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read += n1 - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. 
-func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index ae6a3bcf12..6bf3abc41e 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -3,3 +3,7 @@ fo te collison consequentially +ans +nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3b..e2cb3ea944 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664be..749e8e881b 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -1,6 +1,7 @@ .DS_Store Thumbs.db +.cache/ .tools/ venv/ .idea/ @@ -12,11 +13,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f56982..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f382..c58e48ab0c 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,11 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -19,10 +22,16 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck + - testifylint - typecheck + - unconvert - unused + - unparam + - usestdlibvars + - usetesting issues: # Maximum issues count per one linter. @@ -54,16 +63,17 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + - perfsprint + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." 
linters: @@ -88,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -120,10 +137,10 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. @@ -147,137 +164,71 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. # Default: 0.8 confidence: 0.01 + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - name: blank-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - name: bool-literal-in-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - name: constant-logical-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 - name: context-as-argument disabled: true arguments: - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - allowTypesBefore: "*testing.T" - name: context-keys-type - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - name: deep-exit - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - name: defer - disabled: false arguments: - ["call-chain", "loop"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - name: dot-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - name: duplicated-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - name: early-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + arguments: + - "preserveScope" - name: empty-block - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - name: empty-lines - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - name: error-naming - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - name: error-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - name: error-strings - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - name: errorf - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - name: exported - disabled: false arguments: - "sayRepetitiveInsteadOfStutters" - 
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - name: flag-parameter - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - name: identical-branches - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - name: if-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: import-shadowing - name: increment-decrement - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - name: indent-error-flow - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - - name: import-shadowing - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + arguments: + - "preserveScope" - name: package-comments - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - name: range - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - name: range-val-in-closure - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - name: range-val-address - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - name: redefines-builtin-id - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - name: string-format - disabled: false arguments: - - panic - '/^[^\n]*$/' - must not contain line breaks - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - name: struct-tag - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - name: superfluous-else - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - name: var-naming - disabled: false arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - "preserveScope" + - name: time-equal - name: unconditional-recursion - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - name: unexported-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - name: unhandled-error - disabled: false arguments: - "fmt.Fprint" - "fmt.Fprintf" @@ -285,12 +236,17 @@ linters-settings: - "fmt.Print" - "fmt.Printf" - "fmt.Println" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - name: unnecessary-stmt - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - name: useless-break - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList - name: waitgroup-by-value - disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - 
go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index fe670d79cc..c076db2823 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,468 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.35.0/0.57.0/0.11.0] 2025-03-05 + +This release is the last to support [Go 1.22]. +The next release will require at least [Go 1.23]. + +### Added + +- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187) +- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210) +- The `go.opentelemetry.io/otel/semconv/v1.28.0` package. + The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`(#6236) +- The `go.opentelemetry.io/otel/semconv/v1.30.0` package. + The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`(#6240) +- Document the pitfalls of using `Resource` as a comparable type. + `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272) +- Support [Go 1.24]. (#6304) +- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. + It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`. + Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317) + +### Changed + +- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198) + +### Fixes + +- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368) +- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. 
(#6369) + +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. 
(#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. 
(#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. 
(#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. 
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. 
+ - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now; instead, it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + +## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 + +### Added + +- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134) +- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194) +- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`. + This new module contains the Go implementation of the OpenTelemetry Logs SDK. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. + This new module contains an OTLP exporter that transmits log telemetry using HTTP. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. + This new module contains an exporter that prints log records to STDOUT.
+ This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- The `go.opentelemetry.io/otel/semconv/v1.25.0` package. + The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254) + +### Changed + +- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) +- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) + +## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 + +### Added + +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906) +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906) +- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032) +- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`. + This method is used to notify users if a log record will be emitted or not. (#5071) +- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`. + This value represents an unset severity level. (#5072) +- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076) +- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`. + This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes. + At which point, users will be required to migrate their code, and this package will be deprecated then removed. (#5085) +- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) +- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) +- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+ Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated. + +### Fixed + +- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) + +### Removed + +- Drop support for [Go 1.20]. (#4967) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) + +## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 + +This release is the last to support [Go 1.20]. +The next release will require at least [Go 1.21]. + +### Added + +- Support [Go 1.22]. (#4890) +- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900) +- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900) +- The `go.opentelemetry.io/otel/log` module is added. + This module includes OpenTelemetry Go's implementation of the Logs Bridge API. + This module is in an alpha state; it is subject to breaking changes. + See our [versioning policy](./VERSIONING.md) for more info. (#4961) +- Add ARM64 platform to the compatibility testing suite. (#4994) + +### Fixed + +- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945) +- Fix negative buckets in output of exponential histograms. (#4956) + +## [1.23.1] 2024-02-07 + +### Fixed + +- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888) + +## [1.23.0] 2024-02-06 + +This release contains the first stable, `v1`, release of the following modules: + +- `go.opentelemetry.io/otel/bridge/opencensus` +- `go.opentelemetry.io/otel/bridge/opencensus/test` +- `go.opentelemetry.io/otel/example/opencensus` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` +- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` + +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808) +- Experimental exemplar exporting is added to the metric SDK. + See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871) +- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`. + This error is returned when a merge of two `Resource`s with different (non-empty) schema URLs is attempted. (#4876) + +### Changed + +- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict. + Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned. + It is up to the user to decide if they want to use the returned `Resource` or not.
+ It may have desired attributes overwritten or include stale semantic conventions. (#4876) + +### Fixed + +- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449) +- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820) +- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827) + +## [1.23.0-rc.1] 2024-01-18 + +This is a release candidate for the v1.23.0 release. +That release is expected to include the `v1` release of the following modules: + +- `go.opentelemetry.io/otel/bridge/opencensus` +- `go.opentelemetry.io/otel/bridge/opencensus/test` +- `go.opentelemetry.io/otel/example/opencensus` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` +- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` + +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + ## [1.22.0/0.45.0] 2024-01-17 ### Added @@ -22,13 +484,13 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770) - Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733) - Experimental cardinality limiting is added to the metric SDK. - See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457) + See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457) - Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804) ### Changed - Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754) -- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.19.0` version of the OpenTelemetry specification. (#4754) +- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754) - Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`. If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g `if ctx.Err() != nil`). (#4671) - Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722) @@ -64,7 +526,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types.
Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added @@ -96,15 +558,15 @@ This release brings a breaking change for custom trace API implementations. Some - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. This extends the `Span` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) @@ -740,7 +1202,7 @@ The next release will require at least [Go 1.19]. - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) - Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. 
(#3438, #3432) - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) - Prevent duplicate Prometheus description, unit, and type. (#3469) @@ -1608,7 +2070,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -1785,7 +2247,7 @@ with major version 0. - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) - Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) @@ -2182,7 +2644,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -2775,7 +3237,22 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.22.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD +[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 +[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 +[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 +[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 +[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1 [1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 [1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 [1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 @@ -2850,6 +3327,12 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.24]: https://go.dev/doc/go1.24 +[Go 1.23]: https://go.dev/doc/go1.23 +[Go 1.22]: https://go.dev/doc/go1.22 +[Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 @@ -2857,3 +3340,5 @@ It contains api and sdk for trace and meter. 
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 623740007d..945a07d2b0 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared \ No newline at end of file +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 31857a6173..7b8af585aa 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -181,6 +181,18 @@ patterns in the spec. For a deeper discussion, see [this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). +## Tests + +Each functionality should be covered by tests. + +Performance-critical functionality should also be covered by benchmarks. + +- Pull requests adding a performance-critical functionality +should have `go test -bench` output in their description. +- Pull requests changing a performance-critical functionality +should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) +output in their description. + ## Documentation Each (non-internal, non-test) package must be documented using @@ -201,6 +213,16 @@ You can install and run a "local Go Doc site" in the following way: [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + ## Style Guide One of the primary goals of this project is that it is actually used by @@ -560,12 +582,18 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. 
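+
+As a sketch of the `ConcurrentSafe` convention above (the counter is a
+stand-in for whatever is under test, not code from this repository), a
+top-level test that opts into the `test-concurrent-safe` job could look like:
+
+```go
+package example_test
+
+import (
+	"sync"
+	"sync/atomic"
+	"testing"
+)
+
+// TestConcurrentSafeCounter exercises concurrent increments under -race.
+// The ConcurrentSafe term in its name opts it into the test-concurrent-safe
+// CI job, which reruns it with -run=ConcurrentSafe -count=100 -race.
+func TestConcurrentSafeCounter(t *testing.T) {
+	var (
+		n  atomic.Int64
+		wg sync.WaitGroup
+	)
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			n.Add(1)
+		}()
+	}
+	wg.Wait() // join every goroutine so none leak past the test
+	if got := n.Load(); got != 100 {
+		t.Errorf("got %d increments, want 100", got)
+	}
+}
+```
+
+Similarly, for the benchmark guidance in the Tests section above, a minimal
+benchmark of this shape (the benchmarked call is only an illustration)
+produces the `go test -bench` output expected in a pull request description;
+comparing two captured runs with `benchstat` yields the before/after summary:
+
+```go
+package attribute_test
+
+import (
+	"testing"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// BenchmarkNewSet measures the cost of constructing an attribute Set.
+// Capture runs with: go test -bench=BenchmarkNewSet -count=10 > old.txt
+// and compare them with: benchstat old.txt new.txt
+func BenchmarkNewSet(b *testing.B) {
+	kvs := []attribute.KeyValue{
+		attribute.String("service.name", "example"),
+		attribute.Int("retry.count", 3),
+	}
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		_ = attribute.NewSet(kvs...)
+	}
+}
+```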
### Internal packages @@ -613,31 +641,34 @@ should be canceled. ## Approvers and Maintainers -### Approvers +### Triagers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [David Ashpole](https://github.com/dashpole), Google -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + +### Approvers ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 35fc189961..226410d742 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -1,16 +1,5 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 TOOLS_MOD_DIR := ./internal/tools @@ -22,11 +11,15 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} GO = go TIMEOUT = 60 +# User to run as in docker images. 
+DOCKER_USER=$(shell id -u):$(shell id -g) +DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile + .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -34,7 +27,7 @@ TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) @@ -50,9 +43,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -68,9 +58,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -81,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -98,20 +85,20 @@ PIP := $(PYTOOLS)/pip WORKDIR := /workdir # The python image to use for the virtual environment. -PYTHONIMAGE := python:3.11.3-slim-bullseye +PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) # Run the python image with the current directory mounted. -DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) +DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) # Create a virtual environment for Python tools. $(PYTOOLS): # The `--upgrade` flag is needed to ensure that the virtual environment is # created with the latest pip version. - @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip" # Install python packages into the virtual environment. -$(PYTOOLS)/%: | $(PYTOOLS) - @$(DOCKERPY) $(PIP) install -r requirements.txt +$(PYTOOLS)/%: $(PYTOOLS) + @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt CODESPELL = $(PYTOOLS)/codespell $(CODESPELL): PACKAGE=codespell @@ -124,19 +111,19 @@ generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* -go-generate/%: | $(STRINGER) $(GOTMPL) +go-generate/%: $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." 
\ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) +vanity-import-fix: $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. .PHONY: go-work -go-work: | $(CROSSLINK) - $(CROSSLINK) work --root=$(shell pwd) +go-work: $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7 # Build @@ -159,12 +146,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -178,7 +167,7 @@ test/%: COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) +test-coverage: $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ @@ -192,40 +181,37 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that direcotry if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) +golangci-lint/%: $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink -crosslink: | $(CROSSLINK) +crosslink: $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink +go-mod-tidy/%: crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.20 + && $(GO) mod tidy -compat=1.21 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -234,25 +220,35 @@ lint-modules: go-mod-tidy lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check -vanity-import-check: | $(PORTO) +vanity-import-check: $(PORTO) @$(PORTO) --include-internal -l . 
|| ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell -misspell: | $(MISSPELL) +misspell: $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* -govulncheck/%: | $(GOVULNCHECK) +govulncheck/%: $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... .PHONY: codespell -codespell: | $(CODESPELL) +codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -263,15 +259,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -282,15 +269,30 @@ check-clean-work-tree: exit 1; \ fi +# The weaver docker image to use for semconv-generate. +WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) + SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) - [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + # Ensure the target directory for source code is available. + mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG} + # Note: We mount a home directory for downloading/storing the semconv repository. + # Weaver will automatically clean the cache when finished, but the directories will remain. 
+ mkdir -p ~/.weaver + docker run --rm \ + -u $(DOCKER_USER) \ + --env HOME=/tmp/weaver \ + --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ + --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ + $(WEAVER_IMAGE) registry generate \ + --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \ + --templates=/home/weaver/templates \ + --param tag=$(TAG) \ + go \ + /home/weaver/target $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -302,17 +304,25 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: | $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: | $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: - docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 44e1bfc9b5..8421cd7e59 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,9 +1,11 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). 
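+
+As a minimal sketch of using the API directly (assuming a `TracerProvider`
+has been registered globally; until then the default no-op provider is used),
+instrumenting a function looks roughly like this:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+)
+
+// doWork wraps a unit of work in a span from the global TracerProvider.
+func doWork(ctx context.Context) {
+	ctx, span := otel.Tracer("example.com/app").Start(ctx, "doWork")
+	defer span.End()
+	_ = ctx // pass ctx to downstream calls so they join the same trace
+}
+
+func main() {
+	doWork(context.Background())
+}
+```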
@@ -11,14 +13,11 @@ It provides a set of APIs to directly measure performance and behavior of your s ## Project Status -| Signal | Status | -|---------|------------| -| Traces | Stable | -| Metrics | Stable | -| Logs | Design [1] | - -- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). - No Logs Pull Requests are currently being accepted. +| Signal | Status | +|---------|--------------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -28,6 +27,8 @@ and Project versioning information and stability guarantees can be found in the [versioning documentation](VERSIONING.md). +[^1]: https://github.com/orgs/open-telemetry/projects/43 + ### Compatibility OpenTelemetry-Go ensures compatibility with the current supported versions of @@ -48,18 +49,29 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.20 | amd64 | -| Ubuntu | 1.21 | 386 | -| Ubuntu | 1.20 | 386 | -| MacOS | 1.21 | amd64 | -| MacOS | 1.20 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.20 | amd64 | -| Windows | 1.21 | 386 | -| Windows | 1.20 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.24 | amd64 | +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.24 | 386 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.24 | arm64 | +| Ubuntu | 1.23 | arm64 | +| Ubuntu | 1.22 | arm64 | +| macOS 13 | 1.24 | amd64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.24 | arm64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.24 | amd64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.24 | 386 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -86,8 +98,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export @@ -96,12 +108,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
-| Exporter | Metrics | Traces | -|---------------------------------------|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | ## Contributing diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index d2691d0bd8..1e13ae54f7 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -5,17 +5,14 @@ New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. The `semconv-generate` make target is used for this. -1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. -2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` -3. Run the `make semconv-generate ...` target from this repository. +1. Set the `TAG` environment variable to the semantic convention tag you want to generate. +2. Run the `make semconv-generate ...` target from this repository. For example, ```sh -export TAG="v1.21.0" # Change to the release version you are generating. -export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" -docker pull otel/semconvgen:latest -make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. +export TAG="v1.30.0" # Change to the release version you are generating. +make semconv-generate # Uses the exported TAG. ``` This should create a new sub-package of [`semconv`](./semconv). @@ -27,6 +24,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). +## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + ## Pre-Release First, decide which module sets will be released and update their versions @@ -63,6 +66,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. @@ -104,17 +108,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. 
-This ensures they build with the published release, not the local copy. - ## Post-Release ### Contrib Repository @@ -134,6 +127,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362b..b8cb605c16 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 0000000000..5b3da8f14c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go index dafe7424df..eef51ebc2a 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. 
package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc5766c..318e42fcab 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 638c213d59..be9cd922d8 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271fb7..f2ba89ce4b 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04e43..d9a22c6502 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce058..3028f9a40f 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 7e6765b06b..6cbefceadf 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -1,24 +1,14 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" import ( + "cmp" "encoding/json" "reflect" + "slices" "sort" - "sync" ) type ( @@ -26,23 +16,33 @@ type ( // immutable set of attributes, with an internal cache for storing // attribute encodings. // - // This type supports the Equivalent method of comparison using values of - // type Distinct. + // This type will remain comparable for backwards compatibility. The + // equivalence of Sets across versions is not guaranteed to be stable. + // Prior versions may find two Sets to be equal or not when compared + // directly (i.e. ==), but subsequent versions may not. Users should use + // the Equals method to ensure stable equivalence checking. + // + // Users should also use the Distinct returned from Equivalent as a map key + // instead of a Set directly. In addition to that type providing guarantees + // on stable equivalence, it may also provide performance improvements. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. 
This can be used as a map key or for equality checking - // between Sets. + // Distinct is a unique identifier of a Set. + // + // Distinct is designed to be ensures equivalence stability: comparisons + // will return the save value across versions. For this reason, Distinct + // should always be used as a map key instead of a Set. Distinct struct { iface interface{} } - // Sortable implements sort.Interface, used for sorting KeyValue. This is - // an exported type to support a memory optimization. A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). + // Sortable implements sort.Interface, used for sorting KeyValue. + // + // Deprecated: This type is no longer used. It was added as a performance + // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no + // longer supported by the module). Sortable []KeyValue ) @@ -56,12 +56,6 @@ var ( iface: [0]KeyValue{}, }, } - - // sortables is a pool of Sortables used to create Sets with a user does - // not provide one. - sortables = sync.Pool{ - New: func() interface{} { return new(Sortable) }, - } ) // EmptySet returns a reference to a Set with no elements. @@ -187,13 +181,7 @@ func empty() Set { // Except for empty sets, this method adds an additional allocation compared // with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - srt := sortables.Get().(*Sortable) - s, _ := NewSetWithSortableFiltered(kvs, srt, nil) - sortables.Put(srt) + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set { // NewSetWithSortableFiltered for more details. // // This call includes a Sortable option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - srt := sortables.Get().(*Sortable) - s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) - sortables.Put(srt) - return s, filtered -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. 
- if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs // Stable sort so the following de-duplication can implement // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) position := len(kvs) - 1 offset := position - 1 @@ -289,6 +239,35 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S return Set{equivalent: computeDistinct(kvs)}, nil } +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + // filteredToFront filters slice in-place using keep function. All KeyValues that need to // be removed are moved to the front. All KeyValues that need to be kept are // moved (in-order) to the back. The index for the first KeyValue to be kept is @@ -368,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } @@ -427,7 +386,7 @@ func (l *Set) MarshalJSON() ([]byte, error) { return json.Marshal(l.equivalent.iface) } -// MarshalLog is the marshaling function used by the logging system to represent this exporter. +// MarshalLog is the marshaling function used by the logging system to represent this Set. 
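
To show how the attribute/set.go changes above surface through the public API (sort.Stable over a pooled Sortable is replaced by slices.SortStableFunc, the Sortable-based constructors are deprecated, and the doc comments now steer callers toward Distinct as a map key), here is a minimal sketch. The attribute keys and values are invented for illustration.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Duplicate keys are de-duplicated with last-value-wins semantics,
	// now via slices.SortStableFunc instead of a pooled Sortable.
	set := attribute.NewSet(
		attribute.String("service", "api"),
		attribute.Int("retries", 1),
		attribute.Int("retries", 3), // wins
	)
	fmt.Println(set.Len()) // 2

	if v, ok := set.Value("retries"); ok {
		fmt.Println(v.AsInt64()) // 3
	}

	// Per the updated doc comments, use the Distinct returned by
	// Equivalent as a map key rather than the Set itself.
	counts := map[attribute.Distinct]int{}
	counts[set.Equivalent()]++
	counts[set.Equivalent()]++
	fmt.Println(counts[set.Equivalent()]) // 2
}
```

Using the Distinct returned by Equivalent, rather than the Set itself, is what the updated doc comments recommend for stable map keys across versions of the module.
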
func (l Set) MarshalLog() interface{} { kvs := make(map[string]string) for _, kv := range l.ToSlice() { diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index cb21dd5c09..9ea0ecbbd2 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" @@ -242,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 0000000000..7d798435e1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 7d27cf77d5..0e1fe24220 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" @@ -19,6 +8,7 @@ import ( "fmt" "net/url" "strings" + "unicode/utf8" "go.opentelemetry.io/otel/internal/baggage" ) @@ -54,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. 
// +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -67,11 +63,15 @@ func NewKeyProperty(key string) (Property, error) { // NewKeyValueProperty returns a new Property for key with value. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewKeyValuePropertyRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -84,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -125,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -148,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. 
+ if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -213,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -232,14 +257,18 @@ type Member struct { hasData bool } -// NewMemberRaw returns a new Member from the passed arguments. +// NewMember returns a new Member from the passed arguments. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewMemberRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -252,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -304,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } - // Decode a precent-encoded value. - value, err := url.PathUnescape(val) + // Decode a percent-encoded value. + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. 
+func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(c) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -324,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -341,13 +405,18 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + s += propertyDelimiter + m.properties.String() } return s } @@ -458,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -479,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -538,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. 
+ if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -616,11 +693,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { return } - // Decode a precent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + // Decode a percent-encoded value. + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -641,6 +720,113 @@ func skipSpace(s string, offset int) int { return i } +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -656,19 +842,10 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return (c >= 0x23 && c <= 0x27) || - (c >= 0x30 && c <= 0x39) || - (c >= 0x41 && c <= 0x5a) || - (c >= 0x5e && c <= 0x7a) || - c == 0x21 || - c == 0x2a || - c == 0x2b || - c == 0x2d || - c == 0x2e || - c == 0x7c || - c == 0x7e + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. 
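
A rough sketch of how the relaxed baggage rules above behave end to end: NewMemberRaw accepts any non-empty UTF-8 key and any UTF-8 value, String() percent-encodes the value (and drops members whose keys are not valid W3C tokens), and Parse decodes it again, substituting U+FFFD for invalid UTF-8 sequences. The key and value below are invented.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMemberRaw takes an unencoded value; String() percent-encodes it.
	m, err := baggage.NewMemberRaw("user.name", "Alice Smith")
	if err != nil {
		panic(err)
	}

	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	header := b.String()
	fmt.Println(header) // user.name=Alice%20Smith

	// Parsing decodes the value again; invalid UTF-8 in a decoded value
	// would be replaced with U+FFFD by replaceInvalidUTF8Sequences above.
	parsed, err := baggage.Parse(header)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Member("user.name").Value()) // Alice Smith
}
```

Property keys get the same treatment: NewKeyProperty and NewKeyValuePropertyRaw now accept UTF-8 names, and Property.String returns an empty string when the key cannot be encoded as a W3C token.
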
func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { @@ -679,12 +856,109 @@ func validateValue(s string) bool { return true } +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + func validateValueChar(c int32) bool { - return c == 0x21 || - (c >= 0x23 && c <= 0x2b) || - (c >= 0x2d && c <= 0x3a) || - (c >= 0x3c && c <= 0x5b) || - (c >= 0x5d && c <= 0x7e) + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go index 24b34b7564..a572461a05 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go index 4545100df6..b51d87cab7 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides functionality for storing and retrieving diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 0000000000..24c52b387d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 587ebae4e3..49a35b1225 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,21 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -74,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} @@ -94,7 +84,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index 4e328fbb4b..ee8db448b8 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile new file mode 100644 index 0000000000..e4c4a753c8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -0,0 +1,3 @@ +# This is a renovate-friendly source of Docker images. +FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python +FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 36d7c24e88..921f85961a 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otel provides global access to the OpenTelemetry API. The subpackages of @@ -28,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go index 72fad85412..67414c71e0 100644 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index 9a58fb1d37..93e80ea306 100644 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -1,18 +1,7 @@ #!/usr/bin/env bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 4115fe3bbb..07623b6791 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" @@ -18,12 +7,8 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. - _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) // GetErrorHandler returns the global ErrorHandler instance. // @@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 622c3ee3f2..691d96c755 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
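
The handler.go hunk above makes otel.Handle a thin call through GetErrorHandler(), and the default handler still logs to the standard logger until one is set. A small sketch of the user-facing knob; the error messages are placeholders.

```go
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Until a handler is installed, errors passed to otel.Handle are
	// printed by the default (standard log) handler.
	otel.Handle(errors.New("dropped metric point"))

	// Installing a handler routes subsequent calls through it.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))
	otel.Handle(errors.New("exporter unavailable"))
}
```
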
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package attribute provide several helper functions for some commonly used @@ -25,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with same elements as slice. func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. @@ -60,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. 
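
The helpers above now build slice copies with make plus reflect.Copy instead of round-tripping through an intermediate array value. From the public attribute API the visible behaviour is unchanged, except that Emit now renders slice values as JSON (see the value.go hunk earlier in this diff). A brief sketch with an invented attribute key.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Slice-valued attributes are stored internally as fixed-size arrays
	// (keeping KeyValue comparable) and converted back by the helpers
	// shown above.
	kv := attribute.Int64Slice("http.retry.delays_ms", []int64{100, 200, 400})

	fmt.Println(kv.Value.AsInt64Slice()) // [100 200 400]

	// With the Emit change earlier in this diff, slice values are
	// rendered as JSON rather than fmt.Sprint output.
	fmt.Println(kv.Value.Emit()) // [100,200,400]
}
```
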
@@ -74,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. @@ -88,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. @@ -102,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go index b96e5408e6..b4f85f44a9 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides base types and functionality to store and retrieve diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 4469700d9c..3aea9c491f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/internal/baggage" diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go index f532f07e9e..4259f0320d 100644 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 5e9b830479..c657ff8e75 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,38 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -44,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. 
func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index ebb13c2067..ae92a42516 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -24,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. 
type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -51,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -82,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -113,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -144,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -175,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -206,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } @@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) 
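
The sfGauge and siGauge types above wire the synchronous Gauge instruments into the delegating global meter, mirroring the existing counter and histogram stand-ins. A sketch of how that looks from caller code; the instrument names, units, and descriptions are invented.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()

	// Works even before a real MeterProvider is installed: the global
	// meter hands back a gauge stand-in and delegates later.
	meter := otel.Meter("example/gauges")

	queueDepth, err := meter.Int64Gauge("queue.depth",
		metric.WithDescription("items waiting to be processed"),
		metric.WithUnit("{item}"),
	)
	if err != nil {
		panic(err)
	}
	queueDepth.Record(ctx, 42)

	temp, err := meter.Float64Gauge("boiler.temperature",
		metric.WithUnit("Cel"),
	)
	if err != nil {
		panic(err)
	}
	temp.Record(ctx, 71.5)
}
```
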
+ } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index c6f305a2b7..adbca7d347 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -23,17 +12,20 @@ import ( "github.com/go-logr/stdr" ) -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger atomic.Pointer[logr.Logger] +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() -// SetLogger overrides the globalLogger with l. +// SetLogger sets the global Logger to l. // // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` @@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) { globalLogger.Store(&l) } -func getLogger() logr.Logger { +// GetLogger returns the global logger. +func GetLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) + GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) + GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) + GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) + GetLogger().V(1).Info(msg, keysAndValues...) 
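
The internal_logging.go hunk above replaces the init-time logger with a lazily constructed atomic pointer and exports GetLogger, while user code still configures logging through otel.SetLogger. A sketch using go-logr/stdr, following the V-level mapping noted in the comments above (V(1) for Warn, V(4) for Info, V(8) for Debug).

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// Raise stdr verbosity so Warn and Info messages from the API/SDK
	// internals become visible.
	stdr.SetVerbosity(4)
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel ", log.LstdFlags|log.Lshortfile)))

	// From here on, internal otel logging goes through this logger
	// instead of the default stderr logger constructed in internal/global.
}
```
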
} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 0097db478c..a6acd8dca6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -1,23 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" + "reflect" "sync" - "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -76,6 +66,8 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -86,7 +78,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me return val } - t := &meter{name: name, opts: opts} + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} p.meters[key] = t return t } @@ -102,17 +94,29 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex - instruments []delegatedInstrument + instruments map[instID]delegatedInstrument registry list.List - delegate atomic.Value // metric.Meter + delegate metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -120,19 +124,21 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } - for e := m.registry.Front(); e != nil; e = e.Next() { + var n *list.Element + for e := m.registry.Front(); e != nil; e = n { r := e.Value.(*registration) r.setDelegate(meter) + n = e.Next() m.registry.Remove(e) } @@ -141,147 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) } + + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } + i := &siGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } + i := &sfGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) + } + reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -293,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -321,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. 
+func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} + +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -332,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go index 06bac35c2f..38560ff991 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 7985005bcb..204ea142a5 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
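(For context, a compilable sketch of what the setDelegate/unwrapCallback plumbing above enables: instruments created through the global meter before a provider is installed are re-created against the provider once it is set. The noop provider here only stands in for a real SDK provider, and the names are illustrative.)

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

// counter is created before any provider is installed, so it is backed by
// the delegating global meter implemented in this package.
var counter, _ = otel.Meter("example.io/pkg").Int64Counter("requests.total")

func main() {
	// Installing a provider (a no-op one stands in here) re-creates the
	// instrument above against it via the setDelegate path in this diff.
	otel.SetMeterProvider(noop.NewMeterProvider())

	counter.Add(context.Background(), 1)
}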
+// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -25,6 +14,10 @@ import ( ) type ( + errorHandlerHolder struct { + eh ErrorHandler + } + tracerProviderHolder struct { tp trace.TracerProvider } @@ -39,15 +32,59 @@ type ( ) var ( + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -63,7 +100,7 @@ func SetTracerProvider(tp trace.TracerProvider) { // to itself. Error( errors.New("no delegate configured in tracer provider"), - "Setting tracer provider to it's current value. No delegate will be configured", + "Setting tracer provider to its current value. No delegate will be configured", ) return } @@ -92,7 +129,7 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) { // delegate to itself. Error( errors.New("no delegate configured in text map propagator"), - "Setting text map propagator to it's current value. No delegate will be configured", + "Setting text map propagator to its current value. No delegate will be configured", ) return } @@ -123,7 +160,7 @@ func SetMeterProvider(mp metric.MeterProvider) { // to itself. Error( errors.New("no delegate configured in meter provider"), - "Setting meter provider to it's current value. No delegate will be configured", + "Setting meter provider to its current value. 
No delegate will be configured", ) return } @@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 3f61ec12a3..8982aa0dc5 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -36,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -97,6 +87,8 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -115,6 +107,8 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T type il struct { name string version string + schema string + attrs attribute.Set } // tracer is a placeholder for a trace.Tracer. @@ -152,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) 
+ } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s @@ -193,6 +211,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e794000..b2fe3e41d3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" @@ -31,11 +20,13 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -47,9 +38,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go index c4f8acd5d8..6de7f2e4d8 100644 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
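(A minimal sketch of the public surface served by the ErrorHandler delegation added to state.go above; the handler body and error text are illustrative.)

package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Route OpenTelemetry's internal errors to the application logger
	// instead of the default stderr-logging delegator described above.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))

	// Errors reported through otel.Handle now reach the handler installed
	// above, even if it was set after other packages grabbed a handler.
	otel.Handle(errors.New("example error"))
}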
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index f955171951..1e6473b32f 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 0000000000..0cf902e01f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index 072baa8e8d..f8435d8f28 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -50,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -165,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string @@ -224,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. 
+// observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. // diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 9bd6ebf020..e079aaef16 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -223,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 778ad2d748..d9e3b13e4d 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 54716e13b3..f153745b00 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
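(As an illustration of the Int64Callback/Float64Callback contract documented above: a callback registered on an observable instrument is invoked once per collection cycle by whichever SDK ends up backing the global meter. The goroutine gauge is only an example.)

package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example.io/runtime")

	_, err := meter.Int64ObservableGauge(
		"process.runtime.go.goroutines",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			// Observe records the instantaneous value for this collection.
			o.Observe(int64(runtime.NumGoroutine()))
			return nil
		}),
	)
	if err != nil {
		panic(err)
	}
}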
+// SPDX-License-Identifier: Apache-2.0 /* Package metric provides the OpenTelemetry API used to measure metrics about @@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names, it is the callers responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -164,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. +[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 0000000000..1f6e0efa73 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index ae0bdbd2e6..1a9dc68093 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. @@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index be89cd5334..a535782e1d 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
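(The embedded Gauge interfaces above exist so third-party implementations opt into compile-time breakage when the API grows. A hypothetical implementation, with illustrative package and type names, would embed them like this.)

package custom

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
)

// gauge is a hypothetical third-party Float64Gauge. Embedding
// embedded.Float64Gauge means that if the metric API later adds methods to
// Float64Gauge, this type fails to compile instead of silently satisfying
// the old interface.
type gauge struct {
	embedded.Float64Gauge
}

// Compile-time check that gauge satisfies the current API surface.
var _ metric.Float64Gauge = gauge{}

func (gauge) Record(ctx context.Context, value float64, _ ...metric.RecordOption) {
	// A real implementation would export the measurement here.
}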
+// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -27,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -34,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c @@ -340,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 2520bc74af..14e08c24a4 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
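(The corrected WithAttributeSet doc example above reflects the recommended measurement-attribute pattern; a compilable sketch with illustrative metric and attribute names follows. Reusing a pre-built attribute.Set avoids re-sorting and de-duplicating attributes on every measurement, which is why the comment steers callers away from WithAttributes on hot paths.)

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	counter, err := otel.Meter("example.io/http").Int64Counter("http.server.requests")
	if err != nil {
		panic(err)
	}

	// Build the attribute set once and reuse it for every Add call.
	set := attribute.NewSet(attribute.String("http.request.method", "GET"))
	counter.Add(context.Background(), 1, metric.WithAttributeSet(set))
}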
+// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -58,17 +47,41 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -78,7 +91,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -88,7 +106,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -98,23 +121,51 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -124,7 +175,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -134,7 +190,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -144,6 +205,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a @@ -189,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 0000000000..bb89694356 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go new file mode 100644 index 0000000000..ca6fcbdc09 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -0,0 +1,281 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry metric API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry metric API will +// effectively disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry metric API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/metric/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. 
+ + _ metric.MeterProvider = MeterProvider{} + _ metric.Meter = Meter{} + _ metric.Observer = Observer{} + _ metric.Registration = Registration{} + _ metric.Int64Counter = Int64Counter{} + _ metric.Float64Counter = Float64Counter{} + _ metric.Int64UpDownCounter = Int64UpDownCounter{} + _ metric.Float64UpDownCounter = Float64UpDownCounter{} + _ metric.Int64Histogram = Int64Histogram{} + _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} + _ metric.Int64ObservableCounter = Int64ObservableCounter{} + _ metric.Float64ObservableCounter = Float64ObservableCounter{} + _ metric.Int64ObservableGauge = Int64ObservableGauge{} + _ metric.Float64ObservableGauge = Float64ObservableGauge{} + _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} + _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} + _ metric.Int64Observer = Int64Observer{} + _ metric.Float64Observer = Float64Observer{} +) + +// MeterProvider is an OpenTelemetry No-Op MeterProvider. +type MeterProvider struct{ embedded.MeterProvider } + +// NewMeterProvider returns a MeterProvider that does not record any telemetry. +func NewMeterProvider() MeterProvider { + return MeterProvider{} +} + +// Meter returns an OpenTelemetry Meter that does not record any telemetry. +func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return Meter{} +} + +// Meter is an OpenTelemetry No-Op Meter. +type Meter struct{ embedded.Meter } + +// Int64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { + return Int64Counter{}, nil +} + +// Int64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + return Int64UpDownCounter{}, nil +} + +// Int64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + return Int64Histogram{}, nil +} + +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + +// Int64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + return Int64ObservableCounter{}, nil +} + +// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. +func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + return Int64ObservableUpDownCounter{}, nil +} + +// Int64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + return Int64ObservableGauge{}, nil +} + +// Float64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. 
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { + return Float64Counter{}, nil +} + +// Float64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + return Float64UpDownCounter{}, nil +} + +// Float64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + return Float64Histogram{}, nil +} + +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + +// Float64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + return Float64ObservableCounter{}, nil +} + +// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. +func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + return Float64ObservableUpDownCounter{}, nil +} + +// Float64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + return Float64ObservableGauge{}, nil +} + +// RegisterCallback performs no operation. +func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { + return Registration{}, nil +} + +// Observer acts as a recorder of measurements for multiple instruments in a +// Callback, it performing no operation. +type Observer struct{ embedded.Observer } + +// ObserveFloat64 performs no operation. +func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) { +} + +// ObserveInt64 performs no operation. +func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) { +} + +// Registration is the registration of a Callback with a No-Op Meter. +type Registration struct{ embedded.Registration } + +// Unregister unregisters the Callback the Registration represents with the +// No-Op Meter. This will always return nil because the No-Op Meter performs no +// operation, including hold any record of registrations. +func (Registration) Unregister() error { return nil } + +// Int64Counter is an OpenTelemetry Counter used to record int64 measurements. +// It produces no telemetry. +type Int64Counter struct{ embedded.Int64Counter } + +// Add performs no operation. +func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64Counter is an OpenTelemetry Counter used to record float64 +// measurements. It produces no telemetry. +type Float64Counter struct{ embedded.Float64Counter } + +// Add performs no operation. +func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64 +// measurements. It produces no telemetry. 
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. +type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. +func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. 
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 0a4825ae6a..8403a4bad2 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -39,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -92,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -144,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. 
+func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 56667d32fc..783fdfba77 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -144,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. 
+func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go index d29aaa32c0..2fd9497338 100644 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 0000000000..e2959ac747 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 303cdf1cbf..552263ba73 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go index c119eb2858..33a3baf15f 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package propagation contains OpenTelemetry context propagators. diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c94438f73a..8c8286aab4 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 63e5d62221..6870e316dc 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" @@ -46,7 +35,7 @@ var ( versionPart = fmt.Sprintf("%.2X", supportedVersion) ) -// Inject set tracecontext from the Context into the carrier. +// Inject injects the trace context from ctx into carrier. 
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 0000000000..a6fa353f95 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,30 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:best-practices" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchPackageNames": ["go.opentelemetry.io/build-tools/**"], + "groupName": "build-tools" + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e1384..1bb55fb1cc 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.4.1 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 0000000000..87b842c5d1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 71a1f7748d..e087c9c04d 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go index 679c40c4de..c7b804bbe2 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go index 9b8c559de4..137acc67de 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go index d5c4b5c136..d318221e59 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index 39a2eab3a6..7e365e82ce 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go index 42fc525d16..634a1dce07 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go index 8c4a7299d2..21497bb6bc 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md new file mode 100644 index 0000000000..82e1f46b4e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.20.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go index 67d1d4c44d..6685c392b5 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go index 359c5a6962..0d1f55a8fe 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go index 8ac9350d2b..6377639321 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go index 09ff4dfdbf..f40c97825a 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go index 342aede95f..9c1840631b 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go index a2b906742a..3d44dae275 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go index e449e5c3b9..95d0210e38 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go index 8517741485..90b1b0452c 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 0000000000..2de1fc3c6b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go new file mode 100644 index 0000000000..d8dc822b26 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -0,0 +1,8996 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +import "go.opentelemetry.io/otel/attribute" + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. 
It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the aSP.NET Core exception middleware handling result + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. 
It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. 
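A minimal sketch (not part of the vendored file) of the ECS attributes above used to describe an SDK resource; the values are taken from the examples in the doc comments, and the semconv import path is an assumption. These `aws.ecs.*` attributes are typically set once on the resource rather than on every span.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	res := resource.NewWithAttributes(
		semconv.SchemaURL, // schema URL exported by the semconv package
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSECS,
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
	)
	fmt.Println(res)
}
```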
+func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
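A minimal sketch (not part of the vendored file): the enum members defined below (`CloudProvider...`, `CloudPlatform...`) are ready-made `attribute.KeyValue` values, so they can be passed directly alongside the constructor helpers. The example values mirror the doc comments; the import path is an assumption.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	_, span := otel.Tracer("example/cloud").Start(context.Background(), "handle")
	defer span.End()

	span.SetAttributes(
		semconv.CloudProviderGCP,         // enum member, already a KeyValue
		semconv.CloudPlatformGCPCloudRun, // enum member, already a KeyValue
		semconv.CloudRegion("us-central1"),
		semconv.CloudResourceID("//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID"),
	)
}
```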
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Attributes for CloudEvents. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. 
It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
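A minimal sketch (not part of the vendored file) of annotating a span with the `code.*` attributes above; the values are taken from the doc-comment examples and the import path is an assumption.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	_, span := otel.Tracer("example/app").Start(context.Background(), "serveRequest")
	defer span.End()

	span.SetAttributes(
		semconv.CodeNamespace("com.example.MyHTTPService"),
		semconv.CodeFunction("serveRequest"),
		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		semconv.CodeLineNumber(42),
	)
}
```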
+func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCPUStateKey is the attribute Key conforming to the + // "container.cpu.state" semantic conventions. It represents the CPU state + // for this data point. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + ContainerCPUStateKey = attribute.Key("container.cpu.state") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the + // repo digests of the container image as provided by the container + // runtime. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', + // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: + // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) + // and + // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) + // report those under the `RepoDigests` field. + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image + // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). + // Should be only the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +var ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + ContainerCPUStateUser = ContainerCPUStateKey.String("user") + // When CPU is used by the system (host OS) + ContainerCPUStateSystem = ContainerCPUStateKey.String("system") + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. 
It represents the container +// image tags. An example can be found in [Docker Image +// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). +// Should be only the `` section of the full name for example from +// `registry.example.com/my-org/my-image:`. +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // DBClientConnectionsPoolNameKey is the attribute Key conforming to the + // "db.client.connections.pool.name" semantic conventions. It represents + // the name of the connection pool; unique within the instrumented + // application. In case the connection pool implementation doesn't provide + // a name, instrumentation should use a combination of `server.address` and + // `server.port` attributes formatted as `server.address:server.port`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myDataSource' + DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") + + // DBClientConnectionsStateKey is the attribute Key conforming to the + // "db.client.connections.state" semantic conventions. It represents the + // state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle' + DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: If the collection name is parsed from the query, it SHOULD match + // the value provided in the query and may be qualified with the schema and + // database name. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" + // semantic conventions. It represents the name of the database, fully + // qualified within the server address and port. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they + // SHOULD be concatenated (potentially using database system specific + // conventions) from most general to most specific namespace component, and + // more specific namespaces SHOULD NOT be captured without the more general + // namespaces, to ensure that "startswith" queries for the more general + // namespaces will be valid. 
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// This group defines attributes for Cassandra. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. 
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. 
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. 
It represents the + // represents the identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") +) + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// represents the identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// represents the human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// Attributes for software deployments. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It represents the deprecated use the + // `device.app.lifecycle` event definition including `android.state` as a + // payload field instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived. 
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It represents the describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. 
+ EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// FaaS attributes +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. 
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Attributes for Feature Flags. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// Describes file attributes. +const ( + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is + // located. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the + // leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only + // the last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.png' + FileNameKey = attribute.Key("file.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program + // Files\\MyApp\\myapp.exe' + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
+func GenAiResponseID(val string) attribute.KeyValue { + return GenAiResponseIDKey.String(val) +} + +// GenAiResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// LLM a response was generated from. +func GenAiResponseModel(val string) attribute.KeyValue { + return GenAiResponseModelKey.String(val) +} + +// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to +// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the +// number of tokens used in the LLM response (completion). +func GenAiUsageCompletionTokens(val int) attribute.KeyValue { + return GenAiUsageCompletionTokensKey.Int(val) +} + +// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number +// of tokens used in the LLM prompt. +func GenAiUsagePromptTokens(val int) attribute.KeyValue { + return GenAiUsagePromptTokensKey.Int(val) +} + +// Attributes for GraphQL. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// Attributes for the Android platform on which the Android application is +// running. +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. 
It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of + // the HTTP connection in the HTTP connection pool. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'active', 'idle' + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the hTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. 
redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of +// the response in bytes. This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Java Virtual machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") + + // JvmThreadDaemonKey is the attribute Key conforming to the + // "jvm.thread.daemon" semantic conventions. It represents the whether the + // thread is daemon or not. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") + + // JvmThreadStateKey is the attribute Key conforming to the + // "jvm.thread.state" semantic conventions. It represents the state of the + // thread. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + JvmThreadStateKey = attribute.Key("jvm.thread.state") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +var ( + // A thread that has not yet started is in this state + JvmThreadStateNew = JvmThreadStateKey.String("new") + // A thread executing in the Java virtual machine is in this state + JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") + // A thread that is blocked waiting for a monitor lock is in this state + JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") + // A thread that has exited is in this state + JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// JvmGcAction returns an attribute KeyValue conforming to the +// "jvm.gc.action" semantic conventions. It represents the name of the garbage +// collector action. +func JvmGcAction(val string) attribute.KeyValue { + return JvmGcActionKey.String(val) +} + +// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" +// semantic conventions. It represents the name of the garbage collector. +func JvmGcName(val string) attribute.KeyValue { + return JvmGcNameKey.String(val) +} + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// JvmThreadDaemon returns an attribute KeyValue conforming to the +// "jvm.thread.daemon" semantic conventions. It represents the whether the +// thread is daemon or not. +func JvmThreadDaemon(val bool) attribute.KeyValue { + return JvmThreadDaemonKey.Bool(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. 
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
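[Editor's sketch, not part of the vendored diff] A minimal usage example for the Kubernetes attribute helpers above, assuming this hunk vendors the semconv package importable as go.opentelemetry.io/otel/semconv/v1.26.0 (the file path is not visible in this hunk) and that a schemaless SDK resource is acceptable; all literal values are taken from the examples in the comments above.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed vendored version
)

func main() {
	// Describe the Kubernetes workload the telemetry comes from.
	res := resource.NewSchemaless(
		semconv.K8SNamespaceName("default"),
		semconv.K8SDeploymentName("opentelemetry"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("opentelemetry"),
		semconv.K8SNodeName("node-1"),
	)
	fmt.Println(res.Attributes())
}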
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
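[Editor's sketch, not part of the vendored diff] A small example for the log file and log record helpers above, under the same assumption that the vendored package imports as go.opentelemetry.io/otel/semconv/v1.26.0; the path and ULID are the example values from the comments.

package main

import (
	"fmt"
	"path/filepath"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed vendored version
)

func main() {
	path := "/var/log/mysql/audit.log"
	attrs := []attribute.KeyValue{
		semconv.LogFilePath(path),
		// log.file.name is just the basename of the full path.
		semconv.LogFileName(filepath.Base(path)),
		semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
	}
	fmt.Println(attrs)
}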
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
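[Editor's sketch, not part of the vendored diff] One way the general messaging attributes and the enum members above can be attached to a producer span, assuming the semconv package imports as go.opentelemetry.io/otel/semconv/v1.26.0 and using the global (no-op unless configured) tracer from go.opentelemetry.io/otel; topic and payload are illustrative.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed vendored version
	"go.opentelemetry.io/otel/trace"
)

func main() {
	topic := "MyTopic"
	payload := []byte(`{"hello":"world"}`)

	// Attach the messaging attributes at span start on a producer span.
	_, span := otel.Tracer("example/producer").Start(context.Background(), "send "+topic,
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,
			semconv.MessagingOperationTypePublish,
			semconv.MessagingOperationName("send"),
			semconv.MessagingDestinationName(topic),
			semconv.MessagingDestinationPartitionID("1"),
			semconv.MessagingMessageBodySize(len(payload)),
		),
	)
	defer span.End()
}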
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// This group describes attributes specific to Apache Kafka. +const ( + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. 
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
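[Editor's sketch, not part of the vendored diff] A consumer-side example for the Kafka-specific helpers above, assuming the go.opentelemetry.io/otel/semconv/v1.26.0 import path; the function parameters are hypothetical stand-ins for whatever fields the Kafka client in use exposes.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed vendored version
)

// recordAttrs builds attributes for one consumed Kafka record.
func recordAttrs(group, key string, offset int64, tombstone bool) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemKafka,
		semconv.MessagingKafkaConsumerGroup(group),
		semconv.MessagingKafkaMessageOffset(int(offset)),
		semconv.MessagingKafkaMessageTombstone(tombstone),
	}
	// Per the note above, the key attribute is omitted when the key is null.
	if key != "" {
		attrs = append(attrs, semconv.MessagingKafkaMessageKey(key))
	}
	return attrs
}

func main() {
	fmt.Println(recordAttrs("my-group", "myKey", 42, false))
}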
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. 
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
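[Editor's sketch, not part of the vendored diff] An example for the GCP Pub/Sub helpers above, assuming the go.opentelemetry.io/otel/semconv/v1.26.0 import path; the parameters are hypothetical stand-ins for fields of the Pub/Sub client's message type, with values taken from the comments.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed vendored version
)

// pullAttrs annotates one pulled Pub/Sub message.
func pullAttrs(ackID, orderingKey string, deliveryAttempt, ackDeadlineSeconds int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemGCPPubsub,
		semconv.MessagingGCPPubsubMessageAckID(ackID),
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(deliveryAttempt),
		semconv.MessagingGCPPubsubMessageAckDeadline(ackDeadlineSeconds),
	}
	// Per the comment above, omit the ordering key when the message has none.
	if orderingKey != "" {
		attrs = append(attrs, semconv.MessagingGCPPubsubMessageOrderingKey(orderingKey))
	}
	return attrs
}

func main() {
	fmt.Println(pullAttrs("ack_id", "ordering_key", 2, 10))
}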
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// This group describes attributes specific to Azure Service Bus. +const ( + // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key + // conforming to the "messaging.servicebus.destination.subscription_name" + // semantic conventions. It represents the name of the subscription in the + // topic messages are received from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mySubscription' + MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") + + // MessagingServicebusDispositionStatusKey is the attribute Key conforming + // to the "messaging.servicebus.disposition_status" semantic conventions. + // It represents the describes the [settlement + // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServicebusMessageDeliveryCountKey is the attribute Key + // conforming to the "messaging.servicebus.message.delivery_count" semantic + // conventions. It represents the number of deliveries that have been + // attempted for this message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key + // conforming to the "messaging.servicebus.message.enqueued_time" semantic + // conventions. It represents the UTC epoch seconds at which the message + // has been accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") +) + +var ( + // Message is completed + MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") + // Message is abandoned + MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") + // Message is deferred + MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") +) + +// MessagingServicebusDestinationSubscriptionName returns an attribute +// KeyValue conforming to the +// "messaging.servicebus.destination.subscription_name" semantic conventions. +// It represents the name of the subscription in the topic messages are +// received from. +func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingServicebusDestinationSubscriptionNameKey.String(val) +} + +// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. 
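[Editor's sketch, not part of the vendored diff] A short example for the Azure Service Bus helpers above, assuming the go.opentelemetry.io/otel/semconv/v1.26.0 import path; it mainly illustrates that the enqueued-time convention is UTC epoch seconds, not milliseconds.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed vendored version
)

func main() {
	enqueued := time.Unix(1701393730, 0).UTC()
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemServicebus,
		semconv.MessagingServicebusDispositionStatusComplete,
		semconv.MessagingServicebusMessageDeliveryCount(2),
		// UTC epoch seconds, hence Unix() rather than UnixMilli().
		semconv.MessagingServicebusMessageEnqueuedTime(int(enqueued.Unix())),
	}
	fmt.Println(attrs)
}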
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
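[Editor's sketch, not part of the vendored diff] An example for the network attributes above, assuming the go.opentelemetry.io/otel/semconv/v1.26.0 import path; the peer address is the example value from the comments and would normally come from something like net.Conn.RemoteAddr().String().

package main

import (
	"fmt"
	"log"
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed vendored version
)

func main() {
	// Split a peer "host:port" into the address and port attributes.
	host, portStr, err := net.SplitHostPort("10.1.2.80:65123")
	if err != nil {
		log.Fatal(err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		log.Fatal(err)
	}
	attrs := []attribute.KeyValue{
		semconv.NetworkTransportTCP,
		semconv.NetworkTypeIpv4,
		semconv.NetworkPeerAddress(host),
		semconv.NetworkPeerPort(port),
		semconv.NetworkProtocolName("http"),
		semconv.NetworkProtocolVersion("1.1"),
	}
	fmt.Println(attrs)
}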
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were + // voluntary or involuntary. 
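// A minimal sketch (not part of the vendored file) of the peer.service helper
// defined above on a client span. The semconv import path and the
// "AuthTokenCache" service name follow the example in the comment above and
// are assumptions, not requirements.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func callRemote(ctx context.Context) {
	_, span := otel.Tracer("example/client").Start(ctx, "GetToken")
	defer span.End()
	// peer.service SHOULD equal the remote side's service.name resource attribute.
	span.SetAttributes(semconv.PeerService("AuthTokenCache"))
}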
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and + // time the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the + // "process.exit.code" semantic conventions. It represents the exit code of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the + // "process.exit.time" semantic conventions. It represents the date and + // time the process exited, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID + // of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents the whether + // the process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. 
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID + // of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessUserIDKey is the attribute Key conforming to the + // "process.user.id" semantic conventions. It represents the effective user + // ID (EUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the + // "process.user.name" semantic conventions. It represents the username of + // the effective user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" + // semantic conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily + // unique across all processes on the host but it is unique within the + // process namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") +) + +var ( + // voluntary + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +var ( + // major + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and +// time the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time +// the process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of +// the process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
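// A minimal sketch (not part of the vendored file) describing the current
// process with the process.* helpers above as resource attributes. The
// semconv import path and the use of the SDK resource package are assumptions
// about how this vendored package is consumed.
package example

import (
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func processResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessExecutableName("otelcol"), // example value from the docs above
		semconv.ProcessOwner("root"),
		semconv.ProcessCommandArgs(os.Args...), // full argv, per the comment above
	)
}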
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the + // "rpc.message.type" semantic conventions. It represents the whether this + // is a received or sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It represents the mUST be calculated +// as two different counters starting from `1` one for sent messages and one +// for received message. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
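// A minimal sketch (not part of the vendored file) annotating a gRPC client
// span with the rpc.* attributes defined above; the semconv import path and
// the service/method names are assumptions taken from the examples in the
// comments.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func echo(ctx context.Context) {
	_, span := otel.Tracer("example/grpc").Start(ctx, "myservice.EchoService/exampleMethod")
	defer span.End()
	span.SetAttributes(
		semconv.RPCSystemGRPC,                       // enum value defined above
		semconv.RPCService("myservice.EchoService"), // full logical service name
		semconv.RPCMethod("exampleMethod"),          // must match the method part of the span name
		semconv.RPCGRPCStatusCodeOk,                 // numeric gRPC status code 0
	)
}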
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
Similar to what can be seen in the man page for the + // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) + // file, the underlying + // data, such as pod name and namespace should be treated as confidential, + // being the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we + // do not recommend using one identifier + // for all processes participating in the application. Instead, it's + // recommended each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it + // can't unambiguously determine the + // service instance that is generating that telemetry. For instance, + // creating an UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container + // within that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, + // as they know the target address and + // port. + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If + // `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. 
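// A minimal sketch (not part of the vendored file) of the service.* helpers
// defined just below used as resource attributes; the semconv import path,
// the UUID instance ID, and the SDK resource package usage are assumptions.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func serviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"), // MUST be identical across instances of the service
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"), // e.g. a random UUID v4
	)
}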
+func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. 
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process attributes +const ( + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +var ( + // running + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Attributes for telemetry SDK. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
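As a hedged illustration (editorial, not part of the vendored file): the service and telemetry SDK helpers above can be combined into an attribute set; the service name and version below are placeholder values, and how the set is attached to a resource is left to the caller.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// sdkAttributes builds an attribute set describing the emitting service and
// SDK, using only helpers defined in this generated file. Per the note above,
// the OpenTelemetry SDK itself sets telemetry.sdk.name to "opentelemetry".
func sdkAttributes() attribute.Set {
	return attribute.NewSet(
		semconv.ServiceName("shoppingcart"),      // placeholder service name
		semconv.ServiceVersion("2.0.0"),          // placeholder version
		semconv.TelemetrySDKName("opentelemetry"),
		semconv.TelemetrySDKLanguageGo,
	)
}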
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. 
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the + // date/Time indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the also + // called an SNI, this tells the server which hostname to which the client + // is attempting to connect to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions. It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. 
It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. 
It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions. It represents a hash that identifies +// clients based on how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientServerName returns an attribute KeyValue conforming to the +// "tls.client.server_name" semantic conventions. It represents the also called +// an SNI, this tells the server which hostname to which the client is +// attempting to connect to. 
+func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. 
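A minimal sketch of the TLS helpers in use (illustrative only, not part of the vendored file): the enum members and constructors above describe the outcome of a handshake; the values are taken from the doc-comment examples.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// tlsAttributes is a hypothetical helper that records a negotiated TLS
// connection using the constructors defined in this generated file.
func tlsAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.TLSEstablished(true),
		semconv.TLSProtocolNameTLS,
		semconv.TLSProtocolVersion("1.2"),
		semconv.TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
		semconv.TLSClientServerName("opentelemetry.io"),
	}
}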
+func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" + // semantic conventions. It represents the domain extracted from the + // `url.full`, such as "opentelemetry.io". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full` + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). For example, the registered domain for + // `foo.example.com` is `example.com`. Trying to approximate this by simply + // taking the last two labels will not work well for TLDs such as `co.uk`. + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name + // under the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain + // contains all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If + // the domain has multiple levels of subdomain, such as + // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, + // with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" + // semantic conventions. It represents the low-cardinality template of an + // [absolute path + // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective + // top level domain (eTLD), also known as the domain suffix, is the last + // part of the domain name. For example, the top level domain for + // example.com is `com`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the +// `url.full`, such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the +// "url.extension" semantic conventions. It represents the file extension +// extracted from the `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the +// "url.original" semantic conventions. It represents the unmodified original +// URL as seen in the event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" +// semantic conventions. It represents the port extracted from the `url.full` +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. 
It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name includes all of the names except the host name +// under the registered_domain. In a partially qualified domain, or if the +// qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of +// the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version` + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. 
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
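A short sketch combining the URL and user-agent helpers above (illustrative only, not part of the vendored file); values come from the doc-comment examples, and how the slice is attached to telemetry is up to the caller.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// requestURLAttributes is a hypothetical helper describing an observed HTTP
// request URL and its user agent with the constructors defined above.
func requestURLAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.URLFull("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
		semconv.URLScheme("https"),
		semconv.URLPath("/search"),
		semconv.URLQuery("q=OpenTelemetry"),
		semconv.UserAgentOriginal("YourApp/1.0.0 grpc-java-okhttp/1.27.2"),
	}
}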
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 0000000000..d031bbea78 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 0000000000..bfaee0d56e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 0000000000..fcdb9f4859 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.max` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.min` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the deprecated, use `db.client.connection.pending_requests` instead. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // deprecated, use `db.client.connection.timeouts` instead. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
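Reviewer aside, not part of the vendored file: the generated Name/Unit/Description constants above (for example the `dns.lookup.duration` trio immediately preceding this note) are meant to be passed straight to instrument constructors so that instrument metadata stays aligned with the semantic conventions. A minimal sketch of that pattern, assuming the global MeterProvider is configured elsewhere; the meter name, the lookup target, and the raw "dns.question.name" attribute key are illustrative only:

package main

import (
	"context"
	"net"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Obtain a meter from the globally registered MeterProvider
	// (a no-op provider is used if none has been set).
	meter := otel.Meter("example/dnsmetrics") // meter name is illustrative

	// Build the instrument from the generated constants so the name, unit,
	// and description match the semantic conventions exactly.
	lookupDuration, err := meter.Float64Histogram(
		semconv.DNSLookupDurationName,
		metric.WithUnit(semconv.DNSLookupDurationUnit),
		metric.WithDescription(semconv.DNSLookupDurationDescription),
	)
	if err != nil {
		panic(err) // a real program would handle this more gracefully
	}

	start := time.Now()
	_, _ = net.LookupHost("example.com") // the operation being measured
	lookupDuration.Record(context.Background(), time.Since(start).Seconds(),
		// The raw attribute key is used here to keep the sketch independent
		// of any helper functions; it is illustrative, not prescriptive.
		metric.WithAttributes(attribute.String("dns.question.name", "example.com")))
}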
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the + // number of connections that are currently upgraded (WebSockets). . + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" + KestrelUpgradedConnectionsUnit = "{connection}" + KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." + + // KestrelTLSHandshakeDuration is the metric conforming to the + // "kestrel.tls_handshake.duration" semantic conventions. It represents the + // duration of TLS handshakes on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" + KestrelTLSHandshakeDurationUnit = "s" + KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." + + // KestrelActiveTLSHandshakes is the metric conforming to the + // "kestrel.active_tls_handshakes" semantic conventions. It represents the + // number of TLS handshakes that are currently in progress on the server. + // Instrument: updowncounter + // Unit: {handshake} + // Stability: Stable + KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" + KestrelActiveTLSHandshakesUnit = "{handshake}" + KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." + + // SignalrServerConnectionDuration is the metric conforming to the + // "signalr.server.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + SignalrServerConnectionDurationName = "signalr.server.connection.duration" + SignalrServerConnectionDurationUnit = "s" + SignalrServerConnectionDurationDescription = "The duration of connections on the server." + + // SignalrServerActiveConnections is the metric conforming to the + // "signalr.server.active_connections" semantic conventions. It represents the + // number of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + SignalrServerActiveConnectionsName = "signalr.server.active_connections" + SignalrServerActiveConnectionsUnit = "{connection}" + SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" + // semantic conventions. It represents the measures the duration of the + // function's logic execution. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInvokeDurationName = "faas.invoke_duration" + FaaSInvokeDurationUnit = "s" + FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" + + // FaaSInitDuration is the metric conforming to the "faas.init_duration" + // semantic conventions. It represents the measures the duration of the + // function's initialization, such as a cold start. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInitDurationName = "faas.init_duration" + FaaSInitDurationUnit = "s" + FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" + + // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic + // conventions. It represents the number of invocation cold starts. 
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
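Reviewer aside, not part of the vendored file: the HTTP metric constants above combine naturally with the attribute helpers shown earlier in this diff (URLScheme, UserAgentOriginal), since both live in the same semconv package. A hedged sketch of a request-duration middleware built on `http.server.request.duration`; the meter name, handler wiring, and listen address are illustrative assumptions:

package main

import (
	"net/http"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// timingMiddleware records request latency using the generated metric
// constants and the attribute helpers from the semconv package.
func timingMiddleware(next http.Handler) http.Handler {
	meter := otel.Meter("example/httpserver") // meter name is illustrative
	duration, err := meter.Float64Histogram(
		semconv.HTTPServerRequestDurationName,
		metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
		metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
	)
	if err != nil {
		panic(err) // a real server would handle this more gracefully
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		duration.Record(r.Context(), time.Since(start).Seconds(),
			metric.WithAttributes(
				semconv.URLScheme("http"),                // helper shown earlier in this diff
				semconv.UserAgentOriginal(r.UserAgent()), // helper shown earlier in this diff
			))
	})
}

func main() {
	http.Handle("/", timingMiddleware(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})))
	_ = http.ListenAndServe(":8080", nil)
}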
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." + + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the cPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions. 
It represents the recent + // CPU utilization for the process as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // measures the duration of process operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // measures the number of processed messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. 
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It represents the reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It represents the reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It represents the reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking. 
+ // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It represents the reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." + + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the unix swap or windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. 
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 0000000000..4c87c7adcc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go index caf7249de8..6836c65478 100644 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 0000000000..58ccaba69b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go new file mode 100644 index 0000000000..7e2910025a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -0,0 +1,661 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + "encoding/json" + "fmt" + "math" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/internal/telemetry" +) + +// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func newAutoTracerProvider() TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(autoTracerProvider) + +type autoTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = autoTracerProvider{} + +func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { + cfg := NewTracerConfig(opts...) + return autoTracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} + +type autoTracer struct { + embedded.Tracer + + name, schemaURL, version string +} + +var _ Tracer = autoTracer{} + +func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { + var psc SpanContext + sampled := true + span := new(autoSpan) + + // Ask eBPF for sampling decision and span context info. 
+ t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *autoTracer) start( + ctx context.Context, + spanPtr *autoSpan, + psc *SpanContext, + sampled *bool, + sc *SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. +var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {} + +func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + n := int64(len(links)) + if n > 0 { + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + } else { + if limit > 0 { + n := int64(max(len(links)-limit, 0)) + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind SpanKind) telemetry.SpanKind { + switch kind { + case SpanKindInternal: + return telemetry.SpanKindInternal + case SpanKindServer: + return telemetry.SpanKindServer + case SpanKindClient: + return telemetry.SpanKindClient + case SpanKindProducer: + return telemetry.SpanKindProducer + case SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} + +type autoSpan struct { + embedded.Span + + spanContext SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *autoSpan) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *autoSpan) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *autoSpan) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. 
+ n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) + if limit == 0 { + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return nil, out + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked. +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. 
+// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *autoSpan) End(opts ...SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *autoSpan) end(opts []SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*autoSpan) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *autoSpan) RecordError(err error, opts ...EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *autoSpan) AddEvent(name string, opts ...EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. 
+func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *autoSpan) AddLink(link Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *autoSpan) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() } + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. 
+ LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. +func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + // Ignore invalid environment variable. + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 3aadc66cf7..9c0b720a4d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -224,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 76f9a083c4..8c45a7107f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -33,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. @@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { - return noopSpan{} + return noopSpanInstance } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } - return noopSpan{} + return noopSpanInstance } // SpanContextFromContext returns the current Span's SpanContext. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 440f3d7565..cdbf41d6d7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace provides an implementation of the tracing part of the @@ -107,7 +96,7 @@ can embed the API interface directly. This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a trasitive +[go.opentelemetry.io/otel/trace], which may be done with a transitive dependency. Finally, an author can embed another implementation in theirs. The embedded diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 0000000000..7754a239ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go index 898db5a754..3e359a00bf 100644 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go new file mode 100644 index 0000000000..f663547b4e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. +func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal reports whether a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go new file mode 100644 index 0000000000..5debe90bbb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides a lightweight representation of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding.
+*/ +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go new file mode 100644 index 0000000000..7b1ae3c4ea --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns false if id contains at least one non-zero byte. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns false if the span ID contains at least one non-zero byte. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go new file mode 100644 index 0000000000..f5e3a8cec9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Uint64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go new file mode 100644 index 0000000000..1798a702d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go new file mode 100644 index 0000000000..c2b4c635b7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go new file mode 100644 index 0000000000..3c5e1cdb1b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -0,0 +1,460 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. 
+ TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. 
On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. 
+func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.StartTime = time.Unix(0, v) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.EndTime = time.Unix(0, v) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is a protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value.
+ SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), // nolint: gosec // >0 checked above + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + se.Time = time.Unix(0, v) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. 
+type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go new file mode 100644 index 0000000000..1d013a8fa8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. 
+ StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go new file mode 100644 index 0000000000..b039407081 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. 
+ SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go new file mode 100644 index 0000000000..7251492da0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -0,0 +1,453 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. 
It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. + bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{ + num: uint64(v), // nolint: gosec // Store raw bytes. + any: ValueKindInt64, + } +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. 
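Value above is a compact tagged union in the style of log/slog: `num` carries either the scalar bits or a length, while `any` carries a kind tag or an unsafe pointer to the backing data. A minimal standalone sketch of that representation trick for the string case; `miniValue` is illustrative only, not the vendored API:

```go
package main

import (
	"fmt"
	"unsafe"
)

type stringptr *byte

// miniValue mirrors the idea used by Value above: num holds the scalar bits
// or a length, any holds a kind tag or a pointer to the backing data.
type miniValue struct {
	num uint64
	any any
}

func stringValue(s string) miniValue {
	// Store the string as (pointer, length) without copying its bytes.
	return miniValue{num: uint64(len(s)), any: stringptr(unsafe.StringData(s))}
}

func (v miniValue) asString() string {
	// Rebuild the string header from the stored pointer and length.
	return unsafe.String(v.any.(stringptr), v.num)
}

func main() {
	v := stringValue("hello")
	fmt.Println(v.asString(), "len:", v.num) // hello len: 5
}
```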
+func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. 
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a slog.Kind is not handled. It is
+		// preferable to have users open issues asking why their attributes
+		// have an "unhandled: " prefix than say that their code is panicking.
+		return fmt.Sprintf("<unhandled: %v>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
+	case ValueKindFloat64:
+		return json.Marshal(struct {
+			Value float64 `json:"doubleValue"`
+		}{v.asFloat64()})
+	case ValueKindBool:
+		return json.Marshal(struct {
+			Value bool `json:"boolValue"`
+		}{v.asBool()})
+	case ValueKindBytes:
+		return json.Marshal(struct {
+			Value []byte `json:"bytesValue"`
+		}{v.asBytes()})
+	case ValueKindMap:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Attr `json:"values"`
+			} `json:"kvlistValue"`
+		}{struct {
+			Values []Attr `json:"values"`
+		}{v.asMap()}})
+	case ValueKindSlice:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Value `json:"values"`
+			} `json:"arrayValue"`
+		}{struct {
+			Values []Value `json:"values"`
+		}{v.asSlice()}})
+	case ValueKindEmpty:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+	}
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Value type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb81611..c00221e7be 100644 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c125491cae..c8b1ae5d67 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
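For reference, the MarshalJSON/UnmarshalJSON pair in value.go above maps each Value kind onto the OTLP JSON AnyValue encoding. Roughly, the wire shapes look like this; the values are illustrative, and the kvlistValue entry assumes Attr marshals as an OTLP key/value pair, which is defined elsewhere in the package:

```go
// Illustrative only: approximate OTLP JSON shapes per Value kind.
package main

import "fmt"

func main() {
	fmt.Println(`String  -> {"stringValue":"auth"}`)
	fmt.Println(`Int64   -> {"intValue":"42"}`) // encoded as a decimal string
	fmt.Println(`Float64 -> {"doubleValue":3.14}`)
	fmt.Println(`Bool    -> {"boolValue":true}`)
	fmt.Println(`Bytes   -> {"bytesValue":"AQID"}`) // standard base64 of 0x01 0x02 0x03
	fmt.Println(`Slice   -> {"arrayValue":{"values":[{"intValue":"1"}]}}`)
	fmt.Println(`Map     -> {"kvlistValue":{"values":[{"key":"k","value":{"stringValue":"v"}}]}}`)
}
```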
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } @@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -86,8 +75,29 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. func (noopSpan) SetName(string) {} // TracerProvider returns a no-op TracerProvider. -func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } +func (s noopSpan) TracerProvider() TracerProvider { + return s.tracerProvider(autoInstEnabled) +} + +// autoInstEnabled defines if the auto-instrumentation SDK is enabled. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches to the process. +var autoInstEnabled = new(bool) + +// tracerProvider return a noopTracerProvider if autoEnabled is false, +// otherwise it will return a TracerProvider from the sdk package used in +// auto-instrumentation. +func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { + if *autoEnabled { + return newAutoTracerProvider() + } + return noopTracerProvider{} +} diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md new file mode 100644 index 0000000000..cd382c82a1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md @@ -0,0 +1,3 @@ +# Trace Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop) diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go new file mode 100644 index 0000000000..64a4f1b362 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry trace API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry trace API will effectively +// disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry trace API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/trace/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. + + _ trace.TracerProvider = TracerProvider{} + _ trace.Tracer = Tracer{} + _ trace.Span = Span{} +) + +// TracerProvider is an OpenTelemetry No-Op TracerProvider. 
+type TracerProvider struct{ embedded.TracerProvider } + +// NewTracerProvider returns a TracerProvider that does not record any telemetry. +func NewTracerProvider() TracerProvider { + return TracerProvider{} +} + +// Tracer returns an OpenTelemetry Tracer that does not record any telemetry. +func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { + return Tracer{} +} + +// Tracer is an OpenTelemetry No-Op Tracer. +type Tracer struct{ embedded.Tracer } + +// Start creates a span. The created span will be set in a child context of ctx +// and returned with the span. +// +// If ctx contains a span context, the returned span will also contain that +// span context. If the span context in ctx is for a non-recording span, that +// span instance will be returned directly. +func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { + span := trace.SpanFromContext(ctx) + + // If the parent context contains a non-zero span context, that span + // context needs to be returned as a non-recording span + // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk). + var zeroSC trace.SpanContext + if sc := span.SpanContext(); !sc.Equal(zeroSC) { + if !span.IsRecording() { + // If the span is not recording return it directly. + return ctx, span + } + // Otherwise, return the span context needs in a non-recording span. + span = Span{sc: sc} + } else { + // No parent, return a No-Op span with an empty span context. + span = noopSpanInstance + } + return trace.ContextWithSpan(ctx, span), span +} + +var noopSpanInstance trace.Span = Span{} + +// Span is an OpenTelemetry No-Op Span. +type Span struct { + embedded.Span + + sc trace.SpanContext +} + +// SpanContext returns an empty span context. +func (s Span) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (Span) IsRecording() bool { return false } + +// SetStatus does nothing. +func (Span) SetStatus(codes.Code, string) {} + +// SetAttributes does nothing. +func (Span) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (Span) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (Span) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (Span) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (Span) AddLink(trace.Link) {} + +// SetName does nothing. +func (Span) SetName(string) {} + +// TracerProvider returns a No-Op TracerProvider. +func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000..ef85cb70c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. 
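A minimal usage sketch of the trace/noop package vendored above: with no span context in the parent context, Start hands back a span that records nothing and carries an empty span context, matching the Start implementation shown.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	tp := noop.NewTracerProvider()
	tracer := tp.Tracer("example")

	// No parent span context in ctx, so the returned span is a no-op span
	// with an empty (invalid) span context.
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	fmt.Println(span.IsRecording())           // false
	fmt.Println(span.SpanContext().IsValid()) // false
}
```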
How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000..d3aa476ee1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. 
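Following the Tracer naming guidance above, instrumentation typically requests a tracer scoped to its own package path; a short sketch where the module path and version are placeholders:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// newTracer uses the Go package path of the instrumenting library as the
// scope name and distinguishes releases via the instrumentation version.
func newTracer() trace.Tracer {
	return otel.GetTracerProvider().Tracer(
		"github.com/example/mylib", // placeholder scope name
		trace.WithInstrumentationVersion("v0.1.0"),
	)
}

func main() {
	_ = newTracer()
}
```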
See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. 
+ SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 26a4b2260e..d49adf671b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -1,28 +1,12 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
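Taken together, the Span, Link, and SpanKind APIs relocated into span.go above are used roughly as in the sketch below; the scope name, attributes, and error are placeholders:

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// handle starts a server-kind span, links it to another trace, and records an error.
func handle(ctx, batchCtx context.Context) {
	tracer := otel.Tracer("github.com/example/mylib") // placeholder scope name

	// Links are preferably supplied at creation time (see the AddLink doc above).
	ctx, span := tracer.Start(ctx, "handle",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithLinks(trace.LinkFromContext(batchCtx, attribute.String("origin", "batch"))),
	)
	defer span.End()

	span.SetAttributes(attribute.Int("items", 3))

	if err := process(ctx); err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, "processing failed")
	}
}

func process(context.Context) error { return errors.New("boom") }

func main() {
	handle(context.Background(), context.Background())
}
```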
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -337,241 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. 
The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. 
-func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). 
Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000..77952d2a0b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index db936ba5b7..dc5e34cad0 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -271,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index dbb61a4227..0000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. 
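Stepping back to the API change in tracestate.go above: the new TraceState.Walk method visits list-members in order until the callback returns false. A small usage sketch against the public trace package:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("vendor1=opaque1,vendor2=opaque2")
	if err != nil {
		panic(err)
	}

	// Walk visits list-members in order; returning false stops the iteration.
	ts.Walk(func(key, value string) bool {
		fmt.Println(key, "=", value)
		return true
	})
}
```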
-printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh new file mode 100644 index 0000000000..1e87855eea --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) + +missingReadme=false +for dir in $dirs; do + if [ ! -f "$dir/README.md" ]; then + echo "couldn't find README.md for $dir" + missingReadme=true + fi +done + +if [ "$missingReadme" = true ] ; then + echo "Error: some READMEs couldn't be found." + exit 1 +fi diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 0000000000..c9b7cdbbfe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). +git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index c7aba1c3f4..d5fa71f674 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.22.0" + return "1.35.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index a9cfb80ae5..2b4cb4b418 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -1,32 +1,21 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 module-sets: stable-v1: - version: v1.22.0 + version: v1.35.0 modules: - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - go.opentelemetry.io/otel/exporters/stdout/stdouttrace - go.opentelemetry.io/otel/exporters/zipkin - go.opentelemetry.io/otel/metric @@ -34,19 +23,21 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.45.0 + version: v0.57.0 modules: - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus - - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + experimental-logs: + version: v0.11.0 + modules: + - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.7 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. 
All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c204..48dbb9d84c 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index feb1157b15..564920bd42 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -6,16 +6,13 @@ package google import ( "context" - "time" + "log" + "sync" "golang.org/x/oauth2" ) -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. 
-var appengineAppIDFunc func(c context.Context) string +var logOnce sync.Once // only spam about deprecation once // AppEngineTokenSource returns a token source that fetches tokens from either // the current application's service account or from the metadata server, @@ -23,8 +20,10 @@ var appengineAppIDFunc func(c context.Context) string // details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that // involves user accounts, see oauth2.Config instead. // -// First generation App Engine runtimes (<= Go 1.9): -// AppEngineTokenSource returns a token source that fetches tokens issued to the +// The current version of this library requires at least Go 1.17 to build, +// so first generation App Engine runtimes (<= Go 1.9) are unsupported. +// Previously, on first generation App Engine runtimes, AppEngineTokenSource +// returned a token source that fetches tokens issued to the // current App Engine application's service account. The provided context must have // come from appengine.NewContext. // @@ -34,5 +33,8 @@ var appengineAppIDFunc func(c context.Context) string // context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, // which DefaultTokenSource will use in this case) instead. func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - return appEngineTokenSource(ctx, scope...) + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") } diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go deleted file mode 100644 index e61587945b..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine - -// This file applies to App Engine first generation runtimes (<= Go 1.9). - -package google - -import ( - "context" - "sort" - "strings" - "sync" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &gaeTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type gaeTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) 
- if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go deleted file mode 100644 index 9c79aa0a0c..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !appengine - -// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. - -package google - -import ( - "context" - "log" - "sync" - - "golang.org/x/oauth2" -) - -var logOnce sync.Once // only spam about deprecation once - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - logOnce.Do(func() { - log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") - }) - return ComputeTokenSource("") -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 02ccd08a77..df958359a8 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -22,7 +22,7 @@ import ( const ( adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" - universeDomainDefault = "googleapis.com" + defaultUniverseDomain = "googleapis.com" ) // Credentials holds Google credentials, including "Application Default Credentials". @@ -42,6 +42,17 @@ type Credentials struct { // running on Google Cloud Platform. JSON []byte + // UniverseDomainProvider returns the default service domain for a given + // Cloud universe. Optional. + // + // On GCE, UniverseDomainProvider should return the universe domain value + // from Google Compute Engine (GCE)'s metadata server. See also [The attached service + // account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). + // If the GCE metadata server returns a 404 error, the default universe + // domain value should be returned. If the GCE metadata server returns an + // error other than 404, the error should be returned. + UniverseDomainProvider func() (string, error) + udMu sync.Mutex // guards universeDomain // universeDomain is the default service domain for a given Cloud universe. universeDomain string @@ -58,60 +69,38 @@ type Credentials struct { // See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). func (c *Credentials) UniverseDomain() string { if c.universeDomain == "" { - return universeDomainDefault + return defaultUniverseDomain } return c.universeDomain } // GetUniverseDomain returns the default service domain for a given Cloud -// universe. +// universe. If present, UniverseDomainProvider will be invoked and its return +// value will be cached. // // The default value is "googleapis.com". -// -// It obtains the universe domain from the attached service account on GCE when -// authenticating via the GCE metadata server. See also [The attached service -// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). 
-// If the GCE metadata server returns a 404 error, the default value is -// returned. If the GCE metadata server returns an error other than 404, the -// error is returned. func (c *Credentials) GetUniverseDomain() (string, error) { c.udMu.Lock() defer c.udMu.Unlock() - if c.universeDomain == "" && metadata.OnGCE() { - // If we're on Google Compute Engine, an App Engine standard second - // generation runtime, or App Engine flexible, use the metadata server. - err := c.computeUniverseDomain() + if c.universeDomain == "" && c.UniverseDomainProvider != nil { + // On Google Compute Engine, an App Engine standard second generation + // runtime, or App Engine flexible, use an externally provided function + // to request the universe domain from the metadata server. + ud, err := c.UniverseDomainProvider() if err != nil { return "", err } + c.universeDomain = ud } - // If not on Google Compute Engine, or in case of any non-error path in - // computeUniverseDomain that did not set universeDomain, set the default - // universe domain. + // If no UniverseDomainProvider (meaning not on Google Compute Engine), or + // in case of any (non-error) empty return value from + // UniverseDomainProvider, set the default universe domain. if c.universeDomain == "" { - c.universeDomain = universeDomainDefault + c.universeDomain = defaultUniverseDomain } return c.universeDomain, nil } -// computeUniverseDomain fetches the default service domain for a given Cloud -// universe from Google Compute Engine (GCE)'s metadata server. It's only valid -// to use this method if your program is running on a GCE instance. -func (c *Credentials) computeUniverseDomain() error { - var err error - c.universeDomain, err = metadata.Get("universe/universe_domain") - if err != nil { - if _, ok := err.(metadata.NotDefinedError); ok { - // http.StatusNotFound (404) - c.universeDomain = universeDomainDefault - return nil - } else { - return err - } - } - return nil -} - // DefaultCredentials is the old name of Credentials. // // Deprecated: use Credentials instead. @@ -199,9 +188,7 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // 2. A JSON file in a location known to the gcloud command-line tool. // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// 3. On Google Compute Engine, Google App Engine standard second generation runtimes // (>= Go 1.11), and Google App Engine flexible environment, it fetches // credentials from the metadata server. func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { @@ -224,24 +211,27 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar return CredentialsFromJSONWithParams(ctx, b, params) } - // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) - // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) - // and App Engine flexible use ComputeTokenSource and the metadata server. 
- if appengineTokenFunc != nil { - return &Credentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, params.Scopes...), - }, nil - } - - // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // Third, if we're on Google Compute Engine, an App Engine standard second generation runtime, // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() + universeDomainProvider := func() (string, error) { + universeDomain, err := metadata.Get("universe/universe_domain") + if err != nil { + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return defaultUniverseDomain, nil + } else { + return "", err + } + } + return universeDomain, nil + } return &Credentials{ - ProjectID: id, - TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), - universeDomain: params.UniverseDomain, + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + UniverseDomainProvider: universeDomainProvider, + universeDomain: params.UniverseDomain, }, nil } @@ -287,7 +277,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params } // Authorized user credentials are only supported in the googleapis.com universe. if f.Type == userCredentialsKey { - universeDomain = universeDomainDefault + universeDomain = defaultUniverseDomain } ts, err := f.tokenSource(ctx, params) diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 03c42c6f87..830d268c1e 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -22,91 +22,9 @@ // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // -// # Workload Identity Federation +// # Workload and Workforce Identity Federation // -// Using workload identity federation, your application can access Google Cloud -// resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC) or SAML 2.0. -// Traditionally, applications running outside Google Cloud have used service -// account keys to access Google Cloud resources. Using identity federation, -// you can allow your workload to impersonate a service account. -// This lets you access Google Cloud resources directly, eliminating the -// maintenance and security burden associated with service account keys. -// -// Follow the detailed instructions on how to configure Workload Identity Federation -// in various platforms: -// -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws -// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure -// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc -// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml -// -// For OIDC and SAML providers, the library can retrieve tokens in three ways: -// from a local file location (file-sourced credentials), from a server -// (URL-sourced credentials), or from a local executable (executable-sourced -// credentials). -// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC/SAML token prior to expiration. 
-// For tokens with one hour lifetimes, the token needs to be updated in the file -// every hour. The token can be stored directly as plain text or in JSON format. -// For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC/SAML token. The response can be in plain text or JSON. -// Additional required request headers can also be specified. -// For executable-sourced credentials, an application needs to be available to -// output the OIDC/SAML token and other information in a JSON format. -// For more information on how these work (and how to implement -// executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration -// -// Note that this library does not perform any validation on the token_url, token_info_url, -// or service_account_impersonation_url fields of the credential configuration. -// It is not recommended to use a credential configuration that you did not generate with -// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. -// -// # Workforce Identity Federation -// -// Workforce identity federation lets you use an external identity provider (IdP) to -// authenticate and authorize a workforce—a group of users, such as employees, partners, -// and contractors—using IAM, so that the users can access Google Cloud services. -// Workforce identity federation extends Google Cloud's identity capabilities to support -// syncless, attribute-based single sign on. -// -// With workforce identity federation, your workforce can access Google Cloud resources -// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or -// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation -// Services (AD FS), Okta, and others. -// -// Follow the detailed instructions on how to configure Workload Identity Federation -// in various platforms: -// -// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad -// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta -// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc -// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml -// -// For workforce identity federation, the library can retrieve tokens in three ways: -// from a local file location (file-sourced credentials), from a server -// (URL-sourced credentials), or from a local executable (executable-sourced -// credentials). -// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC/SAML token prior to expiration. -// For tokens with one hour lifetimes, the token needs to be updated in the file -// every hour. The token can be stored directly as plain text or in JSON format. -// For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC/SAML token. The response can be in plain text or JSON. -// Additional required request headers can also be specified. -// For executable-sourced credentials, an application needs to be available to -// output the OIDC/SAML token and other information in a JSON format. 
-// For more information on how these work (and how to implement -// executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in -// -// # Security considerations -// -// Note that this library does not perform any validation on the token_url, token_info_url, -// or service_account_impersonation_url fields of the credential configuration. -// It is not recommended to use a credential configuration that you did not generate with -// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// For information on how to use Workload and Workforce Identity Federation, see [golang.org/x/oauth2/google/externalaccount]. // // # Credentials // diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go similarity index 77% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go rename to vendor/golang.org/x/oauth2/google/externalaccount/aws.go index bd4efd19ba..ca27c2e98c 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -26,22 +26,28 @@ import ( "golang.org/x/oauth2" ) -type awsSecurityCredentials struct { - AccessKeyID string `json:"AccessKeyID"` +// AwsSecurityCredentials models AWS security credentials. +type AwsSecurityCredentials struct { + // AccessKeyId is the AWS Access Key ID - Required. + AccessKeyID string `json:"AccessKeyID"` + // SecretAccessKey is the AWS Secret Access Key - Required. SecretAccessKey string `json:"SecretAccessKey"` - SecurityToken string `json:"Token"` + // SessionToken is the AWS Session token. This should be provided for temporary AWS security credentials - Optional. + SessionToken string `json:"Token"` } // awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. type awsRequestSigner struct { RegionName string - AwsSecurityCredentials awsSecurityCredentials + AwsSecurityCredentials *AwsSecurityCredentials } // getenv aliases os.Getenv for testing var getenv = os.Getenv const ( + defaultRegionalCredentialVerificationUrl = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + // AWS Signature Version 4 signing algorithm identifier. 
awsAlgorithm = "AWS4-HMAC-SHA256" @@ -197,8 +203,8 @@ func (rs *awsRequestSigner) SignRequest(req *http.Request) error { signedRequest.Header.Add("host", requestHost(req)) - if rs.AwsSecurityCredentials.SecurityToken != "" { - signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) } if signedRequest.Header.Get("date") == "" { @@ -251,16 +257,18 @@ func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp } type awsCredentialSource struct { - EnvironmentID string - RegionURL string - RegionalCredVerificationURL string - CredVerificationURL string - IMDSv2SessionTokenURL string - TargetResource string - requestSigner *awsRequestSigner - region string - ctx context.Context - client *http.Client + environmentID string + regionURL string + regionalCredVerificationURL string + credVerificationURL string + imdsv2SessionTokenURL string + targetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client + awsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + supplierOptions SupplierOptions } type awsRequestHeader struct { @@ -292,18 +300,25 @@ func canRetrieveSecurityCredentialFromEnvironment() bool { return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" } -func shouldUseMetadataServer() bool { - return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +func (cs awsCredentialSource) shouldUseMetadataServer() bool { + return cs.awsSecurityCredentialsSupplier == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) } func (cs awsCredentialSource) credentialSourceType() string { + if cs.awsSecurityCredentialsSupplier != nil { + return "programmatic" + } return "aws" } func (cs awsCredentialSource) subjectToken() (string, error) { + // Set Defaults + if cs.regionalCredVerificationURL == "" { + cs.regionalCredVerificationURL = defaultRegionalCredentialVerificationUrl + } if cs.requestSigner == nil { headers := make(map[string]string) - if shouldUseMetadataServer() { + if cs.shouldUseMetadataServer() { awsSessionToken, err := cs.getAWSSessionToken() if err != nil { return "", err @@ -318,8 +333,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) { if err != nil { return "", err } - - if cs.region, err = cs.getRegion(headers); err != nil { + cs.region, err = cs.getRegion(headers) + if err != nil { return "", err } @@ -331,7 +346,7 @@ func (cs awsCredentialSource) subjectToken() (string, error) { // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. - req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil) + req, err := http.NewRequest("POST", strings.Replace(cs.regionalCredVerificationURL, "{region}", cs.region, 1), nil) if err != nil { return "", err } @@ -339,8 +354,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) { // provider, with or without the HTTPS prefix. // Including this header as part of the signature is recommended to // ensure data integrity. 
- if cs.TargetResource != "" { - req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource) + if cs.targetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.targetResource) } cs.requestSigner.SignRequest(req) @@ -387,11 +402,11 @@ func (cs awsCredentialSource) subjectToken() (string, error) { } func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { - if cs.IMDSv2SessionTokenURL == "" { + if cs.imdsv2SessionTokenURL == "" { return "", nil } - req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) + req, err := http.NewRequest("PUT", cs.imdsv2SessionTokenURL, nil) if err != nil { return "", err } @@ -410,25 +425,29 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS session token - %s", string(respBody)) } return string(respBody), nil } func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsRegion(cs.ctx, cs.supplierOptions) + } if canRetrieveRegionFromEnvironment() { if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + cs.region = envAwsRegion return envAwsRegion, nil } return getenv("AWS_DEFAULT_REGION"), nil } - if cs.RegionURL == "" { - return "", errors.New("oauth2/google: unable to determine AWS region") + if cs.regionURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine AWS region") } - req, err := http.NewRequest("GET", cs.RegionURL, nil) + req, err := http.NewRequest("GET", cs.regionURL, nil) if err != nil { return "", err } @@ -449,7 +468,7 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS region - %s", string(respBody)) } // This endpoint will return the region in format: us-east-2b. 
@@ -461,12 +480,15 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err return string(respBody[:respBodyEnd]), nil } -func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result *AwsSecurityCredentials, err error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsSecurityCredentials(cs.ctx, cs.supplierOptions) + } if canRetrieveSecurityCredentialFromEnvironment() { - return awsSecurityCredentials{ + return &AwsSecurityCredentials{ AccessKeyID: getenv(awsAccessKeyId), SecretAccessKey: getenv(awsSecretAccessKey), - SecurityToken: getenv(awsSessionToken), + SessionToken: getenv(awsSessionToken), }, nil } @@ -481,24 +503,23 @@ func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) } if credentials.AccessKeyID == "" { - return result, errors.New("oauth2/google: missing AccessKeyId credential") + return result, errors.New("oauth2/google/externalaccount: missing AccessKeyId credential") } if credentials.SecretAccessKey == "" { - return result, errors.New("oauth2/google: missing SecretAccessKey credential") + return result, errors.New("oauth2/google/externalaccount: missing SecretAccessKey credential") } - return credentials, nil + return &credentials, nil } -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { - var result awsSecurityCredentials +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (AwsSecurityCredentials, error) { + var result AwsSecurityCredentials - req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.credVerificationURL, roleName), nil) if err != nil { return result, err } - req.Header.Add("Content-Type", "application/json") for name, value := range headers { req.Header.Add(name, value) @@ -516,7 +537,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h } if resp.StatusCode != 200 { - return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody)) + return result, fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS security credentials - %s", string(respBody)) } err = json.Unmarshal(respBody, &result) @@ -524,11 +545,11 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h } func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { - if cs.CredVerificationURL == "" { - return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") + if cs.credVerificationURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine the AWS metadata server security credentials endpoint") } - req, err := http.NewRequest("GET", cs.CredVerificationURL, nil) + req, err := http.NewRequest("GET", cs.credVerificationURL, nil) if err != nil { return "", err } @@ -549,7 +570,7 @@ func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (s } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS role name - 
%s", string(respBody)) } return string(respBody), nil diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go new file mode 100644 index 0000000000..6c81a68728 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -0,0 +1,485 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package externalaccount provides support for creating workload identity +federation and workforce identity federation token sources that can be +used to access Google Cloud resources from external identity providers. + +# Workload Identity Federation + +Using workload identity federation, your application can access Google Cloud +resources from Amazon Web Services (AWS), Microsoft Azure or any identity +provider that supports OpenID Connect (OIDC) or SAML 2.0. +Traditionally, applications running outside Google Cloud have used service +account keys to access Google Cloud resources. Using identity federation, +you can allow your workload to impersonate a service account. +This lets you access Google Cloud resources directly, eliminating the +maintenance and security burden associated with service account keys. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml + +For OIDC and SAML providers, the library can retrieve tokens in fours ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user defined function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers, +or one that implements [AwsSecurityCredentialsSupplier] for AWS providers. This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google +Cloud resources. 
For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource)) + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. +It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. + +# Workforce Identity Federation + +Workforce identity federation lets you use an external identity provider (IdP) to +authenticate and authorize a workforce—a group of users, such as employees, partners, +and contractors—using IAM, so that the users can access Google Cloud services. +Workforce identity federation extends Google Cloud's identity capabilities to support +syncless, attribute-based single sign on. + +With workforce identity federation, your workforce can access Google Cloud resources +using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +Services (AD FS), Okta, and others. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml + +For workforce identity federation, the library can retrieve tokens in four ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user supplied function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers. +This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used access Google +Cloud resources. For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource)) + +# Security considerations + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. 
+It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +*/ +package externalaccount + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/impersonate" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +const ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token" + defaultUniverseDomain = "googleapis.com" +) + +// now aliases time.Now for testing +var now = func() time.Time { + return time.Now().UTC() +} + +// Config stores the configuration for fetching tokens with external credentials. +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. Required. + Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec. + // Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + // Required. + SubjectTokenType string + // TokenURL is the STS token exchange endpoint. If not provided, will default to + // https://sts.UNIVERSE_DOMAIN/v1/token, with UNIVERSE_DOMAIN set to the + // default service domain googleapis.com unless UniverseDomain is set. + // Optional. + TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. Optional. + TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. Optional. + ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. If not provided, it will default to 3600. Optional. + ServiceAccountImpersonationLifetimeSeconds int + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using ClientId as username and ClientSecret as password. Optional. + ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. Optional. + ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or + // CredentialSource must be provided. Optional. + CredentialSource *CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project header which overrides the project associated with the credentials. Optional. + QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. Optional. 
+ Scopes []string + // WorkforcePoolUserProject is the workforce pool user project number when the credential + // corresponds to a workforce pool and not a workload identity pool. + // The underlying principal must still have serviceusage.services.use IAM + // permission to use the project for billing/quota. Optional. + WorkforcePoolUserProject string + // SubjectTokenSupplier is an optional token supplier for OIDC/SAML credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. + SubjectTokenSupplier SubjectTokenSupplier + // AwsSecurityCredentialsSupplier is an AWS Security Credential supplier for AWS credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. + AwsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + // UniverseDomain is the default service domain for a given Cloud universe. + // This value will be used in the default STS token URL. The default value + // is "googleapis.com". It will not be used if TokenURL is set. Optional. + UniverseDomain string +} + +var ( + validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) +) + +func validateWorkforceAudience(input string) bool { + return validWorkforceAudiencePattern.MatchString(input) +} + +// NewTokenSource Returns an external account TokenSource using the provided external account config. +func NewTokenSource(ctx context.Context, conf Config) (oauth2.TokenSource, error) { + if conf.Audience == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Audience must be set") + } + if conf.SubjectTokenType == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Subject token type must be set") + } + if conf.WorkforcePoolUserProject != "" { + valid := validateWorkforceAudience(conf.Audience) + if !valid { + return nil, fmt.Errorf("oauth2/google/externalaccount: Workforce pool user project should not be set for non-workforce pool credentials") + } + } + count := 0 + if conf.CredentialSource != nil { + count++ + } + if conf.SubjectTokenSupplier != nil { + count++ + } + if conf.AwsSecurityCredentialsSupplier != nil { + count++ + } + if count == 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: One of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + if count > 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: Only one of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + return conf.tokenSource(ctx, "https") +} + +// tokenSource is a private function that's directly called by some of the tests, +// because the unit test URLs are mocked, and would otherwise fail the +// validity check. +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + if c.ServiceAccountImpersonationURL == "" { + return oauth2.ReuseTokenSource(nil, ts), nil + } + scopes := c.Scopes + ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp := impersonate.ImpersonateTokenSource{ + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, + } + return oauth2.ReuseTokenSource(nil, imp), nil +} + +// Subject token file types. 
+const ( + fileTypeText = "text" + fileTypeJSON = "json" +) + +// Format contains information needed to retireve a subject token for URL or File sourced credentials. +type Format struct { + // Type should be either "text" or "json". This determines whether the file or URL sourced credentials + // expect a simple text subject token or if the subject token will be contained in a JSON object. + // When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This is the field name that the credentials will check + // for the subject token in the file or URL response. This would be "access_token" for azure. + SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +type CredentialSource struct { + // File is the location for file sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + File string `json:"file"` + + // Url is the URL to call for URL sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + URL string `json:"url"` + // Headers are the headers to attach to the request for URL sourced credentials. + Headers map[string]string `json:"headers"` + + // Executable is the configuration object for executable sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + Executable *ExecutableConfig `json:"executable"` + + // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS". + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + EnvironmentID string `json:"environment_id"` + // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials. + RegionURL string `json:"region_url"` + // RegionalCredVerificationURL is the AWS regional credential verification URL, will default to + // "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" if not provided." + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + // IMDSv2SessionTokenURL is the URL to retrieve the session token when using IMDSv2 in AWS. + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + // Format is the format type for the subject token. Used for File and URL sourced credentials. Expected values are "text" or "json". + Format Format `json:"format"` +} + +// ExecutableConfig contains information needed for executable sourced credentials. +type ExecutableConfig struct { + // Command is the the full command to run to retrieve the subject token. + // This can include arguments. Must be an absolute path for the program. Required. + Command string `json:"command"` + // TimeoutMillis is the timeout duration, in milliseconds. Defaults to 30000 milliseconds when not provided. Optional. + TimeoutMillis *int `json:"timeout_millis"` + // OutputFile is the absolute path to the output file where the executable will cache the response. + // If specified the auth libraries will first check this location before running the executable. Optional. 
+ OutputFile string `json:"output_file"` +} + +// SubjectTokenSupplier can be used to supply a subject token to exchange for a GCP access token. +type SubjectTokenSupplier interface { + // SubjectToken should return a valid subject token or an error. + // The external account token source does not cache the returned subject token, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same subject token. + SubjectToken(ctx context.Context, options SupplierOptions) (string, error) +} + +// AWSSecurityCredentialsSupplier can be used to supply AwsSecurityCredentials and an AWS Region to +// exchange for a GCP access token. +type AwsSecurityCredentialsSupplier interface { + // AwsRegion should return the AWS region or an error. + AwsRegion(ctx context.Context, options SupplierOptions) (string, error) + // GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. + // The external account token source does not cache the returned security credentials, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same security credentials. + AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error) +} + +// SupplierOptions contains information about the requested subject token or AWS security credentials from the +// Google external account credential. +type SupplierOptions struct { + // Audience is the requested audience for the external account credential. + Audience string + // Subject token type is the requested subject token type for the external account credential. Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + SubjectTokenType string +} + +// tokenURL returns the default STS token endpoint with the configured universe +// domain. +func (c *Config) tokenURL() string { + if c.UniverseDomain == "" { + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, c.UniverseDomain, 1) +} + +// parse determines the type of CredentialSource needed. 
+func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { + //set Defaults + if c.TokenURL == "" { + c.TokenURL = c.tokenURL() + } + supplierOptions := SupplierOptions{Audience: c.Audience, SubjectTokenType: c.SubjectTokenType} + + if c.AwsSecurityCredentialsSupplier != nil { + awsCredSource := awsCredentialSource{ + awsSecurityCredentialsSupplier: c.AwsSecurityCredentialsSupplier, + targetResource: c.Audience, + supplierOptions: supplierOptions, + ctx: ctx, + } + return awsCredSource, nil + } else if c.SubjectTokenSupplier != nil { + return programmaticRefreshCredentialSource{subjectTokenSupplier: c.SubjectTokenSupplier, supplierOptions: supplierOptions, ctx: ctx}, nil + } else if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: aws version '%d' is not supported in the current build", awsVersion) + } + + awsCredSource := awsCredentialSource{ + environmentID: c.CredentialSource.EnvironmentID, + regionURL: c.CredentialSource.RegionURL, + regionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, + credVerificationURL: c.CredentialSource.URL, + targetResource: c.Audience, + ctx: ctx, + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.imdsv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + return awsCredSource, nil + } + } else if c.CredentialSource.File != "" { + return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil + } else if c.CredentialSource.URL != "" { + return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return createExecutableCredential(ctx, c.CredentialSource.Executable, c) + } + return nil, fmt.Errorf("oauth2/google/externalaccount: unable to parse credential source") +} + +type baseCredentialSource interface { + credentialSourceType() string + subjectToken() (string, error) +} + +// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. +type tokenSource struct { + ctx context.Context + conf *Config +} + +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + +// Token allows tokenSource to conform to the oauth2.TokenSource interface. 
+func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err + } + subjectToken, err := credSource.subjectToken() + + if err != nil { + return nil, err + } + stsRequest := stsexchange.TokenExchangeRequest{ + GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", + Audience: conf.Audience, + Scope: conf.Scopes, + RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", + SubjectToken: subjectToken, + SubjectTokenType: conf.SubjectTokenType, + } + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { + options = map[string]interface{}{ + "userProject": conf.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + if err != nil { + return nil, err + } + + accessToken := &oauth2.Token{ + AccessToken: stsResp.AccessToken, + TokenType: stsResp.TokenType, + } + + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. + if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: got invalid expiry from security token service") + } + accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + + if stsResp.RefreshToken != "" { + accessToken.RefreshToken = stsResp.RefreshToken + } + return accessToken, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go similarity index 85% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go rename to vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go index 843d1c3302..dca5681a46 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go @@ -39,51 +39,51 @@ func (nce nonCacheableError) Error() string { } func missingFieldError(source, field string) error { - return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) + return fmt.Errorf("oauth2/google/externalaccount: %v missing `%q` field", source, field) } func jsonParsingError(source, data string) error { - return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) + return fmt.Errorf("oauth2/google/externalaccount: unable to parse %v\nResponse: %v", source, data) } func malformedFailureError() error { - return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} + return nonCacheableError{"oauth2/google/externalaccount: response must include `error` and `message` fields when unsuccessful"} } func userDefinedError(code, message string) error { - return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} + return nonCacheableError{fmt.Sprintf("oauth2/google/externalaccount: response contains 
unsuccessful response: (%v) %v", code, message)} } func unsupportedVersionError(source string, version int) error { - return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported version: %v", source, version) } func tokenExpiredError() error { - return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} + return nonCacheableError{"oauth2/google/externalaccount: the token returned by the executable is expired"} } func tokenTypeError(source string) error { - return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported token type", source) } func exitCodeError(exitCode int) error { - return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) + return fmt.Errorf("oauth2/google/externalaccount: executable command failed with exit code %v", exitCode) } func executableError(err error) error { - return fmt.Errorf("oauth2/google: executable command failed: %v", err) + return fmt.Errorf("oauth2/google/externalaccount: executable command failed: %v", err) } func executablesDisallowedError() error { - return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") + return errors.New("oauth2/google/externalaccount: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") } func timeoutRangeError() error { - return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") + return errors.New("oauth2/google/externalaccount: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") } func commandMissingError() error { - return errors.New("oauth2/google: missing `command` field — executable command must be provided") + return errors.New("oauth2/google/externalaccount: missing `command` field — executable command must be provided") } type environment interface { @@ -146,7 +146,7 @@ type executableCredentialSource struct { // CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. // It also performs defaulting and type conversions. -func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { +func createExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { if ec.Command == "" { return executableCredentialSource{}, commandMissingError() } diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go new file mode 100644 index 0000000000..33766b9722 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" +) + +type fileCredentialSource struct { + File string + Format Format +} + +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + +func (cs fileCredentialSource) subjectToken() (string, error) { + tokenFile, err := os.Open(cs.File) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to open credential file %q", cs.File) + } + defer tokenFile.Close() + tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to read credential file: %v", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(tokenBytes), nil + case "": + return string(tokenBytes), nil + default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/externalaccount/header.go similarity index 100% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go rename to vendor/golang.org/x/oauth2/google/externalaccount/header.go diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go new file mode 100644 index 0000000000..6c1abdf2da --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go @@ -0,0 +1,21 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import "context" + +type programmaticRefreshCredentialSource struct { + supplierOptions SupplierOptions + subjectTokenSupplier SubjectTokenSupplier + ctx context.Context +} + +func (cs programmaticRefreshCredentialSource) credentialSourceType() string { + return "programmatic" +} + +func (cs programmaticRefreshCredentialSource) subjectToken() (string, error) { + return cs.subjectTokenSupplier.SubjectToken(cs.ctx, cs.supplierOptions) +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go new file mode 100644 index 0000000000..71a7184e01 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go @@ -0,0 +1,79 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
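The fileCredentialSource above reads the subject token from a file in "text" or "json" format. A hedged sketch of a workload identity federation credential that exercises this path when loaded through google.CredentialsFromJSON; the audience, file path and token URL are placeholders and the JSON layout reflects the documented external_account format rather than anything defined in this diff:

// Illustrative only: a file-sourced external_account credential parsed via the
// google package; all identifiers and paths below are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

const credJSON = `{
  "type": "external_account",
  "audience": "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
  "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
  "token_url": "https://sts.googleapis.com/v1/token",
  "credential_source": {
    "file": "/var/run/secrets/oidc/token",
    "format": {"type": "text"}
  }
}`

func main() {
	creds, err := google.CredentialsFromJSON(context.Background(), []byte(credJSON),
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	// creds.TokenSource reads the file-sourced subject token and exchanges it at STS.
	fmt.Println(creds != nil)
}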
+ +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "golang.org/x/oauth2" +) + +type urlCredentialSource struct { + URL string + Headers map[string]string + Format Format + ctx context.Context +} + +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + +func (cs urlCredentialSource) subjectToken() (string, error) { + client := oauth2.NewClient(cs.ctx, nil) + req, err := http.NewRequest("GET", cs.URL, nil) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: HTTP request for URL-sourced credential failed: %v", err) + } + req = req.WithContext(cs.ctx) + + for key, val := range cs.Headers { + req.Header.Add(key, val) + } + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid response when retrieving subject token: %v", err) + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid body in subject token URL query: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("oauth2/google/externalaccount: status code %d: %s", c, respBody) + } + + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(respBody, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(respBody), nil + case "": + return string(respBody), nil + default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index c66c53527d..7b82e7a083 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -15,8 +15,9 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/externalaccount" "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" + "golang.org/x/oauth2/google/internal/impersonate" "golang.org/x/oauth2/jwt" ) @@ -200,12 +201,12 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, ClientSecret: f.ClientSecret, ClientID: f.ClientID, - CredentialSource: f.CredentialSource, + CredentialSource: &f.CredentialSource, QuotaProjectID: f.QuotaProjectID, Scopes: params.Scopes, WorkforcePoolUserProject: f.WorkforcePoolUserProject, } - return cfg.TokenSource(ctx) + return externalaccount.NewTokenSource(ctx, *cfg) case externalAccountAuthorizedUserKey: cfg := &externalaccountauthorizeduser.Config{ Audience: f.Audience, @@ -228,7 +229,7 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar if err != nil { return nil, err } - imp := externalaccount.ImpersonateTokenSource{ + imp := impersonate.ImpersonateTokenSource{ Ctx: ctx, URL: 
f.ServiceAccountImpersonationURL, Scopes: params.Scopes, @@ -251,7 +252,10 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return computeTokenSource(account, 0, scope...) + // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any + // refreshes earlier are a waste of compute. + earlyExpirySecs := 225 * time.Second + return computeTokenSource(account, earlyExpirySecs, scope...) } func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go deleted file mode 100644 index 33288d3677..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import ( - "context" - "fmt" - "net/http" - "regexp" - "strconv" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/stsexchange" -) - -// now aliases time.Now for testing -var now = func() time.Time { - return time.Now().UTC() -} - -// Config stores the configuration for fetching tokens with external credentials. -type Config struct { - // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload - // identity pool or the workforce pool and the provider identifier in that pool. - Audience string - // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec - // e.g. `urn:ietf:params:oauth:token-type:jwt`. - SubjectTokenType string - // TokenURL is the STS token exchange endpoint. - TokenURL string - // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( - // user attributes like account identifier, eg. email, username, uid, etc). This is - // needed for gCloud session account identification. - TokenInfoURL string - // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only - // required for workload identity pools when APIs to be accessed have not integrated with UberMint. - ServiceAccountImpersonationURL string - // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation - // token will be valid for. - ServiceAccountImpersonationLifetimeSeconds int - // ClientSecret is currently only required if token_info endpoint also - // needs to be called with the generated GCP access token. When provided, STS will be - // called with additional basic authentication using client_id as username and client_secret as password. - ClientSecret string - // ClientID is only required in conjunction with ClientSecret, as described above. - ClientID string - // CredentialSource contains the necessary information to retrieve the token itself, as well - // as some environmental information. - CredentialSource CredentialSource - // QuotaProjectID is injected by gCloud. 
If the value is non-empty, the Auth libraries - // will set the x-goog-user-project which overrides the project associated with the credentials. - QuotaProjectID string - // Scopes contains the desired scopes for the returned access token. - Scopes []string - // The optional workforce pool user project number when the credential - // corresponds to a workforce pool and not a workload identity pool. - // The underlying principal must still have serviceusage.services.use IAM - // permission to use the project for billing/quota. - WorkforcePoolUserProject string -} - -var ( - validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) -) - -func validateWorkforceAudience(input string) bool { - return validWorkforceAudiencePattern.MatchString(input) -} - -// TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. -func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, "https") -} - -// tokenSource is a private function that's directly called by some of the tests, -// because the unit test URLs are mocked, and would otherwise fail the -// validity check. -func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { - if c.WorkforcePoolUserProject != "" { - valid := validateWorkforceAudience(c.Audience) - if !valid { - return nil, fmt.Errorf("oauth2/google: workforce_pool_user_project should not be set for non-workforce pool credentials") - } - } - - ts := tokenSource{ - ctx: ctx, - conf: c, - } - if c.ServiceAccountImpersonationURL == "" { - return oauth2.ReuseTokenSource(nil, ts), nil - } - scopes := c.Scopes - ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} - imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), - TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, - } - return oauth2.ReuseTokenSource(nil, imp), nil -} - -// Subject token file types. -const ( - fileTypeText = "text" - fileTypeJSON = "json" -) - -type format struct { - // Type is either "text" or "json". When not provided "text" type is assumed. - Type string `json:"type"` - // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. - SubjectTokenFieldName string `json:"subject_token_field_name"` -} - -// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. -// The EnvironmentID should start with AWS if being used for an AWS credential. -type CredentialSource struct { - File string `json:"file"` - - URL string `json:"url"` - Headers map[string]string `json:"headers"` - - Executable *ExecutableConfig `json:"executable"` - - EnvironmentID string `json:"environment_id"` - RegionURL string `json:"region_url"` - RegionalCredVerificationURL string `json:"regional_cred_verification_url"` - CredVerificationURL string `json:"cred_verification_url"` - IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` - Format format `json:"format"` -} - -type ExecutableConfig struct { - Command string `json:"command"` - TimeoutMillis *int `json:"timeout_millis"` - OutputFile string `json:"output_file"` -} - -// parse determines the type of CredentialSource needed. 
-func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { - if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { - if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { - if awsVersion != 1 { - return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) - } - - awsCredSource := awsCredentialSource{ - EnvironmentID: c.CredentialSource.EnvironmentID, - RegionURL: c.CredentialSource.RegionURL, - RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, - CredVerificationURL: c.CredentialSource.URL, - TargetResource: c.Audience, - ctx: ctx, - } - if c.CredentialSource.IMDSv2SessionTokenURL != "" { - awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL - } - - return awsCredSource, nil - } - } else if c.CredentialSource.File != "" { - return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil - } else if c.CredentialSource.URL != "" { - return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil - } else if c.CredentialSource.Executable != nil { - return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) - } - return nil, fmt.Errorf("oauth2/google: unable to parse credential source") -} - -type baseCredentialSource interface { - credentialSourceType() string - subjectToken() (string, error) -} - -// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. -type tokenSource struct { - ctx context.Context - conf *Config -} - -func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { - return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", - goVersion(), - "unknown", - credSource.credentialSourceType(), - conf.ServiceAccountImpersonationURL != "", - conf.ServiceAccountImpersonationLifetimeSeconds != 0) -} - -// Token allows tokenSource to conform to the oauth2.TokenSource interface. -func (ts tokenSource) Token() (*oauth2.Token, error) { - conf := ts.conf - - credSource, err := conf.parse(ts.ctx) - if err != nil { - return nil, err - } - subjectToken, err := credSource.subjectToken() - - if err != nil { - return nil, err - } - stsRequest := stsexchange.TokenExchangeRequest{ - GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", - Audience: conf.Audience, - Scope: conf.Scopes, - RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", - SubjectToken: subjectToken, - SubjectTokenType: conf.SubjectTokenType, - } - header := make(http.Header) - header.Add("Content-Type", "application/x-www-form-urlencoded") - header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) - clientAuth := stsexchange.ClientAuthentication{ - AuthStyle: oauth2.AuthStyleInHeader, - ClientID: conf.ClientID, - ClientSecret: conf.ClientSecret, - } - var options map[string]interface{} - // Do not pass workforce_pool_user_project when client authentication is used. - // The client ID is sufficient for determining the user project. 
- if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { - options = map[string]interface{}{ - "userProject": conf.WorkforcePoolUserProject, - } - } - stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) - if err != nil { - return nil, err - } - - accessToken := &oauth2.Token{ - AccessToken: stsResp.AccessToken, - TokenType: stsResp.TokenType, - } - if stsResp.ExpiresIn < 0 { - return nil, fmt.Errorf("oauth2/google: got invalid expiry from security token service") - } else if stsResp.ExpiresIn >= 0 { - accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) - } - - if stsResp.RefreshToken != "" { - accessToken.RefreshToken = stsResp.RefreshToken - } - return accessToken, nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go deleted file mode 100644 index 233a78cef2..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import "fmt" - -// Error for handling OAuth related error responses as stated in rfc6749#5.2. -type Error struct { - Code string - URI string - Description string -} - -func (err *Error) Error() string { - return fmt.Sprintf("got error code %s from %s: %s", err.Code, err.URI, err.Description) -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go deleted file mode 100644 index f35f73c5cb..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package externalaccount - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" -) - -type fileCredentialSource struct { - File string - Format format -} - -func (cs fileCredentialSource) credentialSourceType() string { - return "file" -} - -func (cs fileCredentialSource) subjectToken() (string, error) { - tokenFile, err := os.Open(cs.File) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to open credential file %q", cs.File) - } - defer tokenFile.Close() - tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to read credential file: %v", err) - } - tokenBytes = bytes.TrimSpace(tokenBytes) - switch cs.Format.Type { - case "json": - jsonData := make(map[string]interface{}) - err = json.Unmarshal(tokenBytes, &jsonData) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) - } - val, ok := jsonData[cs.Format.SubjectTokenFieldName] - if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") - } - token, ok := val.(string) - if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") - } - return token, nil - case "text": - return string(tokenBytes), nil - case "": - return string(tokenBytes), nil - default: - return "", errors.New("oauth2/google: invalid credential_source file format type") - } - -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go deleted file mode 100644 index 54c8f209f3..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "golang.org/x/oauth2" -) - -// generateAccesstokenReq is used for service account impersonation -type generateAccessTokenReq struct { - Delegates []string `json:"delegates,omitempty"` - Lifetime string `json:"lifetime,omitempty"` - Scope []string `json:"scope,omitempty"` -} - -type impersonateTokenResponse struct { - AccessToken string `json:"accessToken"` - ExpireTime string `json:"expireTime"` -} - -// ImpersonateTokenSource uses a source credential, stored in Ts, to request an access token to the provided URL. -// Scopes can be defined when the access token is requested. -type ImpersonateTokenSource struct { - // Ctx is the execution context of the impersonation process - // used to perform http call to the URL. Required - Ctx context.Context - // Ts is the source credential used to generate a token on the - // impersonated service account. Required. - Ts oauth2.TokenSource - - // URL is the endpoint to call to generate a token - // on behalf the service account. Required. - URL string - // Scopes that the impersonated credential should have. Required. - Scopes []string - // Delegates are the service account email addresses in a delegation chain. - // Each service account must be granted roles/iam.serviceAccountTokenCreator - // on the next service account in the chain. Optional. - Delegates []string - // TokenLifetimeSeconds is the number of seconds the impersonation token will - // be valid for. 
- TokenLifetimeSeconds int -} - -// Token performs the exchange to get a temporary service account token to allow access to GCP. -func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { - lifetimeString := "3600s" - if its.TokenLifetimeSeconds != 0 { - lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) - } - reqBody := generateAccessTokenReq{ - Lifetime: lifetimeString, - Scope: its.Scopes, - Delegates: its.Delegates, - } - b, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) - } - client := oauth2.NewClient(its.Ctx, its.Ts) - req, err := http.NewRequest("POST", its.URL, bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) - } - req = req.WithContext(its.Ctx) - req.Header.Set("Content-Type", "application/json") - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) - } - - var accessTokenResp impersonateTokenResponse - if err := json.Unmarshal(body, &accessTokenResp); err != nil { - return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err) - } - expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err) - } - return &oauth2.Token{ - AccessToken: accessTokenResp.AccessToken, - Expiry: expiry, - TokenType: "Bearer", - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go deleted file mode 100644 index 606bb4e800..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package externalaccount - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "golang.org/x/oauth2" -) - -type urlCredentialSource struct { - URL string - Headers map[string]string - Format format - ctx context.Context -} - -func (cs urlCredentialSource) credentialSourceType() string { - return "url" -} - -func (cs urlCredentialSource) subjectToken() (string, error) { - client := oauth2.NewClient(cs.ctx, nil) - req, err := http.NewRequest("GET", cs.URL, nil) - if err != nil { - return "", fmt.Errorf("oauth2/google: HTTP request for URL-sourced credential failed: %v", err) - } - req = req.WithContext(cs.ctx) - - for key, val := range cs.Headers { - req.Header.Add(key, val) - } - resp, err := client.Do(req) - if err != nil { - return "", fmt.Errorf("oauth2/google: invalid response when retrieving subject token: %v", err) - } - defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody) - } - - switch cs.Format.Type { - case "json": - jsonData := make(map[string]interface{}) - err = json.Unmarshal(respBody, &jsonData) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) - } - val, ok := jsonData[cs.Format.SubjectTokenFieldName] - if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") - } - token, ok := val.(string) - if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") - } - return token, nil - case "text": - return string(respBody), nil - case "": - return string(respBody), nil - default: - return "", errors.New("oauth2/google: invalid credential_source file format type") - } - -} diff --git a/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go new file mode 100644 index 0000000000..6bc3af1103 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// generateAccesstokenReq is used for service account impersonation +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +// ImpersonateTokenSource uses a source credential, stored in Ts, to request an access token to the provided URL. +// Scopes can be defined when the access token is requested. +type ImpersonateTokenSource struct { + // Ctx is the execution context of the impersonation process + // used to perform http call to the URL. Required + Ctx context.Context + // Ts is the source credential used to generate a token on the + // impersonated service account. Required. + Ts oauth2.TokenSource + + // URL is the endpoint to call to generate a token + // on behalf the service account. 
Required. + URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetimeString, + Scope: its.Scopes, + Delegates: its.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) + } + client := oauth2.NewClient(its.Ctx, its.Ts) + req, err := http.NewRequest("POST", its.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) + } + req = req.WithContext(its.Ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + TokenType: "Bearer", + }, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go deleted file mode 100644 index d28140f789..0000000000 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine - -package internal - -import "google.golang.org/appengine/urlfetch" - -func init() { - appengineClientHook = urlfetch.Client -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 572074a637..b9db01ddfd 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -18,16 +18,11 @@ var HTTPClient ContextKey // because nobody else can create a ContextKey, being unexported. 
type ContextKey struct{} -var appengineClientHook func(context.Context) *http.Client - func ContextClient(ctx context.Context) *http.Client { if ctx != nil { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { return hc } } - if appengineClientHook != nil { - return appengineClientHook(ctx) - } return http.DefaultClient } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 90a2c3d6dc..09f6a49b80 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -393,7 +393,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the // TokenSource returned by ReuseTokenSource, except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb332174..109997d77c 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b8322598ae..a4ea5d14f1 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b63..0000000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434e..0000000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
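The oauth2.go and token.go hunks above correct the ReuseTokenSourceWithExpiry doc comment and add the wire-format ExpiresIn field, which callers must translate into Expiry themselves. A small sketch of both pieces, assuming a token that arrived with only expires_in set; the 225-second buffer mirrors the one ComputeTokenSource now uses:

// Illustrative only: deriving Expiry from the new ExpiresIn field and wrapping
// a source with a configurable early-expiry buffer.
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Suppose only the wire-format expires_in was populated.
	tok := &oauth2.Token{AccessToken: "ya29.placeholder", ExpiresIn: 3600}
	if tok.ExpiresIn > 0 && tok.Expiry.IsZero() {
		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	}

	// Treat the token as expired 225s early, matching the MDS refresh buffer.
	src := oauth2.ReuseTokenSourceWithExpiry(tok, oauth2.StaticTokenSource(tok), 225*time.Second)
	got, err := src.Token()
	fmt.Println(got != nil, err)
}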
- -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 0000000000..fd8632444e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. 
+func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. 
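ReadSubKeyNames above mirrors os.File.Readdirnames: a positive n caps the result, while a negative n returns every subkey. A hedged, Windows-only usage sketch; the key path is only an example:

//go:build windows

// Illustrative only: enumerate all subkeys under a well-known key.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft`, registry.ENUMERATE_SUB_KEYS)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	names, err := k.ReadSubKeyNames(-1) // negative n: no limit, like Readdirnames
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}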
+func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 0000000000..bbf86ccf0c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 0000000000..f533091c19 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 0000000000..74db26b94d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. 
+ NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. +func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. 
+func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. 
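The typed getters above return the raw registry value type alongside the data, and EXPAND_SZ strings additionally need ExpandString. A short sketch under the assumption that the queried value is of type EXPAND_SZ; the key path and value name are placeholders:

//go:build windows

// Illustrative only: read a string value and expand environment variables when
// the stored type is EXPAND_SZ; key path and value name are placeholders.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.CURRENT_USER, `Environment`, registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	raw, valType, err := k.GetStringValue("TEMP")
	if err != nil {
		log.Fatal(err)
	}
	if valType == registry.EXPAND_SZ {
		if raw, err = registry.ExpandString(raw); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println(raw)
}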
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. 
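[Editorial note, not part of the vendored diff] For the write side added above (SetStringValue, SetDWordValue, DeleteValue), a minimal sketch; the key path and value names are purely illustrative assumptions.

//go:build windows

package main

import (
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// CreateKey opens the key if it already exists; the path is an example.
	k, _, err := registry.CreateKey(registry.CURRENT_USER,
		`SOFTWARE\ExampleVendor\ExampleApp`, registry.SET_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	if err := k.SetStringValue("InstallDir", `C:\ExampleApp`); err != nil {
		log.Fatal(err)
	}
	if err := k.SetDWordValue("LaunchCount", 1); err != nil {
		log.Fatal(err)
	}
	// DeleteValue removes a named value again.
	if err := k.DeleteValue("LaunchCount"); err != nil {
		log.Fatal(err)
	}
}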
+func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 0000000000..fc1835d8a2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := 
syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493f..93a798ab63 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 2c4c4e2328..6e34df4613 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // Does augmented child strictly contain [start, end)? if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). 
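[Editorial note, not part of the vendored diff] Stepping back to the golang.org/x/time/rate hunk above: NewLimiter now explicitly constructs the limiter with a full token bucket, and the special case for a zero limit in reserveN is removed. A minimal, illustrative sketch of typical limiter usage:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Allow 5 events per second with bursts of up to 10 tokens.
	// The bucket starts full, so an initial burst of Allow calls succeeds.
	lim := rate.NewLimiter(rate.Limit(5), 10)

	for i := 0; i < 3; i++ {
		fmt.Println("allowed:", lim.Allow())
	}

	// Wait blocks until a token is available or the context is done.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("wait:", err)
	}
}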
But we don't add it back for the "func" token + // even though it is is the tree at FuncDecl.Type.Func. + if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) } // Does [start, end) overlap multiple children? @@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node { // // As a workaround, we inline the case for FuncType // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. // children = nil // discard ast.Walk(FuncDecl) info subtrees children = append(children, tok(n.Type.Func, len("func"))) @@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string { } panic(fmt.Sprintf("unexpected node type: %T", n)) } + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 18d1adb05d..a6b5ed0a89 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r } // UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } spec := importSpec(f, path) if spec == nil { return diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 58934f7663..5c8dbbb7a3 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -183,7 +183,7 @@ type application struct { func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() { n = nil } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index 919d5305ab..c820b20849 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -7,12 +7,7 @@ package astutil import "go/ast" // Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} +// Deprecated: use [ast.Unparen]. +// +//go:fix inline +func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 137cc8df1d..65fe2628e9 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. 
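[Editorial note, not part of the vendored diff] The astutil change above documents that UsesImport requires a file parsed with syntactic object resolution. A hedged sketch of a compliant caller; the source string is a made-up example.

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = `package p

import "fmt"

func f() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	// Do not pass parser.SkipObjectResolution here: with the change above,
	// UsesImport panics when f.Scope is nil.
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	fmt.Println("uses fmt:", astutil.UsesImport(f, "fmt"))
}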
-// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. +// +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. +// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. Such files can be later +// decoded by Read, but cannot be consumed by the compiler. +// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. +// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. 
The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata import ( "bufio" @@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) + size, err := gcimporter.FindExportData(buf) if err != nil { return nil, err } - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. - return buf, nil - } + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil } // readAll works the same way as io.ReadAll, but avoids allocations and copies @@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. + // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified, produced by cmd/compile since go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go deleted file mode 100644 index 333676b7cf..0000000000 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesdriver fetches type sizes for go/packages and go/analysis. 
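[Editorial note, not part of the vendored diff] Returning to the gcexportdata API documented above: the usual flow is NewReader to locate the export data inside an archive, then Read to decode it into go/types structures. A minimal sketch; the file path is hypothetical and would normally come from 'go list -export'.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	const exportFile = "/tmp/example.a" // hypothetical path to an export data archive

	f, err := os.Open(exportFile)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader locates the export data section inside the archive.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, "example.com/mypkg")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded package:", pkg.Path())
}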
-package packagesdriver - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/gocommand" -) - -func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. - // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. - return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index a8d7b06ac0..f1931d10ee 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. +causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them +uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -[Load] may be used to load Go packages even in Go projects that use +Load may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) 
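[Editorial note, not part of the vendored diff] The doc.go hunk above describes the external driver protocol: patterns arrive as command-line arguments, a JSON DriverRequest is written to the driver's stdin, and the driver answers with a JSON DriverResponse on stdout. A hedged sketch of a trivial driver that simply defers to the default 'go list' behaviour; pointing GOPACKAGESDRIVER at the compiled binary would make go/packages invoke it.

package main

import (
	"encoding/json"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatal(err)
	}
	// os.Args[1:] holds the query patterns. A real driver would answer
	// them from its build system; NotHandled falls back to 'go list'.
	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		log.Fatal(err)
	}
}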
+When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" @@ -198,14 +207,6 @@ Instead, ssadump no longer requests the runtime package, but seeks it among the dependencies of the user-specified packages, and emits an error if it is not found. -Overlays: The Overlay field in the Config allows providing alternate contents -for Go source files, by providing a mapping from file path to contents. -go/packages will pull in new imports added in overlay files when go/packages -is run in LoadImports mode or greater. -Overlay support for the go list driver isn't complete yet: if the file doesn't -exist on disk, it will only be recognized in an overlay if it is a non-test file -and the package would be reported even without the overlay. - Questions & Tasks - Add GOARCH/GOOS? diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 4335c1eb14..91bd62e83b 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -13,6 +13,7 @@ import ( "fmt" "os" "os/exec" + "slices" "strings" ) @@ -34,8 +35,8 @@ type DriverRequest struct { // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` - // Overlay maps file paths (relative to the driver's working directory) to the byte contents - // of overlay files. + // Overlay maps file paths (relative to the driver's working directory) + // to the contents of overlay files (see Config.Overlay). Overlay map[string][]byte `json:"overlay"` } @@ -79,10 +80,10 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) +type driver func(cfg *Config, patterns []string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. func findExternalDriver(cfg *Config) driver { @@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*DriverResponse, error) { + return func(cfg *Config, patterns []string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -117,9 +118,21 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) cmd.Dir = cfg.Dir - cmd.Env = cfg.Env + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. + // + // os.Getwd stdlib has a special feature where if the + // cwd and the PWD are the same node then it trusts + // the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go + // command. 
+ // + // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) + cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 22305d9c90..0458b4f9c4 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -21,7 +21,6 @@ import ( "sync" "unicode" - "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" ) @@ -81,6 +80,12 @@ type golistState struct { cfg *Config ctx context.Context + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + envOnce sync.Once goEnvError error goEnv map[string]string @@ -128,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -143,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, } // Fill in response.Sizes asynchronously if necessary. - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { errCh := make(chan error) go func() { - compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -312,6 +322,7 @@ type jsonPackage struct { ImportPath string Dir string Name string + Target string Export string GoFiles []string CompiledGoFiles []string @@ -495,13 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, + Dir: p.Dir, + Target: p.Target, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - forTest: p.ForTest, + ForTest: p.ForTest, depsErrors: p.DepsErrors, Module: p.Module, } @@ -682,7 +695,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. 
func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) }) return state.goVersion, state.goVersionError } @@ -752,7 +765,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -760,7 +773,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&NeedTypes != 0 { + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -785,7 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. addFields("Dir", "Export") } - if cfg.Mode&needInternalForTest != 0 { + if cfg.Mode&NeedForTest != 0 { addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { @@ -800,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } + if cfg.Mode&NeedTarget != 0 { + addFields("Target") + } return "-json=" + strings.Join(fields, ",") } @@ -841,6 +857,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, + Overlay: state.overlay, } } @@ -849,33 +866,10 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, cfg := state.cfg inv := state.cfgInvocation() - - // For Go versions 1.16 and above, `go list` accepts overlays directly via - // the -overlay flag. Set it, if it's available. - // - // The check for "list" is not necessarily required, but we should avoid - // getting the go version if possible. - if verb == "list" { - goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - if goVersion >= 16 { - filename, cleanup, err := state.writeOverlays() - if err != nil { - return nil, err - } - defer cleanup() - inv.Overlay = filename - } - } inv.Verb = verb inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -899,6 +893,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. 
// @@ -1015,67 +1015,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return stdout, nil } -// OverlayJSON is the format overlay files are expected to be in. -// The Replace map maps from overlaid paths to replacement paths: -// the Go command will forward all reads trying to open -// each overlaid path to its replacement path, or consider the overlaid -// path not to exist if the replacement path is empty. -// -// From golang/go#39958. -type OverlayJSON struct { - Replace map[string]string `json:"replace,omitempty"` -} - -// writeOverlays writes out files for go list's -overlay flag, as described -// above. -func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { - // Do nothing if there are no overlays in the config. - if len(state.cfg.Overlay) == 0 { - return "", func() {}, nil - } - dir, err := os.MkdirTemp("", "gopackages-*") - if err != nil { - return "", nil, err - } - // The caller must clean up this directory, unless this function returns an - // error. - cleanup = func() { - os.RemoveAll(dir) - } - defer func() { - if err != nil { - cleanup() - } - }() - overlays := map[string]string{} - for k, v := range state.cfg.Overlay { - // Create a unique filename for the overlaid files, to avoid - // creating nested directories. - noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) - if err != nil { - return "", func() {}, err - } - if _, err := f.Write(v); err != nil { - return "", func() {}, err - } - if err := f.Close(); err != nil { - return "", func() {}, err - } - overlays[k] = f.Name() - } - b, err := json.Marshal(OverlayJSON{Replace: overlays}) - if err != nil { - return "", func() {}, err - } - // Write out the overlay file that contains the filepath mappings. - filename = filepath.Join(dir, "overlay.json") - if err := os.WriteFile(filename, b, 0665); err != nil { - return "", func() {}, err - } - return filename, cleanup, nil -} - func containsGoFile(s []string) bool { for _, f := range s { if strings.HasSuffix(f, ".go") { @@ -1104,3 +1043,44 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// getSizesForArgs queries 'go list' for the appropriate +// Compiler and GOARCH arguments to pass to [types.SizesFor]. +func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. 
+ return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21b5..69eec9f44d 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,48 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedForTest, "NeedForTest"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, + {NeedTarget, "NeedTarget"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + } + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - if m != 0 { - out = append(out, "Unknown") + if len(out) == 1 { + return out[0] } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 3ea1b3fa46..6665a04c17 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" - "io" "log" "os" "path/filepath" "runtime" "strings" "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,23 +31,45 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. // The bits below can be combined to specify which fields should be // filled in the result packages. +// // The zero value is a special case, equivalent to combining // the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// // ID and Errors (if present) will always be filled. -// Load may return more information than requested. +// [Load] may return more information than requested. 
+// +// The Mode flag is a union of several bits named NeedName, +// NeedFiles, and so on, each of which determines whether +// a given field of Package (Name, Files, etc) should be +// populated. +// +// For convenience, we provide named constants for the most +// common combinations of Need flags: +// +// [LoadFiles] lists of files in each package +// [LoadImports] ... plus imports +// [LoadTypes] ... plus type information +// [LoadSyntax] ... plus type-annotated syntax +// [LoadAllSyntax] ... for all dependencies +// +// Unfortunately there are a number of open bugs related to +// interactions among the LoadMode bits: +// - https://go.dev/issue/56633 +// - https://go.dev/issue/56677 +// - https://go.dev/issue/58726 +// - https://go.dev/issue/63517 type LoadMode int const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles and OtherFiles. + // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -66,10 +88,10 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax - // NeedTypesInfo adds TypesInfo. + // NeedTypesInfo adds TypesInfo and Fset. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -78,9 +100,10 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors - // needInternalForTest adds the internal forTest field. + // NeedForTest adds ForTest. + // // Tests must also be set on the context for this field to be populated. - needInternalForTest + NeedForTest // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -94,36 +117,39 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + + // NeedTarget adds Target. + NeedTarget + + // Be sure to update loadmode_string.go when adding new items! ) const ( - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadFiles loads the name and file names for the initial packages. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadImports loads the name, file names, and import mapping for the initial packages. LoadImports = LoadFiles | NeedImports - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadTypes loads exported type information for the initial packages. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadSyntax loads typed syntax for the initial packages. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. 
+ // + //go:fix inline NeedExportsFile = NeedExportFile ) // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. -// Calls to Load do not modify this struct. +// +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -137,7 +163,7 @@ type Config struct { // If the user provides a logger, debug logging is enabled. // If the GOPACKAGESDEBUG environment variable is set to true, // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Dir is the directory in which to run the build system's query tool // that provides information about the packages. @@ -154,19 +180,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -199,19 +216,46 @@ type Config struct { // setting Tests may have no effect. Tests bool - // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the - // alternative file contents provided by the map. + // Overlay is a mapping from absolute file paths to file contents. + // + // For each map entry, [Load] uses the alternative file + // contents provided by the overlay mapping instead of reading + // from the file system. This mechanism can be used to enable + // editor-integrated tools to correctly analyze the contents + // of modified but unsaved buffers, for example. // - // Overlays provide incomplete support for when a given file doesn't - // already exist on disk. See the package doc above for more details. + // The overlay mapping is passed to the build system's driver + // (see "The driver protocol") so that it too can report + // consistent package metadata about unsaved files. However, + // drivers may vary in their level of support for overlays. Overlay map[string][]byte + + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. +// +// The [Config.Mode] field is a set of bits that determine what kinds +// of information should be computed and returned. Modes that require +// more information tend to be slower. See [LoadMode] for details +// and important caveats. Its zero value is equivalent to +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. +// +// Each call to Load returns a new set of [Package] instances. +// The Packages and their Imports form a directed acyclic graph. 
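[Editorial note, not part of the vendored diff] Given the reworked LoadMode and Load documentation above, a minimal, illustrative sketch of loading packages with explicit Need bits; the './...' pattern is only an example.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only what is used; more Need bits mean slower loads.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err) // invalid patterns or driver failure
	}
	// Per-package problems are reported via pkg.Errors, not err.
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.PkgPath, len(pkg.GoFiles), "files")
	}
}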
+// +// If the [NeedTypes] mode flag was set, each call to Load uses a new +// [types.Importer], so [types.Object] and [types.Type] values from +// different calls to Load must not be mixed as they will have +// inconsistent notions of type identity. // // If any of the patterns was invalid as defined by the // underlying build system, Load returns an error. @@ -220,7 +264,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -283,10 +327,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through + } + + // go list fallback + + // Write overlays once, as there are many calls + // to 'go list' (one per chunk plus others too). + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + if err != nil { + return nil, false, err } + defer cleanupOverlay() - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -324,16 +382,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) + responses[i], err = driver(cfg, chunk) if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -365,6 +421,9 @@ func mergeResponses(responses ...*DriverResponse) *DriverResponse { } // A Package describes a loaded Go package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. type Package struct { // ID is a unique identifier for a package, // in a syntax provided by the underlying build system. @@ -380,6 +439,12 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string + // Dir is the directory associated with the package, if it exists. + // + // For packages listed by the go command, this is the directory containing + // the package files. + Dir string + // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -419,10 +484,21 @@ type Package struct { // information for the package as provided by the build system. ExportFile string + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. 
+ Target string + // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package + // Module is the module information for the package if it exists. + // + // Note: it may be missing for std and cmd; see Go issue #65816. + Module *Module + + // -- The following fields are not part of the driver JSON schema. -- + // Types provides type information for the package. // The NeedTypes LoadMode bit sets this field for packages matching the // patterns; type information for dependencies may be missing or incomplete, @@ -431,15 +507,15 @@ type Package struct { // Each call to [Load] returns a consistent set of type // symbols, as defined by the comment at [types.Identical]. // Avoid mixing type information from two or more calls to [Load]. - Types *types.Package + Types *types.Package `json:"-"` // Fset provides position information for Types, TypesInfo, and Syntax. // It is set only when Types is set. - Fset *token.FileSet + Fset *token.FileSet `json:"-"` // IllTyped indicates whether the package or any dependency contains errors. // It is set only when Types is set. - IllTyped bool + IllTyped bool `json:"-"` // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. // @@ -449,26 +525,28 @@ type Package struct { // // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. - Syntax []*ast.File + Syntax []*ast.File `json:"-"` // TypesInfo provides type information about the package's syntax trees. // It is set only when Syntax is set. - TypesInfo *types.Info + TypesInfo *types.Info `json:"-"` // TypesSizes provides the effective size function for types in TypesInfo. - TypesSizes types.Sizes + TypesSizes types.Sizes `json:"-"` + + // -- internal -- - // forTest is the package under test, if any. - forTest string + // ForTest is the package under test, if any. + ForTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError - - // module is the module information for the package if it exists. - Module *Module } // Module provides module information for a package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. type Module struct { Path string // module path Version string // module version @@ -488,21 +566,17 @@ type ModuleError struct { } func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config interface{}, value string) { + packagesinternal.SetModFile = func(config any, value string) { config.(*Config).modFile = value } - packagesinternal.SetModFlag = func(config interface{}, value string) { + packagesinternal.SetModFlag = func(config any, value string) { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. 
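[Editorial note, not part of the vendored diff] The expanded Overlay documentation above is the mechanism editor-integrated tools use to analyze unsaved buffers. A short sketch of supplying overlay contents to Load; the directory, file path, and contents are hypothetical, and drivers may vary in their level of overlay support.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Overlay keys must be absolute file paths; this one is hypothetical.
	overlay := map[string][]byte{
		"/home/user/project/extra.go": []byte("package project\n\nconst Extra = 1\n"),
	}
	cfg := &packages.Config{
		Mode:    packages.NeedName | packages.NeedFiles | packages.NeedSyntax,
		Dir:     "/home/user/project", // hypothetical module directory
		Overlay: overlay,
	}
	pkgs, err := packages.Load(cfg, ".")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		// The overlay file is reported in GoFiles even if it does not
		// exist on disk, subject to the driver's overlay support.
		fmt.Println(pkg.ID, pkg.GoFiles)
	}
}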
@@ -601,6 +675,7 @@ func (p *Package) UnmarshalJSON(b []byte) error { OtherFiles: flat.OtherFiles, EmbedFiles: flat.EmbedFiles, EmbedPatterns: flat.EmbedPatterns, + IgnoredFiles: flat.IgnoredFiles, ExportFile: flat.ExportFile, } if len(flat.Imports) > 0 { @@ -617,18 +692,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -665,7 +741,7 @@ func newLoader(cfg *Config) *loader { if debug { ld.Config.Logf = log.Printf } else { - ld.Config.Logf = func(format string, args ...interface{}) {} + ld.Config.Logf = func(format string, args ...any) {} } } if ld.Config.Mode == 0 { @@ -674,9 +750,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -690,7 +763,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -699,6 +772,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } @@ -730,7 +804,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or itas a non- root and the user requested dependencies... 
needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -755,9 +829,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). + var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -776,63 +851,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code. var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + + // -- postorder -- + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true } - lpkg.importErrors[importPath] = importErr - continue } - if visit(imp) { - lpkg.needsrc = true + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes } - lpkg.Imports[importPath] = imp.Package - } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true + // Add packages with no imports directly to the queue of leaves. 
+ if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - stack = stack[:len(stack)-1] // pop - lpkg.color = black return lpkg.needsrc } // For each initial package, create its import DAG. for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -845,16 +933,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqeuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueues adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -896,12 +1013,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -916,31 +1035,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. 
+// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -976,6 +1074,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data. if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1081,7 +1183,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1092,16 +1194,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1154,6 +1260,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1218,8 +1328,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. +var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1236,20 +1349,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. + // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. 
+ cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1264,18 +1385,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1434,6 +1558,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } return loadMode } @@ -1442,4 +1570,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b72..df14ffd94d 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. + mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index a2386c347a..16ed3c1780 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,8 +63,8 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. 
-// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, +// - The TT operators are encoded as [EKPRUTrCa]; +// two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. // - The TO operators are encoded as [AFMO]; // three of these (At,Field,Method) require an integer operand, @@ -98,19 +98,21 @@ const ( opType = '.' // .Type() (Object) // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) ) // For is equivalent to new(Encoder).For(obj). @@ -226,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. 
return "", fmt.Errorf("no path for %v", obj) @@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { + if alias, ok := T.(*types.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { - // generic named type - return Path(r), nil - } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { + return Path(r), nil + } + + } else if tname.IsAlias() { + // legacy alias + if r := find(obj, T, path); r != nil { + return Path(r), nil } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -305,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -313,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -325,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -440,43 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. +func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { + return r + } + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.find(T.Params(), append(path, opParams)); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -485,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -496,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) + } + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -525,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { 
+ return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { + path2 := appendOpArg(path, op, i) + if r := f.find(tparam, path2); r != nil { return r } } @@ -580,10 +619,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { code := suffix[0] suffix = suffix[1:] - // Codes [AFM] have an integer operand. + // Codes [AFMTr] have an integer operand. var index int switch code { - case opAt, opField, opMethod, opTypeParam: + case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: rest := strings.TrimLeft(suffix, "0123456789") numerals := suffix[:len(suffix)-len(rest)] suffix = rest @@ -616,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -653,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*types.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { @@ -664,6 +713,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = tparams.At(index) + case opRecvTypeParam: + sig, ok := t.(*types.Signature) // Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + rtparams := sig.RecvTypeParams() + if n := rtparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = rtparams.At(index) + case opConstraint: tparam, ok := t.(*types.TypeParam) if !ok { @@ -725,6 +785,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } } + if obj == nil { + panic(p) // path does not end in an object-valued operator + } + if obj.Pkg() != pkg { return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 0000000000..754380351e --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := ast.Unparen(call.Fun) + + // Look through type instantiation if necessary. + isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. 
+ // Example: Don't match a slice m within the expression `m[0]()`. + isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. + if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 0000000000..b81ce0c330 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 0000000000..b6d542c64e --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,475 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil + +import ( + "bytes" + "fmt" + "go/types" + "hash/maphash" + "unsafe" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. 
+// +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. +type Map struct { + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher has no effect. +// +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + hash := hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. 
+func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +// -- Hasher -- + +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) +} + +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. +func MakeHasher() Hasher { return theHasher } + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + return hasher{inGenericSig: false}.hash(t) +} + +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + tparams := t.TypeParams() + if n := tparams.Len(); n > 0 { + h.inGenericSig = true // affects constraints, params, and results + + for i := range n { + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). 
+ m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) + + case *types.Named: + hash := h.hashTypeName(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) + } + return hash +} + +func (h hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. + terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns the hash of a type parameter. +func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { + // Within the signature of a generic function, TypeParams are + // identical if they have the same index and constraint, so we + // hash them based on index. + // + // When we are outside a generic function, free TypeParams are + // identical iff they are the same object, so we can use a + // more discriminating hash consistent with object identity. + // This optimization saves [Map] about 4% when hashing all the + // types.Info.Types in the forward closure of net/http. + if !h.inGenericSig { + // Optimization: outside a generic function signature, + // use a more discrimating hash consistent with object identity. + return h.hashTypeName(t.Obj()) + } + return 9173 + 3*uint32(t.Index()) +} + +var theSeed = maphash.MakeSeed() + +// hashTypeName hashes the pointer of tname. +func (hasher) hashTypeName(tname *types.TypeName) uint32 { + // Since types.Identical uses == to compare TypeNames, + // the Hash function uses maphash.Comparable. + // TODO(adonovan): or will, when it becomes available in go1.24. + // In the meantime we use the pointer's numeric value. + // + // hash := maphash.Comparable(theSeed, tname) + // + // (Another approach would be to hash the name and package + // path, and whether or not it is a package-level typename. It + // is rare for a package to define multiple local types with + // the same name.) + ptr := uintptr(unsafe.Pointer(tname)) + if unsafe.Sizeof(ptr) == 8 { + hash := uint64(ptr) + return uint32(hash ^ (hash >> 32)) + } else { + return uint32(ptr) + } +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. 
+// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Alias: + return h.shallowHash(types.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. + return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashTypeName(t.Obj()) + + case *types.TypeParam: + return h.hashTypeParam(t) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 0000000000..f7666028fe --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) 
+ mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 0000000000..9dda6a25df --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee45..b9425f5a20 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. 
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index c027b9f315..0000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b329954841..7716a3331d 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,31 +11,51 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) +} + +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *types.Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} + +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. 
+func TypeArgs(alias *types.Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. +func Origin(alias *types.Alias) *types.Alias { + if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { + return alias.Origin() // go1.23+ + } + return alias // not an instance of a generic alias (go1.22) } // Enabled reports whether [NewAlias] should create [types.Alias] types. @@ -56,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go index a02206e301..4cfa51b612 100644 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { } // Get can be used to get a label for the key from a label.Map. -func (k *Value) Get(lm label.Map) interface{} { +func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) } @@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} { } // From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } +func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } // Tag represents a key for tagging labels that have no value. // These are used when the existence of the label is the entire information it diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 0f526e1f9a..7c00ca2a6d 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -32,7 +32,7 @@ type Key interface { type Label struct { key Key packed uint64 - untyped interface{} + untyped any } // Map is the interface to a collection of Labels indexed by key. @@ -76,13 +76,13 @@ type mapChain struct { // OfValue creates a new label from the key and value. // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. 
-func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } // UnpackValue assumes the label was built using LabelOfValue and returns the value // that was passed to that constructor. // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } +func (t Label) UnpackValue() any { return t.untyped } // Of64 creates a new label from a key and a uint64. This is often // used for non uint64 values that can be packed into a uint64. diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2a9..734c46198d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -14,7 +14,7 @@ import ( "sync" ) -func errorf(format string, args ...interface{}) { +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index f6437feb1c..5662a311da 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,49 +2,183 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. +// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. +// This file also additionally implements FindExportData for gcexportdata.NewReader. 
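
// Illustrative sketch, not part of the vendored diff: the rewritten file
// header above notes that FindExportData exists to serve
// gcexportdata.NewReader. A client of the public
// golang.org/x/tools/go/gcexportdata package would read export data roughly
// as below; the archive path "fmt.a" is a hypothetical placeholder.
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	f, err := os.Open("fmt.a") // hypothetical compiler-produced archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader positions the reader at the export data section
	// (internally using FindExportData).
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Scope().Names())
}
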
package gcimporter import ( "bufio" + "bytes" + "errors" "fmt" + "go/build" "io" - "strconv" + "os" + "os/exec" + "path/filepath" "strings" + "sync" ) -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying cmd/compile created archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. +// This returns the length of the export data in bytes. +// +// This function is needed by [gcexportdata.Read], which must +// accept inputs produced by the last two releases of cmd/compile, +// plus tip. +func FindExportData(r *bufio.Reader) (size int64, err error) { + arsize, err := FindPackageDefinition(r) + if err != nil { + return + } + size = int64(arsize) + + objapi, headers, err := ReadObjectHeaders(r) if err != nil { return } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) + size -= int64(len(objapi)) + for _, h := range headers { + size -= int64(len(h)) + } + + // Check for the binary export data section header "$$B\n". + // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading + line, err := r.ReadSlice('\n') + if err != nil { + return + } + hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") + size -= int64(len(hdr)) + + // For files with a binary export data header "$$B\n", + // these are always terminated by an end-of-section marker "\n$$\n". + // So the last bytes must always be this constant. + // + // The end-of-section marker is not a part of the export data itself. + // Do not include these in size. + // + // It would be nice to have sanity check that the final bytes after + // the export data are indeed the end-of-section marker. The split + // of gcexportdata.NewReader and gcexportdata.Read make checking this + // ugly so gcimporter gives up enforcing this. The compiler and go/types + // importer do enforce this, which seems good enough. + const endofsection = "\n$$\n" + size -= int64(len(endofsection)) + + if size < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) return } - name = strings.TrimSpace(string(hdr[:16])) + return } -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. -func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { +// ReadUnified reads the contents of the unified export data from a reader r +// that contains the contents of a GC-created archive file. +// +// On success, the reader will be positioned after the end-of-section marker "\n$$\n". +// +// Supported GC-created archive files have 4 layers of nesting: +// - An archive file containing a package definition file. 
+// - The package definition file contains headers followed by a data section. +// Headers are lines (≤ 4kb) that do not start with "$$". +// - The data section starts with "$$B\n" followed by export data followed +// by an end of section marker "\n$$\n". (The section start "$$\n" is no +// longer supported.) +// - The export data starts with a format byte ('u') followed by the in +// the given format. (See ReadExportDataHeader for older formats.) +// +// Putting this together, the bytes in a GC-created archive files are expected +// to look like the following. +// See cmd/internal/archive for more details on ar file headers. +// +// | \n | ar file signature +// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. +// | go object <...>\n | objabi header +// | \n | other headers such as build id +// | $$B\n | binary format marker +// | u\n | unified export +// | $$\n | end-of-section marker +// | [optional padding] | padding byte (0x0A) if size is odd +// | [ar file header] | other ar files +// | [ar file data] | +func ReadUnified(r *bufio.Reader) (data []byte, err error) { + // We historically guaranteed headers at the default buffer size (4096) work. + // This ensures we can use ReadSlice throughout. + const minBufferSize = 4096 + r = bufio.NewReaderSize(r, minBufferSize) + + size, err := FindPackageDefinition(r) + if err != nil { + return + } + n := size + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + n -= len(objapi) + for _, h := range headers { + n -= len(h) + } + + hdrlen, err := ReadExportDataHeader(r) + if err != nil { + return + } + n -= hdrlen + + // size also includes the end of section marker. Remove that many bytes from the end. + const marker = "\n$$\n" + n -= len(marker) + + if n < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) + return + } + + // Read n bytes from buf. + data = make([]byte, n) + _, err = io.ReadFull(r, data) + if err != nil { + return + } + + // Check for marker at the end. + var suffix [len(marker)]byte + _, err = io.ReadFull(r, suffix[:]) + if err != nil { + return + } + if s := string(suffix[:]); s != marker { + err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) + return + } + + return +} + +// FindPackageDefinition positions the reader r at the beginning of a package +// definition file ("__.PKGDEF") within a GC-created archive by reading +// from it, and returns the size of the package definition file in the archive. +// +// The reader must be positioned at the start of the archive file before calling +// this function, and "__.PKGDEF" is assumed to be the first file in the archive. +// +// See cmd/internal/archive for details on the archive format. +func FindPackageDefinition(r *bufio.Reader) (size int, err error) { + // Uses ReadSlice to limit risk of malformed inputs. + // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -52,48 +186,236 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } + // Is the first line an archive file signature? 
+ if string(line) != "!\n" { + err = fmt.Errorf("not the start of an archive file (%q)", line) + return + } + + // package export block should be first + size = readArchiveHeader(r, "__.PKGDEF") + if size <= 0 { + err = fmt.Errorf("not a package file") + return + } + + return +} - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") +// ReadObjectHeaders reads object headers from the reader. Object headers are +// lines that do not start with an end-of-section marker "$$". The first header +// is the objabi header. On success, the reader will be positioned at the beginning +// of the end-of-section marker. +// +// It returns an error if any header does not fit in r.Size() bytes. +func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { + // line is a temporary buffer for headers. + // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. + var line []byte + + // objapi header should be the first line + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + objapi = string(line) + + // objapi header begins with "go object ". + if !strings.HasPrefix(objapi, "go object ") { + err = fmt.Errorf("not a go object file: %s", objapi) + return + } + + // process remaining object header lines + for { + // check for an end of section marker "$$" + line, err = r.Peek(2) + if err != nil { return } + if string(line) == "$$" { + return // stop + } - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + // read next header + line, err = r.ReadSlice('\n') + if err != nil { return } - size -= int64(len(line)) + headers = append(headers, string(line)) } +} - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") +// ReadExportDataHeader reads the export data header and format from r. +// It returns the number of bytes read, or an error if the format is no longer +// supported or it failed to read. +// +// The only currently supported format is binary export data in the +// unified export format. +func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { + // Read export data header. + line, err := r.ReadSlice('\n') + if err != nil { return } - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + hdr := string(line) + switch hdr { + case "$$\n": + err = fmt.Errorf("old textual export format no longer supported (recompile package)") + return + + case "$$B\n": + var format byte + format, err = r.ReadByte() + if err != nil { return } - size -= int64(len(line)) - } - hdr = string(line) - if size < 0 { - size = -1 + // The unified export format starts with a 'u'. + switch format { + case 'u': + default: + // Older no longer supported export formats include: + // indexed export format which started with an 'i'; and + // the older binary export format which started with a 'c', + // 'd', or 'v' (from "version"). 
+ err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) + return + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + return } + n = len(hdr) + 1 // + 1 is for 'u' return } + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// +// FindPkg is only used in tests within x/tools. +func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then this copy into a _test package. + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. 
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df91124a..3dbd21d1b9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" - "bytes" "fmt" - "go/build" "go/token" "go/types" "io" "os" - "os/exec" - "path/filepath" - "strings" - "sync" ) const ( @@ -45,125 +39,14 @@ const ( trace = false ) -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +// +// Import is only used in tests. +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var filename, id string + var id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -182,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { - filename, id = FindPkg(path, srcDir) + var filename string + filename, id, err = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, fmt.Errorf("can't find import: %q", id) + return nil, err } // no need to re-import if the package was imported completely before @@ -210,57 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var hdr string - var size int64 buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { + data, err := ReadUnified(buf) + if err != nil { + err = fmt.Errorf("import %q: %v", path, err) return } - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. 
- if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } + // unified: emitted by cmd/compile since go1.20. + _, pkg, err = UImportData(fset, packages, data, id) return } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f315..253d6493c2 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. 
+// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. +// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. 
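
// Illustrative sketch, not part of the vendored diff: the format notes above
// describe a stringOff as a uvarint offset into the Strings section, where a
// uvarint length is followed by that many bytes. A minimal decoder for that
// two-step indirection, over hand-built sample sections, might look like this.
package main

import (
	"encoding/binary"
	"fmt"
)

// readStringOff resolves a stringOff against the Strings section: first read
// the uvarint offset from data, then at that offset in strs read a uvarint
// length followed by the string bytes.
func readStringOff(strs, data []byte) string {
	off, n := binary.Uvarint(data)
	if n <= 0 {
		panic("bad uvarint offset")
	}
	slen, m := binary.Uvarint(strs[off:])
	if m <= 0 {
		panic("bad uvarint length")
	}
	start := int(off) + m
	return string(strs[start : start+int(slen)])
}

func main() {
	// Strings section: a single entry "hello" (length 5) at offset 0.
	strs := append(binary.AppendUvarint(nil, 5), "hello"...)
	// Data section: a stringOff pointing at offset 0.
	data := binary.AppendUvarint(nil, 0)
	fmt.Println(readStringOff(strs, data)) // hello
}
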
package gcimporter @@ -24,11 +242,30 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. // +// For types, we use "shallow" export data. Historically, the Go +// compiler always produced a summary of the types for a given package +// that included types from other packages that it indirectly +// referenced: "deep" export data. This had the advantage that the +// compiler (and analogous tools such as gopls) need only load one +// file per direct import. However, it meant that the files tended to +// get larger based on the level of the package in the import +// graph. For example, higher-level packages in the kubernetes module +// have over 1MB of "deep" export data, even when they have almost no +// content of their own, merely because they mention a major type that +// references many others. In pathological cases the export data was +// 300x larger than the source for a package due to this quadratic +// growth. +// +// "Shallow" export data means that the serialized types describe only +// a single package. If those types mention types from other packages, +// the type checker may need to request additional packages beyond +// just the direct imports. Type information for the entire transitive +// closure of imports is provided (lazily) by the DAG. +// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in @@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) } // IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. +// [IExportShallow] in the same executable. This function cannot import data +// from cmd/compile or gcexportdata.Write. // // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being @@ -73,7 +310,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt } // ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) +type ReportFunc = func(string, ...any) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -360,7 +597,7 @@ type filePositions struct { needed []uint64 // unordered list of needed file offsets } -func (p *iexporter) trace(format string, args ...interface{}) { +func (p *iexporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. 
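
// Illustrative sketch, not part of the vendored diff: an earlier hunk in this
// file replaces tokeninternal.GetLines(file) with the standard file.Lines()
// (go1.21+), which returns the byte offset of each line start. For example:
package main

import (
	"fmt"
	"go/token"
)

func main() {
	src := []byte("package p\n\nvar x = 1\n")
	fset := token.NewFileSet()
	f := fset.AddFile("a.go", -1, len(src))
	f.SetLinesForContent(src) // compute line starts from the content
	fmt.Println(f.Lines())    // [0 10 11]
}
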
@@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) @@ -1327,6 +1583,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } // "internalErrorf" as the former is used for bugs, whose cause is // internal inconsistency, whereas the latter is used for ordinary // situations like bad input, whose cause is external. -func internalErrorf(format string, args ...interface{}) error { +func internalErrorf(format string, args ...any) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa03653..bc6c9741e7 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,9 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. +// See iexport.go for the export data format. package gcimporter @@ -53,6 +51,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -401,7 +400,7 @@ type iimporter struct { indent int // for tracing support } -func (p *iimporter) trace(format string, args ...interface{}) { +func (p *iimporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. 
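
// Illustrative sketch, not part of the vendored diff: the doDecl hunk above
// emits genericAliasTag when a materialized *types.Alias carries type
// parameters. Assuming Go 1.24 (generic type aliases in the language,
// gotypesalias on by default), such an alias and its type parameters can be
// observed through go/types like this.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"
)

func main() {
	const src = `package p

type Set[K comparable] = map[K]bool
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.SkipObjectResolution)
	if err != nil {
		log.Fatal(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	alias := pkg.Scope().Lookup("Set").Type().(*types.Alias)
	fmt.Println(alias.TypeParams().Len()) // 1
	fmt.Println(alias.Rhs())              // map[K]bool
}
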
@@ -540,7 +539,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -557,19 +556,28 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 + r.declare(obj) case constTag: typ, val := r.value() @@ -589,6 +597,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) @@ -615,7 +626,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +656,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -660,7 +671,9 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - r.declare(types.NewVar(pos, r.currPkg, name, typ)) + v := types.NewVar(pos, r.currPkg, name, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + r.declare(v) default: errorf("unexpected tag: %v", tag) @@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +875,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1064,7 @@ func (r 
*importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1098,3 +1111,9 @@ func (r *importReader) byte() byte { } return x } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 0000000000..7586bfaca6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d05..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40fd..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 0000000000..907c8557a5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. +var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 0000000000..4af810dc41 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. 
+func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b65..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624cada..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b3a..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c07706887..37b4a39e9e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,10 +11,10 @@ import ( "go/token" "go/types" "sort" - "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" + "golang.org/x/tools/internal/typesinternal" ) // A pkgReader holds the shared state for reading a unified IR package @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. 
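
// Illustrative sketch, not part of the vendored diff: readArchiveHeader above
// parses the classic 60-byte ar entry header, taking the member name from
// bytes [0:16) and the decimal size from bytes [48:58). The same slicing over
// a hand-built header:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A fake ar entry header for "__.PKGDEF" with size 1234:
	// name(16) mtime(12) uid(6) gid(6) mode(8) size(10) magic(2) = 60 bytes.
	hdr := fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10s`\n",
		"__.PKGDEF", "0", "0", "0", "0644", "1234")
	// name field: bytes [0:16), space padded.
	name := strings.TrimRight(hdr[0:16], " ")
	// size field: bytes [48:58), decimal, space padded.
	size, _ := strconv.Atoi(strings.TrimSpace(hdr[48:58]))
	fmt.Println(name, size) // __.PKGDEF 1234
}
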
@@ -72,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -110,13 +108,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +167,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -263,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - case "": + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. + case "", "main": path = r.p.PkgPath() case "builtin": return nil // universe @@ -471,7 +478,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +534,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,14 +566,15 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) sig := fn.Type().(*types.Signature) recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + typesinternal.SetVarKind(recv, typesinternal.RecvVar) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic())) } embeds := make([]types.Type, iface.NumEmbeddeds()) @@ -607,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjVar: pos := r.pos() typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) + v := types.NewVar(pos, objPkg, objName, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + declare(v) } } @@ -632,7 +648,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) @@ -726,3 +745,17 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index eb7a8282f9..7ea9013447 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,13 +8,14 @@ package gocommand import ( "bytes" "context" + "encoding/json" "errors" "fmt" "io" "log" "os" "os/exec" - "reflect" + "path/filepath" "regexp" "runtime" "strconv" @@ -27,7 +28,7 @@ import ( "golang.org/x/tools/internal/event/label" ) -// An Runner will run go command invocations and serialize +// A Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { // once guards the runner initialization. @@ -167,7 +168,9 @@ type Invocation struct { // TODO(rfindley): remove, in favor of Args. ModFile string - // If Overlay is set, the go command is invoked with -overlay=Overlay. + // Overlay is the name of the JSON overlay file that describes + // unsaved editor buffers; see [WriteOverlays]. + // If set, the go command is invoked with -overlay=Overlay. // TODO(rfindley): remove, in favor of Args. Overlay string @@ -176,7 +179,7 @@ type Invocation struct { CleanEnv bool Env []string WorkingDir string - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) } // Postcondition: both error results have same nilness. 
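
// Illustrative sketch, not part of the vendored diff: the Overlay field above
// refers to WriteOverlays, which this diff adds further down. Inside x/tools,
// wiring an unsaved-buffer overlay into a `go list` invocation would look
// roughly like this; Runner.Run returning (*bytes.Buffer, error) is assumed
// from the surrounding package rather than shown in this hunk, and the
// internal gocommand package is only importable within x/tools itself.
package gocommandexample

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/gocommand"
)

func listWithOverlay(ctx context.Context, dir string, unsaved map[string][]byte) error {
	// Write the unsaved buffers to temp files and get the -overlay JSON file.
	overlayFile, cleanup, err := gocommand.WriteOverlays(unsaved)
	if err != nil {
		return err
	}
	defer cleanup()

	inv := gocommand.Invocation{
		Verb:       "list",
		Args:       []string{"-json", "./..."},
		WorkingDir: dir,
		Overlay:    overlayFile, // passed to the go command as -overlay=<file>
	}
	var runner gocommand.Runner
	stdout, err := runner.Run(ctx, inv)
	if err != nil {
		return err
	}
	fmt.Println(stdout.String())
	return nil
}
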
@@ -196,12 +199,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io return } -func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { - log := i.Logf - if log == nil { - log = func(string, ...interface{}) {} +// logf logs if i.Logf is non-nil. +func (i *Invocation) logf(format string, args ...any) { + if i.Logf != nil { + i.Logf(format, args...) } +} +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { goArgs := []string{i.Verb} appendModFile := func() { @@ -244,23 +249,23 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } - - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second + + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. + // + // os.Getwd has a special feature where if the cwd and the PWD + // are the same node then it trusts the PWD, so by setting it + // in the env for the child process we fix up all the paths + // returned by the go command. if !i.CleanEnv { cmd.Env = os.Environ() } @@ -270,7 +275,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + debugStr := cmdDebugStr(cmd) + i.logf("starting %v", debugStr) + start := time.Now() + defer func() { + i.logf("%s for %v", time.Since(start), debugStr) + }() return runCmdContext(ctx, cmd) } @@ -351,6 +361,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } } + startTime := time.Now() err = cmd.Start() if stdoutW != nil { // The child process has inherited the pipe file, @@ -377,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(cmd.Process) + // HandleHangingGoCommand terminates this process. + // Pass off resChan in case we can collect the command error. 
+ handleHangingGoCommand(startTime, cmd, resChan) case <-ctx.Done(): } } else { @@ -402,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } // Didn't shut down in response to interrupt. Kill it hard. - // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT - // on certain platforms, such as unix. if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } @@ -411,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(proc *os.Process) { +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { switch runtime.GOOS { - case "linux", "darwin", "freebsd", "netbsd": + case "linux", "darwin", "freebsd", "netbsd", "openbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND -The gopls test runner has detected a hanging go command. In order to debug -this, the output of ps and lsof/fstat is printed below. + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. -See golang/go#54461 for more details.`) + See golang/go#54461 for more details.`) fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") fmt.Fprintln(os.Stderr, "-------------------------") @@ -427,7 +440,7 @@ See golang/go#54461 for more details.`) psCmd.Stdout = os.Stderr psCmd.Stderr = os.Stderr if err := psCmd.Run(); err != nil { - panic(fmt.Sprintf("running ps: %v", err)) + log.Printf("Handling hanging Go command: running ps: %v", err) } listFiles := "lsof" @@ -441,10 +454,24 @@ See golang/go#54461 for more details.`) listFilesCmd.Stdout = os.Stderr listFilesCmd.Stderr = os.Stderr if err := listFilesCmd.Run(); err != nil { - panic(fmt.Sprintf("running %s: %v", listFiles, err)) + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. + if err := cmd.Process.Signal(sigStuckProcess); err == nil { + select { + case err := <-resChan: + stderr := "not a bytes.Buffer" + if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil { + stderr = buf.String() + } + log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr) + case <-time.After(5 * time.Second): + } + } else { + log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err) } } - panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) + log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid) } func cmdDebugStr(cmd *exec.Cmd) string { @@ -468,3 +495,73 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// WriteOverlays writes each value in the overlay (see the Overlay +// field of go/packages.Config) to a temporary file and returns the name +// of a JSON file describing the mapping that is suitable for the "go +// list -overlay" flag. +// +// On success, the caller must call the cleanup function exactly once +// when the files are no longer needed. 
+func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(overlay) == 0 { + return "", func() {}, nil + } + + dir, err := os.MkdirTemp("", "gocommand-*") + if err != nil { + return "", nil, err + } + + // The caller must clean up this directory, + // unless this function returns an error. + // (The cleanup operand of each return + // statement below is ignored.) + defer func() { + cleanup = func() { + os.RemoveAll(dir) + } + if err != nil { + cleanup() + cleanup = nil + } + }() + + // Write each map entry to a temporary file. + overlays := make(map[string]string) + for k, v := range overlay { + // Use a unique basename for each file (001-foo.go), + // to avoid creating nested directories. + base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) + filename := filepath.Join(dir, base) + err := os.WriteFile(filename, v, 0666) + if err != nil { + return "", nil, err + } + overlays[k] = filename + } + + // Write the JSON overlay file that maps logical file names to temp files. + // + // OverlayJSON is the format overlay files are expected to be in. + // The Replace map maps from overlaid paths to replacement paths: + // the Go command will forward all reads trying to open + // each overlaid path to its replacement path, or consider the overlaid + // path not to exist if the replacement path is empty. + // + // From golang/go#39958. + type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", nil, err + } + filename = filepath.Join(dir, "overlay.json") + if err := os.WriteFile(filename, b, 0666); err != nil { + return "", nil, err + } + + return filename, nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go new file mode 100644 index 0000000000..469c648e4d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package gocommand + +import "os" + +// sigStuckProcess is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var sigStuckProcess = os.Kill diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go new file mode 100644 index 0000000000..169d37c8e9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package gocommand + +import "syscall" + +// Sigstuckprocess is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var sigStuckProcess = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 8361515519..984b79c2a0 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -22,7 +22,7 @@ import ( // Options controls the behavior of a Walk call. 
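WriteOverlays above turns an in-memory overlay map into the JSON file that the go command accepts through -overlay. Because gocommand is an internal package of x/tools, the following is only a sketch of how a caller inside that module (such as gopls) might combine it with Invocation.Overlay; the paths and file contents are invented:

package overlaydemo // illustrative sketch, usable only inside x/tools

import (
	"context"

	"golang.org/x/tools/internal/gocommand"
)

func listWithOverlay(ctx context.Context, runner *gocommand.Runner) error {
	overlay := map[string][]byte{
		"/work/project/main.go": []byte("package main\n\nfunc main() {}\n"),
	}
	overlayFile, cleanup, err := gocommand.WriteOverlays(overlay)
	if err != nil {
		return err
	}
	defer cleanup()

	inv := gocommand.Invocation{
		Verb:       "list",
		Args:       []string{"-json", "./..."},
		Overlay:    overlayFile, // becomes -overlay=<file> on the go command line
		WorkingDir: "/work/project",
	}
	stdout, err := runner.Run(ctx, inv)
	if err != nil {
		return err
	}
	_ = stdout // `go list -json` output for the overlaid view of the package
	return nil
}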
type Options struct { // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool @@ -81,7 +81,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if opts.Logf == nil { - opts.Logf = func(format string, args ...interface{}) {} + opts.Logf = func(format string, args ...any) {} } if _, err := os.Stat(root.Path); os.IsNotExist(err) { opts.Logf("skipping nonexistent directory: %v", root.Path) diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 93d49a6efd..737a9bfae8 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -90,21 +90,12 @@ type ImportFix struct { Relevance float64 // see pkg } -// An ImportInfo represents a single import statement. -type ImportInfo struct { - ImportPath string // import path, e.g. "crypto/rand". - Name string // import name, e.g. "crand", or "" if none. -} - -// A packageInfo represents what's known about a package. -type packageInfo struct { - name string // real package name, if known. - exports map[string]bool // known exports. -} - // parseOtherFiles parses all the Go files in srcDir except filename, including // test files if filename looks like a test. -func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { +// +// It returns an error only if ctx is cancelled. Files with parse errors are +// ignored. +func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename string) ([]*ast.File, error) { // This could use go/packages but it doesn't buy much, and it fails // with https://golang.org/issue/26296 in LoadFiles mode in some cases. considerTests := strings.HasSuffix(filename, "_test.go") @@ -112,11 +103,14 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { fileBase := filepath.Base(filename) packageFileInfos, err := os.ReadDir(srcDir) if err != nil { - return nil + return nil, ctx.Err() } var files []*ast.File for _, fi := range packageFileInfos { + if ctx.Err() != nil { + return nil, ctx.Err() + } if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { continue } @@ -124,7 +118,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { continue } - f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution) if err != nil { continue } @@ -132,7 +126,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { files = append(files, f) } - return files + return files, ctx.Err() } // addGlobals puts the names of package vars into the provided map. @@ -155,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) { // collectReferences builds a map of selector expressions, from // left hand side (X) to a set of right hand sides (Sel). 
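parseOtherFiles now parses sibling files with parser.SkipObjectResolution, which skips building the deprecated ast.Object graph that this scanning never consults, and it honors context cancellation between files. A small self-contained illustration of the parser mode; the sample source is invented:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"
)

func main() {
	src := `package demo

import "fmt"

func Hello() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	// SkipObjectResolution leaves File.Scope and Ident.Obj unset, which is
	// fine for code that only walks the syntax tree.
	f, err := parser.ParseFile(fset, "demo.go", src, parser.SkipObjectResolution)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("package:", f.Name.Name, "decls:", len(f.Decls))
}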
-func collectReferences(f *ast.File) references { - refs := references{} +func collectReferences(f *ast.File) References { + refs := References{} var visitor visitFn visitor = func(node ast.Node) ast.Visitor { @@ -226,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { allFound := true for right := range syms { - if !pkgInfo.exports[right] { + if !pkgInfo.Exports[right] { allFound = false break } @@ -239,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { return nil } -// references is set of references found in a Go file. The first map key is the -// left hand side of a selector expression, the second key is the right hand -// side, and the value should always be true. -type references map[string]map[string]bool - // A pass contains all the inputs and state necessary to fix a file's imports. // It can be modified in some ways during use; see comments below. type pass struct { @@ -251,27 +240,29 @@ type pass struct { fset *token.FileSet // fset used to parse f and its siblings. f *ast.File // the file being fixed. srcDir string // the directory containing f. - env *ProcessEnv // the environment to use for go commands, etc. - loadRealPackageNames bool // if true, load package names from disk rather than guessing them. - otherFiles []*ast.File // sibling files. + logf func(string, ...any) + source Source // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + goroot string // Intermediate state, generated by load. existingImports map[string][]*ImportInfo - allRefs references - missingRefs references + allRefs References + missingRefs References // Inputs to fix. These can be augmented between successive fix calls. lastTry bool // indicates that this is the last call and fix should clean up as best it can. candidates []*ImportInfo // candidate imports in priority order. - knownPackages map[string]*packageInfo // information about all known packages. + knownPackages map[string]*PackageInfo // information about all known packages. } // loadPackageNames saves the package names for everything referenced by imports. -func (p *pass) loadPackageNames(imports []*ImportInfo) error { - if p.env.Logf != nil { - p.env.Logf("loading package names for %v packages", len(imports)) +func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error { + if p.logf != nil { + p.logf("loading package names for %v packages", len(imports)) defer func() { - p.env.Logf("done loading package names for %v packages", len(imports)) + p.logf("done loading package names for %v packages", len(imports)) }() } var unknown []string @@ -282,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - resolver, err := p.env.GetResolver() - if err != nil { - return err - } - - names, err := resolver.loadPackageNames(unknown, p.srcDir) + names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown) if err != nil { return err } + // TODO(rfindley): revisit this. Why do we need to store known packages with + // no exports? The inconsistent data is confusing. 
for path, name := range names { - p.knownPackages[path] = &packageInfo{ - name: name, - exports: map[string]bool{}, + p.knownPackages[path] = &PackageInfo{ + Name: name, + Exports: map[string]bool{}, } } return nil @@ -323,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { return imp.Name } known := p.knownPackages[imp.ImportPath] - if known != nil && known.name != "" { - return withoutVersion(known.name) + if known != nil && known.Name != "" { + return withoutVersion(known.Name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -332,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { // load reads in everything necessary to run a pass, and reports whether the // file already has all the imports it needs. It fills in p.missingRefs with the // file's missing symbols, if any, or removes unused imports if not. -func (p *pass) load() ([]*ImportFix, bool) { - p.knownPackages = map[string]*packageInfo{} - p.missingRefs = references{} +func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) { + p.knownPackages = map[string]*PackageInfo{} + p.missingRefs = References{} p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. @@ -357,10 +345,10 @@ func (p *pass) load() ([]*ImportFix, bool) { // f's imports by the identifier they introduce. imports := collectImports(p.f) if p.loadRealPackageNames { - err := p.loadPackageNames(append(imports, p.candidates...)) + err := p.loadPackageNames(ctx, append(imports, p.candidates...)) if err != nil { - if p.env.Logf != nil { - p.env.Logf("loading package names: %v", err) + if p.logf != nil { + p.logf("loading package names: %v", err) } return nil, false } @@ -530,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() { // We have the stdlib in memory; no need to guess. rights = symbolNameSet(m) } - p.addCandidate(imp, &packageInfo{ + // TODO(rfindley): we should set package name here, for consistency. + p.addCandidate(imp, &PackageInfo{ // no name; we already know it. - exports: rights, + Exports: rights, }) } } @@ -541,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() { // addCandidate adds a candidate import to p, and merges in the information // in pkg. -func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { +func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) { p.candidates = append(p.candidates, imp) if existing, ok := p.knownPackages[imp.ImportPath]; ok { - if existing.name == "" { - existing.name = pkg.name + if existing.Name == "" { + existing.Name = pkg.Name } - for export := range pkg.exports { - existing.exports[export] = true + for export := range pkg.Exports { + existing.Exports[export] = true } } else { p.knownPackages[imp.ImportPath] = pkg @@ -560,6 +549,8 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { // // This is declared as a variable rather than a function so goimports can // easily be extended by adding a file with an init function. +// +// DO NOT REMOVE: used internally at Google. var fixImports = fixImportsDefault func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { @@ -568,36 +559,60 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P return err } apply(fset, f, fixes) - return err + return nil } // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. 
func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + source, err := NewProcessEnvSource(env, filename, f.Name.Name) + if err != nil { + return nil, err + } + goEnv, err := env.goEnv() + if err != nil { + return nil, err + } + return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source) +} + +func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) { + // This logic is defensively duplicated from getFixes. abs, err := filepath.Abs(filename) if err != nil { return nil, err } srcDir := filepath.Dir(abs) - if env.Logf != nil { - env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + + if logf != nil { + logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir) } // First pass: looking only at f, and using the naive algorithm to // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. - p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} - if fixes, done := p.load(); done { + p := &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: source, + } + if fixes, done := p.load(ctx); done { return fixes, nil } - otherFiles := parseOtherFiles(fset, srcDir, filename) + otherFiles, err := parseOtherFiles(ctx, fset, srcDir, filename) + if err != nil { + return nil, err + } // Second pass: add information from other files in the same package, // like their package vars and imports. p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -610,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st // Third pass: get real package names where we had previously used // the naive algorithm. - p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p = &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: p.source, // safe to reuse, as it's just a wrapper around env + } p.loadRealPackageNames = true p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -758,7 +780,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } // Try the assumed package name first, then a simpler path match @@ -793,7 +815,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } return strings.HasPrefix(pkg.importPathShort, searchPrefix) @@ -827,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return true }, dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + return pkgIsCandidate(filename, References{searchPkg: nil}, pkg) }, packageNameLoaded: func(pkg *pkg) bool { return pkg.packageName == searchPkg @@ -905,7 +927,7 @@ type ProcessEnv struct { WorkingDir string // If Logf is non-nil, debug logging is enabled through this function. 
- Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // If set, ModCache holds a shared cache of directory info to use across // multiple ProcessEnvs. @@ -1008,18 +1030,28 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // // For gopls, we can optionally explicitly choose a resolver type, since we // already know the view type. - if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") { e.resolver = newGopathResolver(e) + e.logf("created gopath resolver") } else if r, err := newModuleResolver(e, e.ModCache); err != nil { e.resolverErr = err + e.logf("failed to create module resolver: %v", err) } else { e.resolver = Resolver(r) + e.logf("created module resolver") } } return e.resolver, e.resolverErr } +// logf logs if e.Logf is non-nil. +func (e *ProcessEnv) logf(format string, args ...any) { + if e.Logf != nil { + e.Logf(format, args...) + } +} + // buildContext returns the build.Context to use for matching files. // // TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform @@ -1068,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) return e.GocmdRunner.Run(ctx, inv) } -func addStdlibCandidates(pass *pass, refs references) error { - goenv, err := pass.env.goEnv() - if err != nil { - return err - } +func addStdlibCandidates(pass *pass, refs References) error { localbase := func(nm string) string { ans := path.Base(nm) if ans[0] == 'v' { @@ -1087,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error { } add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir { return } exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, - &packageInfo{name: localbase(pkg), exports: exports}) + &PackageInfo{Name: localbase(pkg), Exports: exports}) } for left := range refs { if left == "rand" { @@ -1104,6 +1132,9 @@ func addStdlibCandidates(pass *pass, refs references) error { // but we have no way of figuring out what the user is using // TODO: investigate using the toolchain version to disambiguate in the stdlib add("math/rand/v2") + // math/rand has an overlapping API + // TestIssue66407 fails without this + add("math/rand") continue } for importPath := range stdlib.PackageSymbols { @@ -1123,8 +1154,8 @@ type Resolver interface { // scan works with callback to search for packages. See scanCallback for details. scan(ctx context.Context, callback *scanCallback) error - // loadExports returns the set of exported symbols in the package at dir. - // loadExports may be called concurrently. + // loadExports returns the package name and set of exported symbols in the + // package at dir. loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) // scoreImportPath returns the relevance for an import path. 
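GetResolver above now treats GOWORK=off the same as an unset GOWORK when choosing between the GOPATH-style and module-aware resolvers. The decision in isolation, as a tiny sketch; the helper name and string results are illustrative, not part of the API:

// chooseResolver mirrors ProcessEnv.GetResolver: fall back to a GOPATH
// resolver only when there is no enclosing module (GOMOD empty) and no
// active workspace (GOWORK empty or "off").
func chooseResolver(goenv map[string]string) string {
	if goenv["GOMOD"] == "" && (goenv["GOWORK"] == "" || goenv["GOWORK"] == "off") {
		return "gopath"
	}
	return "module"
}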
@@ -1157,101 +1188,22 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) } -func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error { ctx, done := event.Start(ctx, "imports.addExternalCandidates") defer done() - var mu sync.Mutex - found := make(map[string][]pkgDistance) - callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true // We want everything. - }, - dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, refs, pkg) - }, - packageNameLoaded: func(pkg *pkg) bool { - if _, want := refs[pkg.packageName]; !want { - return false - } - if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - return false - } - if !canUse(filename, pkg.dir) { - return false - } - mu.Lock() - defer mu.Unlock() - found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) - return false // We'll do our own loading after we sort. - }, - } - resolver, err := pass.env.GetResolver() + results, err := pass.source.ResolveReferences(ctx, filename, refs) if err != nil { return err } - if err = resolver.scan(context.Background(), callback); err != nil { - return err - } - - // Search for imports matching potential package references. - type result struct { - imp *ImportInfo - pkg *packageInfo - } - results := make(chan result, len(refs)) - - ctx, cancel := context.WithCancel(context.TODO()) - var wg sync.WaitGroup - defer func() { - cancel() - wg.Wait() - }() - var ( - firstErr error - firstErrOnce sync.Once - ) - for pkgName, symbols := range refs { - wg.Add(1) - go func(pkgName string, symbols map[string]bool) { - defer wg.Done() - - found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) - - if err != nil { - firstErrOnce.Do(func() { - firstErr = err - cancel() - }) - return - } - - if found == nil { - return // No matching package. - } - - imp := &ImportInfo{ - ImportPath: found.importPathShort, - } - - pkg := &packageInfo{ - name: pkgName, - exports: symbols, - } - results <- result{imp, pkg} - }(pkgName, symbols) - } - go func() { - wg.Wait() - close(results) - }() - for result := range results { + for _, result := range results { + if result == nil { + continue + } // Don't offer completions that would shadow predeclared // names, such as github.com/coreos/etcd/error. - if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + if types.Universe.Lookup(result.Package.Name) != nil { // predeclared // Ideally we would skip this candidate only // if the predeclared name is actually // referenced by the file, but that's a lot @@ -1260,9 +1212,9 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil // user before long. continue } - pass.addCandidate(result.imp, result.pkg) + pass.addCandidate(result.Import, result.Package) } - return firstErr + return nil } // notIdentifier reports whether ch is an invalid identifier character. @@ -1604,11 +1556,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } fullFile := filepath.Join(dir, fi.Name()) + // Legacy ast.Object resolution is needed here. 
f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { - if env.Logf != nil { - env.Logf("error parsing %v: %v", fullFile, err) - } + env.logf("error parsing %v: %v", fullFile, err) continue } if f.Name.Name == "documentation" { @@ -1644,9 +1595,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } sortSymbols(exports) - if env.Logf != nil { - env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) - } + env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) return pkgName, exports, nil } @@ -1656,25 +1605,39 @@ func sortSymbols(syms []stdlib.Symbol) { }) } -// findImport searches for a package with the given symbols. -// If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { +// A symbolSearcher searches for a package with a set of symbols, among a set +// of candidates. See [symbolSearcher.search]. +// +// The search occurs within the scope of a single file, with context captured +// in srcDir and xtest. +type symbolSearcher struct { + logf func(string, ...any) + srcDir string // directory containing the file + xtest bool // if set, the file containing is an x_test file + loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) +} + +// search searches the provided candidates for a package containing all +// exported symbols. +// +// If successful, returns the resulting package. +func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so // there's no "penalty" for vendoring. sort.Sort(byDistanceOrImportPathShortLength(candidates)) - if pass.env.Logf != nil { + if s.logf != nil { for i, c := range candidates { - pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } - resolver, err := pass.env.GetResolver() - if err != nil { - return nil, err - } - // Collect exports for packages with matching names. + // Arrange rescv so that we can we can await results in order of relevance + // and exit as soon as we find the first match. + // + // Search with bounded concurrency, returning as soon as the first result + // among rescv is non-nil. rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1682,6 +1645,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa const maxConcurrentPackageImport = 4 loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + // Ensure that all work is completed at exit. ctx, cancel := context.WithCancel(ctx) var wg sync.WaitGroup defer func() { @@ -1689,6 +1653,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa wg.Wait() }() + // Start the search. 
wg.Add(1) go func() { defer wg.Done() @@ -1699,55 +1664,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa return } + i := i + c := c wg.Add(1) - go func(c pkgDistance, resc chan<- *pkg) { + go func() { defer func() { <-loadExportsSem wg.Done() }() - - if pass.env.Logf != nil { - pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + if s.logf != nil { + s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - // If we're an x_test, load the package under test's test variant. - includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir - _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) + pkg, err := s.searchOne(ctx, c, symbols) if err != nil { - if pass.env.Logf != nil { - pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + if s.logf != nil && ctx.Err() == nil { + s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) } - resc <- nil - return + pkg = nil } - - exportsMap := make(map[string]bool, len(exports)) - for _, sym := range exports { - exportsMap[sym.Name] = true - } - - // If it doesn't have the right - // symbols, send nil to mean no match. - for symbol := range symbols { - if !exportsMap[symbol] { - resc <- nil - return - } - } - resc <- c.pkg - }(c, rescv[i]) + rescv[i] <- pkg // may be nil + }() } }() + // Await the first (best) result. for _, resc := range rescv { - pkg := <-resc - if pkg == nil { - continue + select { + case r := <-resc: + if r != nil { + return r, nil + } + case <-ctx.Done(): + return nil, ctx.Err() } - return pkg, nil } return nil, nil } +func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + // If we're considering the package under test from an x_test, load the + // test variant. + includeTest := s.xtest && c.pkg.dir == s.srcDir + _, exports, err := s.loadExports(ctx, c.pkg, includeTest) + if err != nil { + return nil, err + } + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym.Name] = true + } + for symbol := range symbols { + if !exportsMap[symbol] { + return nil, nil // no match + } + } + return c.pkg, nil +} + // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1760,68 +1737,34 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { // Check "internal" and "vendor" visibility: - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } // Speed optimization to minimize disk I/O: - // the last two components on disk must contain the - // package name somewhere. // - // This permits mismatch naming like directory - // "go-foo" being package "foo", or "pkg.v3" being "pkg", - // or directory "google.golang.org/api/cloudbilling/v1" - // being package "cloudbilling", but doesn't - // permit a directory "foo" to be package - // "bar", which is strongly discouraged - // anyway. There's no reason goimports needs - // to be slow just to accommodate that. 
+ // Use the matchesPath heuristic to filter to package paths that could + // reasonably match a dangling reference. + // + // This permits mismatch naming like directory "go-foo" being package "foo", + // or "pkg.v3" being "pkg", or directory + // "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but + // doesn't permit a directory "foo" to be package "bar", which is strongly + // discouraged anyway. There's no reason goimports needs to be slow just to + // accommodate that. for pkgIdent := range refs { - lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { + if matchesPath(pkgIdent, pkg.importPathShort) { return true } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - } } return false } -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { - return true - } - } - return false -} - -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) -} - -// canUse reports whether the package in dir is usable from filename, +// CanUse reports whether the package in dir is usable from filename, // respecting the Go "internal" and "vendor" visibility rules. -func canUse(filename, dir string) bool { +func CanUse(filename, dir string) bool { // Fast path check, before any allocations. If it doesn't contain vendor // or internal, it's not tricky: // Note that this can false-negative on directories like "notinternal", @@ -1859,19 +1802,84 @@ func canUse(filename, dir string) bool { return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") } -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { +// matchesPath reports whether ident may match a potential package name +// referred to by path, using heuristics to filter out unidiomatic package +// names. +// +// Specifically, it checks whether either of the last two '/'- or '\'-delimited +// path segments matches the identifier. The segment-matching heuristic must +// allow for various conventions around segment naming, including go-foo, +// foo-go, and foo.v3. To handle all of these, matching considers both (1) the +// entire segment, ignoring '-' and '.', as well as (2) the last subsegment +// separated by '-' or '.'. So the segment foo-go matches all of the following +// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII +// identifiers). +// +// See the docstring for [pkgIsCandidate] for an explanation of how this +// heuristic filters potential candidate packages. +func matchesPath(ident, path string) bool { + // Ignore case, for ASCII. + lowerIfASCII := func(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b + } + + // match reports whether path[start:end] matches ident, ignoring [.-]. + match := func(start, end int) bool { + ii := len(ident) - 1 // current byte in ident + pi := end - 1 // current byte in path + for ; pi >= start && ii >= 0; pi-- { + pb := path[pi] + if pb == '-' || pb == '.' 
{ + continue + } + pb = lowerIfASCII(pb) + ib := lowerIfASCII(ident[ii]) + if pb != ib { + return false + } + ii-- + } + return ii < 0 && pi < start // all bytes matched + } + + // segmentEnd and subsegmentEnd hold the end points of the current segment + // and subsegment intervals. + segmentEnd := len(path) + subsegmentEnd := len(path) + + // Count slashes; we only care about the last two segments. nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { + + for i := len(path) - 1; i >= 0; i-- { + switch b := path[i]; b { + // TODO(rfindley): we handle backlashes here only because the previous + // heuristic handled backslashes. This is perhaps overly defensive, but is + // the result of many lessons regarding Chesterton's fence and the + // goimports codebase. + // + // However, this function is only ever called with something called an + // 'importPath'. Is it possible that this is a real import path, and + // therefore we need only consider forward slashes? + case '/', '\\': + if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) { + return true + } nslash++ if nslash == 2 { - return v[i:] + return false // did not match above + } + segmentEnd, subsegmentEnd = i, i // reset + case '-', '.': + if match(i+1, subsegmentEnd) { + return true } + subsegmentEnd = i } } - return v + return match(0, segmentEnd) || match(0, subsegmentEnd) } type visitFn func(node ast.Node) ast.Visitor diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index f83465520a..2215a12880 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -47,7 +47,14 @@ type Options struct { // Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { fileSet := token.NewFileSet() - file, adjust, err := parse(fileSet, filename, src, opt) + var parserMode parser.Mode + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment) if err != nil { return nil, err } @@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. -func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) { ctx, done := event.Start(ctx, "imports.FixImports") defer done() fileSet := token.NewFileSet() - file, _, err := parse(fileSet, filename, src, opt) + // TODO(rfindley): these default values for ParseComments and AllErrors were + // extracted from gopls, but are they even needed? + file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true) if err != nil { return nil, err } - return getFixes(ctx, fileSet, file, filename, opt.Env) + return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source) } // ApplyFixes applies all of the fixes to the file and formats it. 
extraMode @@ -86,7 +95,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() - parserMode := parser.Mode(0) + parserMode := parser.SkipObjectResolution if opt.Comments { parserMode |= parser.ParseComments } @@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // formatted file, and returns the postpocessed result. func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { mergeImports(file) - sortImports(opt.LocalPrefix, fset.File(file.Pos()), file) + sortImports(opt.LocalPrefix, fset.File(file.FileStart), file) var spacesBefore []string // import paths we need spaces before for _, impSection := range astutil.Imports(fset, file) { // Within each block of contiguous imports, see if any @@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori // parse parses src, which was read from filename, // as a Go source file or statement list. -func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { - parserMode := parser.Mode(0) - if opt.Comments { - parserMode |= parser.ParseComments - } - if opt.AllErrors { - parserMode |= parser.AllErrors +func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) { + if parserMode&parser.SkipObjectResolution != 0 { + panic("legacy ast.Object resolution is required") } // Try as whole source file. @@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast // If the error is that the source file didn't begin with a // package line and we accept fragmented input, fall through to // try as a source fragment. Stop and return on any other error. - if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + if !fragment || !strings.Contains(err.Error(), "expected 'package'") { return nil, nil, err } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 82fe644a18..8555e3f83d 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe // 2. Use this to separate module cache scanning from other scanning. func gomodcacheForEnv(goenv map[string]string) string { if gmc := goenv["GOMODCACHE"]; gmc != "" { - return gmc + // golang/go#67156: ensure that the module cache is clean, since it is + // assumed as a prefix to directories scanned by gopathwalk, which are + // themselves clean. + return filepath.Clean(gmc) } gopaths := filepath.SplitList(goenv["GOPATH"]) if len(gopaths) == 0 { @@ -265,9 +268,7 @@ func (r *ModuleResolver) initAllMods() error { return err } if mod.Dir == "" { - if r.env.Logf != nil { - r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) - } + r.env.logf("module %v has not been downloaded and will be ignored", mod.Path) // Can't do anything with a module that's not downloaded. 
continue } @@ -742,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { subdir := "" - if dir != root.Path { - subdir = dir[len(root.Path)+len("/"):] + if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) { + subdir = dir[len(prefix):] } importPath := filepath.ToSlash(subdir) if strings.HasPrefix(importPath, "vendor/") { @@ -766,9 +767,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) if err != nil { - if r.env.Logf != nil { - r.env.Logf("decoding module cache path %q: %v", subdir, err) - } + r.env.logf("decoding module cache path %q: %v", subdir, err) return directoryPackageInfo{ status: directoryScanned, err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go new file mode 100644 index 0000000000..cbe4f3c5ba --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -0,0 +1,63 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import "context" + +// These types document the APIs below. +// +// TODO(rfindley): consider making these defined types rather than aliases. +type ( + ImportPath = string + PackageName = string + Symbol = string + + // References is set of References found in a Go file. The first map key is the + // left hand side of a selector expression, the second key is the right hand + // side, and the value should always be true. + References = map[PackageName]map[Symbol]bool +) + +// A Result satisfies a missing import. +// +// The Import field describes the missing import spec, and the Package field +// summarizes the package exports. +type Result struct { + Import *ImportInfo + Package *PackageInfo +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A PackageInfo represents what's known about a package. +type PackageInfo struct { + Name string // package name in the package declaration, if known + Exports map[string]bool // set of names of known package level sortSymbols +} + +// A Source provides imports to satisfy unresolved references in the file being +// fixed. +type Source interface { + // LoadPackageNames queries PackageName information for the requested import + // paths, when operating from the provided srcDir. + // + // TODO(rfindley): try to refactor to remove this operation. + LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) + + // ResolveReferences asks the Source for the best package name to satisfy + // each of the missing references, in the context of fixing the given + // filename. + // + // Returns a map from package name to a [Result] for that package name that + // provides the required symbols. Keys may be omitted in the map if no + // candidates satisfy all missing references for that package name. It is up + // to each data source to select the best result for each entry in the + // missing map. 
+ ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go new file mode 100644 index 0000000000..ec996c3ccf --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -0,0 +1,129 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "path/filepath" + "strings" + "sync" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/internal/gopathwalk" +) + +// ProcessEnvSource implements the [Source] interface using the legacy +// [ProcessEnv] abstraction. +type ProcessEnvSource struct { + env *ProcessEnv + srcDir string + filename string + pkgName string +} + +// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given +// env, to be used for fixing imports in the file with name filename in package +// named pkgName. +func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + return &ProcessEnvSource{ + env: env, + srcDir: srcDir, + filename: filename, + pkgName: pkgName, + }, nil +} + +func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) { + r, err := s.env.GetResolver() + if err != nil { + return nil, err + } + return r.loadPackageNames(unknown, srcDir) +} + +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == s.srcDir && s.pkgName == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !CanUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := s.env.GetResolver() + if err != nil { + return nil, err + } + if err := resolver.scan(ctx, callback); err != nil { + return nil, err + } + + g, ctx := errgroup.WithContext(ctx) + + searcher := symbolSearcher{ + logf: s.env.logf, + srcDir: s.srcDir, + xtest: strings.HasSuffix(s.pkgName, "_test"), + loadExports: resolver.loadExports, + } + + var resultMu sync.Mutex + results := make(map[string]*Result, len(refs)) + for pkgName, symbols := range refs { + g.Go(func() error { + found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) + if err != nil { + return err + } + if found == nil { + return nil // No matching package. 
+ } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + pkg := &PackageInfo{ + Name: pkgName, + Exports: symbols, + } + resultMu.Lock() + results[pkgName] = &Result{Import: imp, Package: pkg} + resultMu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + var ans []*Result + for _, x := range results { + ans = append(ans, x) + } + return ans, nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go new file mode 100644 index 0000000000..05229f06ce --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -0,0 +1,103 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "sync" + "time" + + "golang.org/x/tools/internal/modindex" +) + +// This code is here rather than in the modindex package +// to avoid import loops + +// implements Source using modindex, so only for module cache. +// +// this is perhaps over-engineered. A new Index is read at first use. +// And then Update is called after every 15 minutes, and a new Index +// is read if the index changed. It is not clear the Mutex is needed. +type IndexSource struct { + modcachedir string + mutex sync.Mutex + ix *modindex.Index + expires time.Time +} + +// create a new Source. Called from NewView in cache/session.go. +func NewIndexSource(cachedir string) *IndexSource { + return &IndexSource{modcachedir: cachedir} +} + +func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) { + /// This is used by goimports to resolve the package names of imports of the + // current package, which is irrelevant for the module cache. + return nil, nil +} + +func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { + if err := s.maybeReadIndex(); err != nil { + return nil, err + } + var cs []modindex.Candidate + for pkg, nms := range missing { + for nm := range nms { + x := s.ix.Lookup(pkg, nm, false) + cs = append(cs, x...) + } + } + found := make(map[string]*Result) + for _, c := range cs { + var x *Result + if x = found[c.ImportPath]; x == nil { + x = &Result{ + Import: &ImportInfo{ + ImportPath: c.ImportPath, + Name: "", + }, + Package: &PackageInfo{ + Name: c.PkgName, + Exports: make(map[string]bool), + }, + } + found[c.ImportPath] = x + } + x.Package.Exports[c.Name] = true + } + var ans []*Result + for _, x := range found { + ans = append(ans, x) + } + return ans, nil +} + +func (s *IndexSource) maybeReadIndex() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + var readIndex bool + if time.Now().After(s.expires) { + ok, err := modindex.Update(s.modcachedir) + if err != nil { + return err + } + if ok { + readIndex = true + } + } + + if readIndex || s.ix == nil { + ix, err := modindex.ReadIndex(s.modcachedir) + if err != nil { + return err + } + s.ix = ix + // for now refresh every 15 minutes + s.expires = time.Now().Add(time.Minute * 15) + } + + return nil +} diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go new file mode 100644 index 0000000000..1e1a02f239 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -0,0 +1,135 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modindex + +import ( + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "slices" + "strings" + "sync" + "time" + + "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gopathwalk" +) + +type directory struct { + path Relpath + importPath string + version string // semantic version + syms []symbol +} + +// filterDirs groups the directories by import path, +// sorting the ones with the same import path by semantic version, +// most recent first. +func byImportPath(dirs []Relpath) (map[string][]*directory, error) { + ans := make(map[string][]*directory) // key is import path + for _, d := range dirs { + ip, sv, err := DirToImportPathVersion(d) + if err != nil { + return nil, err + } + ans[ip] = append(ans[ip], &directory{ + path: d, + importPath: ip, + version: sv, + }) + } + for k, v := range ans { + semanticSort(v) + ans[k] = v + } + return ans, nil +} + +// sort the directories by semantic version, latest first +func semanticSort(v []*directory) { + slices.SortFunc(v, func(l, r *directory) int { + if n := semver.Compare(l.version, r.version); n != 0 { + return -n // latest first + } + return strings.Compare(string(l.path), string(r.path)) + }) +} + +// modCacheRegexp splits a relpathpath into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +// DirToImportPathVersion computes import path and semantic version +func DirToImportPathVersion(dir Relpath) (string, string, error) { + m := modCacheRegexp.FindStringSubmatch(string(dir)) + // m[1] is the module path + // m[2] is the version major.minor.patch(-
     1 && flds[1][1] == 'D',
    +			}
    +			if px.Type == Func {
    +				n, err := strconv.Atoi(flds[2])
    +				if err != nil {
    +					continue // should never happen
    +				}
    +				px.Results = int16(n)
    +				if len(flds) >= 4 {
    +					sig := strings.Split(flds[3], " ")
    +					for i := 0; i < len(sig); i++ {
    +						// $ cannot otherwise occur. removing the spaces
    +						// almost works, but for chan struct{}, e.g.
    +						sig[i] = strings.Replace(sig[i], "$", " ", -1)
    +					}
    +					px.Sig = toFields(sig)
    +				}
    +			}
    +			ans = append(ans, px)
    +		}
    +	}
    +	return ans
    +}
    +
    +func toFields(sig []string) []Field {
    +	ans := make([]Field, len(sig)/2)
    +	for i := 0; i < len(ans); i++ {
    +		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
    +	}
    +	return ans
    +}
    +
    +// benchmarks show this is measurably better than strings.Split
    +// split into first 4 fields separated by single space
    +func fastSplit(x string) []string {
    +	ans := make([]string, 0, 4)
    +	nxt := 0
    +	start := 0
    +	for i := 0; i < len(x); i++ {
    +		if x[i] != ' ' {
    +			continue
    +		}
    +		ans = append(ans, x[start:i])
    +		nxt++
    +		start = i + 1
    +		if nxt >= 3 {
    +			break
    +		}
    +	}
    +	ans = append(ans, x[start:])
    +	return ans
    +}
    +
    +func asLexType(c byte) LexType {
    +	switch c {
    +	case 'C':
    +		return Const
    +	case 'V':
    +		return Var
    +	case 'T':
    +		return Type
    +	case 'F':
    +		return Func
    +	}
    +	return -1
    +}
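
The stored symbol lines above are decoded by fastSplit and toFields: at most four space-separated fields, with "$" standing in for any space that occurs inside a type. A minimal standalone sketch of that decoding; the index line and the function it describes are invented for illustration, and strings.SplitN stands in for the package's fastSplit:

```go
package main

import (
	"fmt"
	"strings"
)

// Field pairs a parameter name with its type, like the vendored toFields output.
type Field struct{ Arg, Type string }

func main() {
	// Hypothetical index line for:
	//	func Watch(ctx context.Context, ch chan struct{}) error
	// "F" marks a func, "1" is the number of results, and any space inside
	// a type has been replaced by "$" (here "chan$struct{}").
	line := "Watch F 1 ctx context.Context ch chan$struct{}"

	flds := strings.SplitN(line, " ", 4) // stand-in for fastSplit
	name, kind, results := flds[0], flds[1], flds[2]

	var sig []Field
	parts := strings.Split(flds[3], " ")
	for i := 0; i+1 < len(parts); i += 2 {
		sig = append(sig, Field{
			Arg:  parts[i],
			Type: strings.ReplaceAll(parts[i+1], "$", " "), // undo the $-for-space substitution
		})
	}
	fmt.Println(name, kind, results, sig)
	// prints: Watch F 1 [{ctx context.Context} {ch chan struct{}}]
}
```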
    diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
    new file mode 100644
    index 0000000000..355a53e71a
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
    @@ -0,0 +1,164 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package modindex contains code for building and searching an index to
    +// the Go module cache. The directory containing the index, returned by
+// IndexDir(), contains a file index-name-<ver> that contains the name
    +// of the current index. We believe writing that short file is atomic.
    +// ReadIndex reads that file to get the file name of the index.
    +// WriteIndex writes an index with a unique name and then
+// writes that name into a new version of index-name-<ver>.
+// (<ver> stands for the CurrentVersion of the index format.)
    +package modindex
    +
    +import (
    +	"path/filepath"
    +	"slices"
    +	"strings"
    +	"time"
    +
    +	"golang.org/x/mod/semver"
    +)
    +
    +// Create always creates a new index for the go module cache that is in cachedir.
    +func Create(cachedir string) error {
    +	_, err := indexModCache(cachedir, true)
    +	return err
    +}
    +
+// Update the index for the go module cache that is in cachedir.
    +// If there is no existing index it will build one.
    +// If there are changed directories since the last index, it will
    +// write a new one and return true. Otherwise it returns false.
    +func Update(cachedir string) (bool, error) {
    +	return indexModCache(cachedir, false)
    +}
    +
    +// indexModCache writes an index current as of when it is called.
    +// If clear is true the index is constructed from all of GOMODCACHE
    +// otherwise the index is constructed from the last previous index
    +// and the updates to the cache. It returns true if it wrote an index,
    +// false otherwise.
    +func indexModCache(cachedir string, clear bool) (bool, error) {
    +	cachedir, err := filepath.Abs(cachedir)
    +	if err != nil {
    +		return false, err
    +	}
    +	cd := Abspath(cachedir)
    +	future := time.Now().Add(24 * time.Hour) // safely in the future
    +	ok, err := modindexTimed(future, cd, clear)
    +	if err != nil {
    +		return false, err
    +	}
    +	return ok, nil
    +}
    +
    +// modindexTimed writes an index current as of onlyBefore.
    +// If clear is true the index is constructed from all of GOMODCACHE
    +// otherwise the index is constructed from the last previous index
    +// and all the updates to the cache before onlyBefore.
    +// It returns true if it wrote a new index, false if it wrote nothing.
    +func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
    +	var curIndex *Index
    +	if !clear {
    +		var err error
    +		curIndex, err = ReadIndex(string(cachedir))
    +		if clear && err != nil {
    +			return false, err
    +		}
    +		// TODO(pjw): check that most of those directories still exist
    +	}
    +	cfg := &work{
    +		onlyBefore: onlyBefore,
    +		oldIndex:   curIndex,
    +		cacheDir:   cachedir,
    +	}
    +	if curIndex != nil {
    +		cfg.onlyAfter = curIndex.Changed
    +	}
    +	if err := cfg.buildIndex(); err != nil {
    +		return false, err
    +	}
    +	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
    +		// no changes from existing curIndex, don't write a new index
    +		return false, nil
    +	}
    +	if err := cfg.writeIndex(); err != nil {
    +		return false, err
    +	}
    +	return true, nil
    +}
    +
    +type work struct {
    +	onlyBefore time.Time // do not use directories later than this
    +	onlyAfter  time.Time // only interested in directories after this
    +	// directories from before onlyAfter come from oldIndex
    +	oldIndex *Index
    +	newIndex *Index
    +	cacheDir Abspath
    +}
    +
    +func (w *work) buildIndex() error {
    +	// The effective date of the new index should be at least
    +	// slightly earlier than when the directories are scanned
    +	// so set it now.
    +	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
    +	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
    +	if len(dirs) == 0 {
    +		return nil
    +	}
    +	newdirs, err := byImportPath(dirs)
    +	if err != nil {
    +		return err
    +	}
    +	// for each import path it might occur only in newdirs,
    +	// only in w.oldIndex, or in both.
    +	// If it occurs in both, use the semantically later one
    +	if w.oldIndex != nil {
    +		for _, e := range w.oldIndex.Entries {
    +			found, ok := newdirs[e.ImportPath]
    +			if !ok {
    +				w.newIndex.Entries = append(w.newIndex.Entries, e)
    +				continue // use this one, there is no new one
    +			}
    +			if semver.Compare(found[0].version, e.Version) > 0 {
    +				// use the new one
    +			} else {
    +				// use the old one, forget the new one
    +				w.newIndex.Entries = append(w.newIndex.Entries, e)
    +				delete(newdirs, e.ImportPath)
    +			}
    +		}
    +	}
+	// get symbol information for all the new directories
    +	getSymbols(w.cacheDir, newdirs)
    +	// assemble the new index entries
    +	for k, v := range newdirs {
    +		d := v[0]
    +		pkg, names := processSyms(d.syms)
    +		if pkg == "" {
    +			continue // PJW: does this ever happen?
    +		}
    +		entry := Entry{
    +			PkgName:    pkg,
    +			Dir:        d.path,
    +			ImportPath: k,
    +			Version:    d.version,
    +			Names:      names,
    +		}
    +		w.newIndex.Entries = append(w.newIndex.Entries, entry)
    +	}
    +	// sort the entries in the new index
    +	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
    +		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
    +			return n
    +		}
    +		return strings.Compare(l.ImportPath, r.ImportPath)
    +	})
    +	return nil
    +}
    +
    +func (w *work) writeIndex() error {
    +	return writeIndex(w.cacheDir, w.newIndex)
    +}
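
Taken together, Create/Update/ReadIndex and Index.Lookup give the flow that the new IndexSource drives. Below is a rough usage sketch based only on the signatures visible in this diff; since modindex is internal to x/tools it cannot actually be imported from other modules, so this is illustration rather than a working consumer:

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"

	"golang.org/x/tools/internal/modindex" // internal to x/tools: shown for illustration only
)

func main() {
	// Find the module cache the way a tool normally would.
	out, err := exec.Command("go", "env", "GOMODCACHE").Output()
	if err != nil {
		log.Fatal(err)
	}
	cachedir := strings.TrimSpace(string(out))

	// Update rewrites the index only if the cache changed since the last
	// run (Create would rebuild it unconditionally); ReadIndex then loads
	// whatever index the name file currently points at.
	if _, err := modindex.Update(cachedir); err != nil {
		log.Fatal(err)
	}
	ix, err := modindex.ReadIndex(cachedir)
	if err != nil {
		log.Fatal(err)
	}

	// Candidates offering an exported symbol "New" in some package named "errors".
	for _, c := range ix.Lookup("errors", "New", false) {
		fmt.Println(c.PkgName, c.ImportPath)
	}
}
```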
    diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
    new file mode 100644
    index 0000000000..b918529d43
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
    @@ -0,0 +1,218 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package modindex
    +
    +import (
    +	"fmt"
    +	"go/ast"
    +	"go/parser"
    +	"go/token"
    +	"go/types"
    +	"os"
    +	"path/filepath"
    +	"runtime"
    +	"slices"
    +	"strings"
    +
    +	"golang.org/x/sync/errgroup"
    +)
    +
    +// The name of a symbol contains information about the symbol:
    +//  T for types, TD if the type is deprecated
    +//  C for consts, CD if the const is deprecated
    +//  V for vars, VD if the var is deprecated
+// and for funcs:  F <number of results> (<name> <type>)*
+// any spaces in <type> are replaced by $s so that the fields
    +// of the name are space separated. F is replaced by FD if the func
    +// is deprecated.
    +type symbol struct {
+	pkg  string // name of the symbol's package
+	name string // declared name
+	kind string // T, C, V, or F, followed by D if deprecated
    +	sig  string // signature information, for F
    +}
    +
    +// find the symbols for the best directories
    +func getSymbols(cd Abspath, dirs map[string][]*directory) {
    +	var g errgroup.Group
    +	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
    +	for _, vv := range dirs {
    +		// throttling some day?
    +		d := vv[0]
    +		g.Go(func() error {
    +			thedir := filepath.Join(string(cd), string(d.path))
    +			mode := parser.SkipObjectResolution | parser.ParseComments
    +
    +			fi, err := os.ReadDir(thedir)
    +			if err != nil {
    +				return nil // log this someday?
    +			}
    +			for _, fx := range fi {
    +				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
    +					continue
    +				}
    +				fname := filepath.Join(thedir, fx.Name())
    +				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
    +				if err != nil {
    +					continue // ignore errors, someday log them?
    +				}
    +				d.syms = append(d.syms, getFileExports(tr)...)
    +			}
    +			return nil
    +		})
    +	}
    +	g.Wait()
    +}
    +
    +func getFileExports(f *ast.File) []symbol {
    +	pkg := f.Name.Name
    +	if pkg == "main" {
    +		return nil
    +	}
    +	var ans []symbol
    +	// should we look for //go:build ignore?
    +	for _, decl := range f.Decls {
    +		switch decl := decl.(type) {
    +		case *ast.FuncDecl:
    +			if decl.Recv != nil {
    +				// ignore methods, as we are completing package selections
    +				continue
    +			}
    +			name := decl.Name.Name
    +			dtype := decl.Type
    +			// not looking at dtype.TypeParams. That is, treating
    +			// generic functions just like non-generic ones.
    +			sig := dtype.Params
    +			kind := "F"
    +			if isDeprecated(decl.Doc) {
    +				kind += "D"
    +			}
    +			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
    +			for _, x := range sig.List {
    +				// This code creates a string representing the type.
    +				// TODO(pjw): it may be fragile:
    +				// 1. x.Type could be nil, perhaps in ill-formed code
    +				// 2. ExprString might someday change incompatibly to
    +				//    include struct tags, which can be arbitrary strings
    +				if x.Type == nil {
    +					// Can this happen without a parse error? (Files with parse
    +					// errors are ignored in getSymbols)
    +					continue // maybe report this someday
    +				}
    +				tp := types.ExprString(x.Type)
    +				if len(tp) == 0 {
    +					// Can this happen?
    +					continue // maybe report this someday
    +				}
    +				// This is only safe if ExprString never returns anything with a $
    +				// The only place a $ can occur seems to be in a struct tag, which
    +				// can be an arbitrary string literal, and ExprString does not presently
    +				// print struct tags. So for this to happen the type of a formal parameter
+				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
    +				// would have to show the struct tag. Even testing for this case seems
    +				// a waste of effort, but let's remember the possibility
    +				if strings.Contains(tp, "$") {
    +					continue
    +				}
    +				tp = strings.Replace(tp, " ", "$", -1)
    +				if len(x.Names) == 0 {
    +					result = append(result, "_")
    +					result = append(result, tp)
    +				} else {
    +					for _, y := range x.Names {
    +						result = append(result, y.Name)
    +						result = append(result, tp)
    +					}
    +				}
    +			}
    +			sigs := strings.Join(result, " ")
    +			if s := newsym(pkg, name, kind, sigs); s != nil {
    +				ans = append(ans, *s)
    +			}
    +		case *ast.GenDecl:
    +			depr := isDeprecated(decl.Doc)
    +			switch decl.Tok {
    +			case token.CONST, token.VAR:
    +				tp := "V"
    +				if decl.Tok == token.CONST {
    +					tp = "C"
    +				}
    +				if depr {
    +					tp += "D"
    +				}
    +				for _, sp := range decl.Specs {
    +					for _, x := range sp.(*ast.ValueSpec).Names {
    +						if s := newsym(pkg, x.Name, tp, ""); s != nil {
    +							ans = append(ans, *s)
    +						}
    +					}
    +				}
    +			case token.TYPE:
    +				tp := "T"
    +				if depr {
    +					tp += "D"
    +				}
    +				for _, sp := range decl.Specs {
    +					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
    +						ans = append(ans, *s)
    +					}
    +				}
    +			}
    +		}
    +	}
    +	return ans
    +}
    +
    +func newsym(pkg, name, kind, sig string) *symbol {
    +	if len(name) == 0 || !ast.IsExported(name) {
    +		return nil
    +	}
    +	sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
    +	return &sym
    +}
    +
    +func isDeprecated(doc *ast.CommentGroup) bool {
    +	if doc == nil {
    +		return false
    +	}
    +	// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
    +	// This code fails for /* Deprecated: */, but it's the code from
    +	// gopls/internal/analysis/deprecated
    +	lines := strings.Split(doc.Text(), "\n\n")
    +	for _, line := range lines {
    +		if strings.HasPrefix(line, "Deprecated:") {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// return the package name and the value for the symbols.
    +// if there are multiple packages, choose one arbitrarily
    +// the returned slice is sorted lexicographically
    +func processSyms(syms []symbol) (string, []string) {
    +	if len(syms) == 0 {
    +		return "", nil
    +	}
    +	slices.SortFunc(syms, func(l, r symbol) int {
    +		return strings.Compare(l.name, r.name)
    +	})
    +	pkg := syms[0].pkg
    +	var names []string
    +	for _, s := range syms {
    +		var nx string
    +		if s.pkg == pkg {
    +			if s.sig != "" {
    +				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
    +			} else {
    +				nx = fmt.Sprintf("%s %s", s.name, s.kind)
    +			}
    +			names = append(names, nx)
    +		} else {
    +			continue // PJW: do we want to keep track of these?
    +		}
    +	}
    +	return pkg, names
    +}
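
For a concrete picture of what getFileExports and processSyms emit, here is the (entirely hypothetical) Names slice an index Entry would carry for a small package with one type, one deprecated function, and one const; the format follows the symbol comment above:

```go
package main

import "fmt"

// Hypothetical symbol names for a package containing:
//
//	const MaxRetries = 3
//	type Client struct{ /* ... */ }
//	// Deprecated: use DialContext instead.
//	func Dial(addr string, timeout time.Duration) (*Client, error)
//
// Each entry is "<name> <kind>" (D appended when deprecated); funcs also
// carry the result count and the $-encoded (name, type) parameter pairs,
// and processSyms sorts the entries by name.
var names = []string{
	"Client T",
	"Dial FD 2 addr string timeout time.Duration",
	"MaxRetries C",
}

func main() { fmt.Println(names) }
```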
    diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
    new file mode 100644
    index 0000000000..ece4488630
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/modindex/types.go
    @@ -0,0 +1,25 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package modindex
    +
    +import (
    +	"strings"
    +)
    +
    +// some special types to avoid confusions
    +
    +// distinguish various types of directory names. It's easy to get confused.
    +type Abspath string // absolute paths
    +type Relpath string // paths with GOMODCACHE prefix removed
    +
    +func toRelpath(cachedir Abspath, s string) Relpath {
    +	if strings.HasPrefix(s, string(cachedir)) {
    +		if s == string(cachedir) {
    +			return Relpath("")
    +		}
    +		return Relpath(s[len(cachedir)+1:])
    +	}
    +	return Relpath(s)
    +}
    diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    index 44719de173..25ebab663b 100644
    --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    @@ -5,8 +5,7 @@
     // Package packagesinternal exposes internal-only fields from go/packages.
     package packagesinternal
     
    -var GetForTest = func(p interface{}) string { return "" }
    -var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
    +var GetDepsErrors = func(p any) []*PackageError { return nil }
     
     type PackageError struct {
     	ImportStack []string // shortest path from package named on command line to this one
    @@ -16,7 +15,6 @@ type PackageError struct {
     
     var TypecheckCgo int
     var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
    -var ForTest int    // must be set as a LoadMode to call GetForTest
     
    -var SetModFlag = func(config interface{}, value string) {}
    -var SetModFile = func(config interface{}, value string) {}
    +var SetModFlag = func(config any, value string) {}
    +var SetModFile = func(config any, value string) {}
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    index 2acd85851e..f6cb37c5c3 100644
    --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    @@ -21,10 +21,7 @@ import (
     // export data.
     type PkgDecoder struct {
     	// version is the file format version.
    -	version uint32
    -
    -	// aliases determines whether types.Aliases should be created
    -	aliases bool
    +	version Version
     
     	// sync indicates whether the file uses sync markers.
     	sync bool
    @@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
     // NewPkgDecoder returns a PkgDecoder initialized to read the Unified
     // IR export data from input. pkgPath is the package path for the
     // compilation unit that produced the export data.
    -//
    -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
     func NewPkgDecoder(pkgPath, input string) PkgDecoder {
     	pr := PkgDecoder{
     		pkgPath: pkgPath,
    -		//aliases: aliases.Enabled(),
     	}
     
     	// TODO(mdempsky): Implement direct indexing of input string to
    @@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
     
     	r := strings.NewReader(input)
     
    -	assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
    +	var ver uint32
    +	assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
    +	pr.version = Version(ver)
     
    -	switch pr.version {
    -	default:
    -		panic(fmt.Errorf("unsupported version: %v", pr.version))
    -	case 0:
    -		// no flags
    -	case 1:
    +	if pr.version >= numVersions {
    +		panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
    +	}
    +
    +	if pr.version.Has(Flags) {
     		var flags uint32
     		assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
     		pr.sync = flags&flagSyncMarkers != 0
    @@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
     	assert(err == nil)
     
     	pr.elemData = input[pos:]
    -	assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
    +
    +	const fingerprintSize = 8
    +	assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
     
     	return pr
     }
    @@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
     		absIdx += int(pr.elemEndsEnds[k-1])
     	}
     	if absIdx >= int(pr.elemEndsEnds[k]) {
    -		errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
    +		panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
     	}
     	return absIdx
     }
    @@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
     		Idx:    idx,
     	}
     
    -	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
    -	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
    -
    +	r.Data.Reset(pr.DataIdx(k, idx))
     	r.Sync(SyncRelocs)
     	r.Relocs = make([]RelocEnt, r.Len())
     	for i := range r.Relocs {
    @@ -248,7 +243,7 @@ type Decoder struct {
     
     func (r *Decoder) checkErr(err error) {
     	if err != nil {
    -		errorf("unexpected decoding error: %w", err)
    +		panicf("unexpected decoding error: %w", err)
     	}
     }
     
    @@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
     
     	return path, name, tag
     }
    +
    +// Version reports the version of the bitstream.
    +func (w *Decoder) Version() Version { return w.common.version }
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
    index 6482617a4f..c17a12399d 100644
    --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
    +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
    @@ -12,18 +12,15 @@ import (
     	"io"
     	"math/big"
     	"runtime"
    +	"strings"
     )
     
    -// currentVersion is the current version number.
    -//
    -//   - v0: initial prototype
    -//
    -//   - v1: adds the flags uint32 word
    -const currentVersion uint32 = 1
    -
     // A PkgEncoder provides methods for encoding a package's Unified IR
     // export data.
     type PkgEncoder struct {
    +	// version of the bitstream.
    +	version Version
    +
     	// elems holds the bitstream for previously encoded elements.
     	elems [numRelocs][]string
     
    @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
     // export data files, but can help diagnosing desync errors in
     // higher-level Unified IR reader/writer code. If syncFrames is
     // negative, then sync markers are omitted entirely.
    -func NewPkgEncoder(syncFrames int) PkgEncoder {
    +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
     	return PkgEncoder{
    +		version:    version,
     		stringsIdx: make(map[string]Index),
     		syncFrames: syncFrames,
     	}
    @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
     		assert(binary.Write(out, binary.LittleEndian, x) == nil)
     	}
     
    -	writeUint32(currentVersion)
    +	writeUint32(uint32(pw.version))
     
    -	var flags uint32
    -	if pw.SyncMarkers() {
    -		flags |= flagSyncMarkers
    +	if pw.version.Has(Flags) {
    +		var flags uint32
    +		if pw.SyncMarkers() {
    +			flags |= flagSyncMarkers
    +		}
    +		writeUint32(flags)
     	}
    -	writeUint32(flags)
     
     	// Write elemEndsEnds.
     	var sum uint32
    @@ -159,7 +159,7 @@ type Encoder struct {
     
     // Flush finalizes the element's bitstream and returns its Index.
     func (w *Encoder) Flush() Index {
    -	var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
    +	var sb strings.Builder
     
     	// Backup the data so we write the relocations at the front.
     	var tmp bytes.Buffer
    @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
     
     func (w *Encoder) checkErr(err error) {
     	if err != nil {
    -		errorf("unexpected encoding error: %v", err)
    +		panicf("unexpected encoding error: %v", err)
     	}
     }
     
    @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
     // section (if not already present), and then writing a relocation
     // into the element bitstream.
     func (w *Encoder) String(s string) {
    +	w.StringRef(w.p.StringIdx(s))
    +}
    +
    +// StringRef writes a reference to the given index, which must be a
    +// previously encoded string value.
    +func (w *Encoder) StringRef(idx Index) {
     	w.Sync(SyncString)
    -	w.Reloc(RelocString, w.p.StringIdx(s))
    +	w.Reloc(RelocString, idx)
     }
     
     // Strings encodes and writes a variable-length slice of strings into
    @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
     func (w *Encoder) scalar(val constant.Value) {
     	switch v := constant.Val(val).(type) {
     	default:
    -		errorf("unhandled %v (%v)", val, val.Kind())
    +		panicf("unhandled %v (%v)", val, val.Kind())
     	case bool:
     		w.Code(ValBool)
     		w.Bool(v)
    @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
     	b := v.Append(nil, 'p', -1)
     	w.String(string(b)) // TODO: More efficient encoding.
     }
    +
    +// Version reports the version of the bitstream.
    +func (w *Encoder) Version() Version { return w.p.version }
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
    deleted file mode 100644
    index 5294f6a63e..0000000000
    --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
    +++ /dev/null
    @@ -1,21 +0,0 @@
    -// Copyright 2021 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build !go1.7
    -// +build !go1.7
    -
    -// TODO(mdempsky): Remove after #44505 is resolved
    -
    -package pkgbits
    -
    -import "runtime"
    -
    -func walkFrames(pcs []uintptr, visit frameVisitor) {
    -	for _, pc := range pcs {
    -		fn := runtime.FuncForPC(pc)
    -		file, line := fn.FileLine(pc)
    -
    -		visit(file, line, fn.Name(), pc-fn.Entry())
    -	}
    -}
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
    deleted file mode 100644
    index 2324ae7adf..0000000000
    --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -// Copyright 2021 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.7
    -// +build go1.7
    -
    -package pkgbits
    -
    -import "runtime"
    -
    -// walkFrames calls visit for each call frame represented by pcs.
    -//
    -// pcs should be a slice of PCs, as returned by runtime.Callers.
    -func walkFrames(pcs []uintptr, visit frameVisitor) {
    -	if len(pcs) == 0 {
    -		return
    -	}
    -
    -	frames := runtime.CallersFrames(pcs)
    -	for {
    -		frame, more := frames.Next()
    -		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
    -		if !more {
    -			return
    -		}
    -	}
    -}
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
    index ad26d3b28c..50534a2955 100644
    --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go
    +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
    @@ -12,6 +12,6 @@ func assert(b bool) {
     	}
     }
     
    -func errorf(format string, args ...interface{}) {
    +func panicf(format string, args ...any) {
     	panic(fmt.Errorf(format, args...))
     }
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
    index 5bd51ef717..1520b73afb 100644
    --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go
    +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
    @@ -6,6 +6,7 @@ package pkgbits
     
     import (
     	"fmt"
    +	"runtime"
     	"strings"
     )
     
    @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
     
     type frameVisitor func(file string, line int, name string, offset uintptr)
     
    +// walkFrames calls visit for each call frame represented by pcs.
    +//
    +// pcs should be a slice of PCs, as returned by runtime.Callers.
    +func walkFrames(pcs []uintptr, visit frameVisitor) {
    +	if len(pcs) == 0 {
    +		return
    +	}
    +
    +	frames := runtime.CallersFrames(pcs)
    +	for {
    +		frame, more := frames.Next()
    +		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
    +		if !more {
    +			return
    +		}
    +	}
    +}
    +
     // SyncMarker is an enum type that represents markers that may be
     // written to export data to ensure the reader and writer stay
     // synchronized.
    @@ -110,4 +129,8 @@ const (
     	SyncStmtsEnd
     	SyncLabel
     	SyncOptLabel
    +
    +	SyncMultiExpr
    +	SyncRType
    +	SyncConvRTTI
     )
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
    index 4a5b0ca5f2..582ad56d3e 100644
    --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
    +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
    @@ -74,11 +74,14 @@ func _() {
     	_ = x[SyncStmtsEnd-64]
     	_ = x[SyncLabel-65]
     	_ = x[SyncOptLabel-66]
    +	_ = x[SyncMultiExpr-67]
    +	_ = x[SyncRType-68]
    +	_ = x[SyncConvRTTI-69]
     }
     
    -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
    +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
     
    -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
    +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
     
     func (i SyncMarker) String() string {
     	i -= 1
    diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
    new file mode 100644
    index 0000000000..53af9df22b
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go
    @@ -0,0 +1,85 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package pkgbits
    +
    +// Version indicates a version of a unified IR bitstream.
    +// Each Version indicates the addition, removal, or change of
    +// new data in the bitstream.
    +//
    +// These are serialized to disk and the interpretation remains fixed.
    +type Version uint32
    +
    +const (
    +	// V0: initial prototype.
    +	//
    +	// All data that is not assigned a Field is in version V0
    +	// and has not been deprecated.
    +	V0 Version = iota
    +
    +	// V1: adds the Flags uint32 word
    +	V1
    +
    +	// V2: removes unused legacy fields and supports type parameters for aliases.
    +	// - remove the legacy "has init" bool from the public root
    +	// - remove obj's "derived func instance" bool
    +	// - add a TypeParamNames field to ObjAlias
    +	// - remove derived info "needed" bool
    +	V2
    +
    +	numVersions = iota
    +)
    +
    +// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
    +//
    +// We only really need Fields when the data may or may not be present
    +// in a stream based on the Version of the bitstream.
    +//
    +// Unlike much of pkgbits, Fields are not serialized and
    +// can change values as needed.
    +type Field int
    +
    +const (
    +	// Flags in a uint32 in the header of a bitstream
    +	// that is used to indicate whether optional features are enabled.
    +	Flags Field = iota
    +
    +	// Deprecated: HasInit was a bool indicating whether a package
    +	// has any init functions.
    +	HasInit
    +
    +	// Deprecated: DerivedFuncInstance was a bool indicating
    +	// whether an object was a function instance.
    +	DerivedFuncInstance
    +
    +	// ObjAlias has a list of TypeParamNames.
    +	AliasTypeParamNames
    +
    +	// Deprecated: DerivedInfoNeeded was a bool indicating
    +	// whether a type was a derived type.
    +	DerivedInfoNeeded
    +
    +	numFields = iota
    +)
    +
    +// introduced is the version a field was added.
    +var introduced = [numFields]Version{
    +	Flags:               V1,
    +	AliasTypeParamNames: V2,
    +}
    +
    +// removed is the version a field was removed in or 0 for fields
    +// that have not yet been deprecated.
    +// (So removed[f]-1 is the last version it is included in.)
    +var removed = [numFields]Version{
    +	HasInit:             V2,
    +	DerivedFuncInstance: V2,
    +	DerivedInfoNeeded:   V2,
    +}
    +
    +// Has reports whether field f is present in a bitstream at version v.
    +func (v Version) Has(f Field) bool {
    +	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
    +}
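
The introduced/removed tables make Has a pure table lookup. The self-contained restatement below (pkgbits is internal, so the two tables are re-declared rather than imported) shows which checks hold at each version:

```go
package main

import "fmt"

// A trimmed-down copy of the Version/Field scheme above.
type Version uint32
type Field int

const (
	V0 Version = iota
	V1
	V2
)

const (
	Flags Field = iota
	HasInit
	DerivedFuncInstance
	AliasTypeParamNames
	DerivedInfoNeeded
	numFields = iota
)

var introduced = [numFields]Version{Flags: V1, AliasTypeParamNames: V2}
var removed = [numFields]Version{HasInit: V2, DerivedFuncInstance: V2, DerivedInfoNeeded: V2}

// Has reports whether field f is present in a bitstream at version v.
func (v Version) Has(f Field) bool {
	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}

func main() {
	fmt.Println(V0.Has(Flags))                // false: the flags word only exists since V1
	fmt.Println(V1.Has(Flags), V2.Has(Flags)) // true true: never removed
	fmt.Println(V1.Has(HasInit))              // true: legacy bool still present in V1
	fmt.Println(V2.Has(HasInit))              // false: dropped in V2
	fmt.Println(V2.Has(AliasTypeParamNames))  // true: added in V2
}
```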
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
    new file mode 100644
    index 0000000000..7cca431cd6
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
    @@ -0,0 +1,359 @@
    +// Copyright 2025 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Code generated by generate.go. DO NOT EDIT.
    +
    +package stdlib
    +
    +type pkginfo struct {
    +	name string
    +	deps string // list of indices of dependencies, as varint-encoded deltas
    +}
    +
    +var deps = [...]pkginfo{
    +	{"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"},
    +	{"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"},
    +	{"bufio", "\x03k}E\x13"},
    +	{"bytes", "n+R\x03\fG\x02\x02"},
    +	{"cmp", ""},
    +	{"compress/bzip2", "\x02\x02\xe7\x01B"},
    +	{"compress/flate", "\x02l\x03z\r\x024\x01\x03"},
    +	{"compress/gzip", "\x02\x04a\a\x03\x15eT"},
    +	{"compress/lzw", "\x02l\x03z"},
    +	{"compress/zlib", "\x02\x04a\a\x03\x13\x01f"},
    +	{"container/heap", "\xae\x02"},
    +	{"container/list", ""},
    +	{"container/ring", ""},
    +	{"context", "n\\h\x01\f"},
    +	{"crypto", "\x84\x01gD"},
    +	{"crypto/aes", "\x10\n\a\x8e\x02"},
    +	{"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"},
    +	{"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"},
    +	{"crypto/dsa", "@\x04*}\x0e"},
    +	{"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"},
    +	{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"},
    +	{"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"},
    +	{"crypto/elliptic", "0>}\x0e9"},
    +	{"crypto/fips140", " \x05\x91\x01"},
    +	{"crypto/hkdf", "-\x12\x01.\x16"},
    +	{"crypto/hmac", "\x1a\x14\x11\x01\x113"},
    +	{"crypto/internal/boring", "\x0e\x02\rg"},
    +	{"crypto/internal/boring/bbig", "\x1a\xdf\x01L"},
    +	{"crypto/internal/boring/bcache", "\xb3\x02\x12"},
    +	{"crypto/internal/boring/sig", ""},
    +	{"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"},
    +	{"crypto/internal/entropy", "E"},
    +	{"crypto/internal/fips140", ">0}9\f\x15"},
    +	{"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"},
    +	{"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"},
    +	{"crypto/internal/fips140/alias", "\xc5\x02"},
    +	{"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"},
    +	{"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"},
    +	{"crypto/internal/fips140/check/checktest", "%\xff\x01!"},
    +	{"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"},
    +	{"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"},
    +	{"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"},
    +	{"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"},
    +	{"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"},
    +	{"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"},
    +	{"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"},
    +	{"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"},
    +	{"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"},
    +	{"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"},
    +	{"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"},
    +	{"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"},
    +	{"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"},
    +	{"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
    +	{"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"},
    +	{"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
    +	{"crypto/internal/fips140/ssh", " \x05"},
    +	{"crypto/internal/fips140/subtle", "#\x19\xbe\x01"},
    +	{"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"},
    +	{"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b2"},
    +	{"crypto/internal/fips140deps", ""},
    +	{"crypto/internal/fips140deps/byteorder", "\x9a\x01"},
    +	{"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
    +	{"crypto/internal/fips140deps/godebug", "\xb6\x01"},
    +	{"crypto/internal/fips140hash", "5\x1a5\xc1\x01"},
    +	{"crypto/internal/fips140only", "'\r\x01\x01N25"},
    +	{"crypto/internal/fips140test", ""},
    +	{"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"},
    +	{"crypto/internal/impl", "\xb0\x02"},
    +	{"crypto/internal/randutil", "\xeb\x01\x12"},
    +	{"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"},
    +	{"crypto/internal/sysrand/internal/seccomp", "n"},
    +	{"crypto/md5", "\x0e2.\x16\x16`"},
    +	{"crypto/mlkem", "/"},
    +	{"crypto/pbkdf2", "2\r\x01.\x16"},
    +	{"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"},
    +	{"crypto/rc4", "#\x1d.\xc1\x01"},
    +	{"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"},
    +	{"crypto/sha1", "\x0e\f&.\x16\x16\x14L"},
    +	{"crypto/sha256", "\x0e\f\x1aP"},
    +	{"crypto/sha3", "\x0e'O\xc1\x01"},
    +	{"crypto/sha512", "\x0e\f\x1cN"},
    +	{"crypto/subtle", "8\x98\x01T"},
    +	{"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"},
    +	{"crypto/tls/internal/fips140tls", " \x93\x02"},
    +	{"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"},
    +	{"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"},
    +	{"crypto/x509/pkix", "d\x06\a\x88\x01F"},
    +	{"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"},
    +	{"database/sql/driver", "\ra\x03\xae\x01\x10\x10"},
    +	{"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"},
    +	{"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"},
    +	{"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"},
    +	{"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"},
    +	{"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"},
    +	{"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"},
    +	{"debug/plan9obj", "g\a\x03`\x1a,"},
    +	{"embed", "n+:\x18\x01S"},
    +	{"embed/internal/embedtest", ""},
    +	{"encoding", ""},
    +	{"encoding/ascii85", "\xeb\x01D"},
    +	{"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"},
    +	{"encoding/base32", "\xeb\x01B\x02"},
    +	{"encoding/base64", "\x9a\x01QB\x02"},
    +	{"encoding/binary", "n}\r'\x0e\x05"},
    +	{"encoding/csv", "\x02\x01k\x03zE\x11\x02"},
    +	{"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"},
    +	{"encoding/hex", "n\x03zB\x03"},
    +	{"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"},
    +	{"encoding/pem", "\x03c\b}B\x03"},
    +	{"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"},
    +	{"errors", "\xca\x01{"},
    +	{"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"},
    +	{"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"},
    +	{"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"},
    +	{"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"},
    +	{"go/ast/internal/tests", ""},
    +	{"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"},
    +	{"go/build/constraint", "n\xc1\x01\x01\x11\x02"},
    +	{"go/constant", "q\x10w\x01\x015\x01\x02\x11"},
    +	{"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"},
    +	{"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"},
    +	{"go/format", "\x03n\x01\f\x01\x02jE"},
    +	{"go/importer", "t\a\x01\x01\x04\x01i9"},
    +	{"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"},
    +	{"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"},
    +	{"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"},
    +	{"go/parser", "\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"},
    +	{"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"},
    +	{"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"},
    +	{"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"},
    +	{"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"},
    +	{"go/version", "\xbb\x01u"},
    +	{"hash", "\xeb\x01"},
    +	{"hash/adler32", "n\x16\x16"},
    +	{"hash/crc32", "n\x16\x16\x14\x84\x01\x01"},
    +	{"hash/crc64", "n\x16\x16\x98\x01"},
    +	{"hash/fnv", "n\x16\x16`"},
    +	{"hash/maphash", "\x95\x01\x05\x1b\x03@M"},
    +	{"html", "\xb0\x02\x02\x11"},
    +	{"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"},
    +	{"image", "\x02l\x1f^\x0f5\x03\x01"},
    +	{"image/color", ""},
    +	{"image/color/palette", "\x8d\x01"},
    +	{"image/draw", "\x8c\x01\x01\x04"},
    +	{"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"},
    +	{"image/internal/imageutil", "\x8c\x01"},
    +	{"image/jpeg", "\x02l\x1e\x01\x04Z"},
    +	{"image/png", "\x02\a^\n\x13\x02\x06\x01^D"},
    +	{"index/suffixarray", "\x03d\a}\r*\v\x01"},
    +	{"internal/abi", "\xb5\x01\x90\x01"},
    +	{"internal/asan", "\xc5\x02"},
    +	{"internal/bisect", "\xa4\x02\x0e\x01"},
    +	{"internal/buildcfg", "qG_\x06\x02\x05\v\x01"},
    +	{"internal/bytealg", "\xae\x01\x97\x01"},
    +	{"internal/byteorder", ""},
    +	{"internal/cfg", ""},
    +	{"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"},
    +	{"internal/copyright", ""},
    +	{"internal/coverage", ""},
    +	{"internal/coverage/calloc", ""},
    +	{"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"},
    +	{"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"},
    +	{"internal/coverage/cmerge", "q-Z"},
    +	{"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"},
    +	{"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"},
    +	{"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"},
    +	{"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."},
    +	{"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"},
    +	{"internal/coverage/rtcov", "\xc5\x02"},
    +	{"internal/coverage/slicereader", "g\nzZ"},
    +	{"internal/coverage/slicewriter", "qz"},
    +	{"internal/coverage/stringtab", "q8\x04>"},
    +	{"internal/coverage/test", ""},
    +	{"internal/coverage/uleb128", ""},
    +	{"internal/cpu", "\xc5\x02"},
    +	{"internal/dag", "\x04m\xbc\x01\x03"},
    +	{"internal/diff", "\x03n\xbd\x01\x02"},
    +	{"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"},
    +	{"internal/filepathlite", "n+:\x19A"},
    +	{"internal/fmtsort", "\x04\x9b\x02\x0e"},
    +	{"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"},
    +	{"internal/goarch", ""},
    +	{"internal/godebug", "\x97\x01 {\x01\x12"},
    +	{"internal/godebugs", ""},
    +	{"internal/goexperiment", ""},
    +	{"internal/goos", ""},
    +	{"internal/goroot", "\x97\x02\x01\x05\x13\x02"},
    +	{"internal/gover", "\x04"},
    +	{"internal/goversion", ""},
    +	{"internal/itoa", ""},
    +	{"internal/lazyregexp", "\x97\x02\v\x0e\x02"},
    +	{"internal/lazytemplate", "\xeb\x01,\x19\x02\v"},
    +	{"internal/msan", "\xc5\x02"},
    +	{"internal/nettrace", ""},
    +	{"internal/obscuretestdata", "f\x85\x01,"},
    +	{"internal/oserror", "n"},
    +	{"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"},
    +	{"internal/platform", ""},
    +	{"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"},
    +	{"internal/profile", "\x03\x04g\x03z7\f\x01\x01\x0f"},
    +	{"internal/profilerecord", ""},
    +	{"internal/race", "\x95\x01\xb0\x01"},
    +	{"internal/reflectlite", "\x95\x01 3\x01P\x0e\x13\x12"},
    +	{"unsafe", ""},
    +	{"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"},
    +	{"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"},
    +	{"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"},
    +	{"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
    +	{"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"},
    +	{"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"},
    +	{"vendor/golang.org/x/net/dns/dnsmessage", "n"},
    +	{"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"},
    +	{"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"},
    +	{"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"},
    +	{"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"},
    +	{"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"},
    +	{"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"},
    +	{"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"},
    +	{"vendor/golang.org/x/text/transform", "\x03k}X"},
    +	{"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"},
    +	{"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"},
    +	{"weak", "\x95\x01\x8f\x01!"},
    +}
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go
    new file mode 100644
    index 0000000000..f6909878a8
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go
    @@ -0,0 +1,89 @@
    +// Copyright 2025 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package stdlib
    +
    +// This file provides the API for the import graph of the standard library.
    +//
    +// Be aware that the compiler-generated code for every package
    +// implicitly depends on package "runtime" and a handful of others
    +// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
    +
    +import (
    +	"encoding/binary"
    +	"iter"
    +	"slices"
    +	"strings"
    +)
    +
    +// Imports returns the sequence of packages directly imported by the
    +// named standard packages, in name order.
    +// The imports of an unknown package are the empty set.
    +//
    +// The graph is built into the application and may differ from the
    +// graph in the Go source tree being analyzed by the application.
    +func Imports(pkgs ...string) iter.Seq[string] {
    +	return func(yield func(string) bool) {
    +		for _, pkg := range pkgs {
    +			if i, ok := find(pkg); ok {
    +				var depIndex uint64
    +				for data := []byte(deps[i].deps); len(data) > 0; {
    +					delta, n := binary.Uvarint(data)
    +					depIndex += delta
    +					if !yield(deps[depIndex].name) {
    +						return
    +					}
    +					data = data[n:]
    +				}
    +			}
    +		}
    +	}
    +}
    +
    +// Dependencies returns the set of all dependencies of the named
    +// standard packages, including the initial package,
    +// in a deterministic topological order.
    +// The dependencies of an unknown package are the empty set.
    +//
    +// The graph is built into the application and may differ from the
    +// graph in the Go source tree being analyzed by the application.
    +func Dependencies(pkgs ...string) iter.Seq[string] {
    +	return func(yield func(string) bool) {
    +		for _, pkg := range pkgs {
    +			if i, ok := find(pkg); ok {
    +				var seen [1 + len(deps)/8]byte // bit set of seen packages
    +				var visit func(i int) bool
    +				visit = func(i int) bool {
    +					bit := byte(1) << (i % 8)
    +					if seen[i/8]&bit == 0 {
    +						seen[i/8] |= bit
    +						var depIndex uint64
    +						for data := []byte(deps[i].deps); len(data) > 0; {
    +							delta, n := binary.Uvarint(data)
    +							depIndex += delta
    +							if !visit(int(depIndex)) {
    +								return false
    +							}
    +							data = data[n:]
    +						}
    +						if !yield(deps[i].name) {
    +							return false
    +						}
    +					}
    +					return true
    +				}
    +				if !visit(i) {
    +					return
    +				}
    +			}
    +		}
    +	}
    +}
    +
    +// find returns the index of pkg in the deps table.
    +func find(pkg string) (int, bool) {
    +	return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
    +		return strings.Compare(p.name, n)
    +	})
    +}
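
A short sketch of how the generated dependency table is meant to be consumed; internal/stdlib cannot be imported outside x/tools, so this is illustrative only, and it assumes a toolchain new enough to range over iter.Seq:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib" // internal package: illustration only
)

func main() {
	// Direct imports of net/http, in name order, decoded from the
	// varint-delta "deps" string in the generated table.
	for dep := range stdlib.Imports("net/http") {
		fmt.Println("imports:", dep)
	}

	// Full transitive closure (including net/http itself), in a
	// deterministic topological order.
	for dep := range stdlib.Dependencies("net/http") {
		fmt.Println("depends on:", dep)
	}
}
```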
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    index fd6892075e..00776a31b6 100644
    --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    @@ -1,4 +1,4 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    +// Copyright 2025 The Go Authors. All rights reserved.
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    @@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"ErrWriteAfterClose", Var, 0},
     		{"ErrWriteTooLong", Var, 0},
     		{"FileInfoHeader", Func, 1},
    +		{"FileInfoNames", Type, 23},
     		{"Format", Type, 10},
     		{"FormatGNU", Const, 10},
     		{"FormatPAX", Const, 10},
    @@ -267,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"ErrTooLarge", Var, 0},
     		{"Fields", Func, 0},
     		{"FieldsFunc", Func, 0},
    +		{"FieldsFuncSeq", Func, 24},
    +		{"FieldsSeq", Func, 24},
     		{"HasPrefix", Func, 0},
     		{"HasSuffix", Func, 0},
     		{"Index", Func, 0},
    @@ -279,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"LastIndexAny", Func, 0},
     		{"LastIndexByte", Func, 5},
     		{"LastIndexFunc", Func, 0},
    +		{"Lines", Func, 24},
     		{"Map", Func, 0},
     		{"MinRead", Const, 0},
     		{"NewBuffer", Func, 0},
    @@ -292,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"Split", Func, 0},
     		{"SplitAfter", Func, 0},
     		{"SplitAfterN", Func, 0},
    +		{"SplitAfterSeq", Func, 24},
     		{"SplitN", Func, 0},
    +		{"SplitSeq", Func, 24},
     		{"Title", Func, 0},
     		{"ToLower", Func, 0},
     		{"ToLowerSpecial", Func, 0},
    @@ -534,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"NewCTR", Func, 0},
     		{"NewGCM", Func, 2},
     		{"NewGCMWithNonceSize", Func, 5},
    +		{"NewGCMWithRandomNonce", Func, 24},
     		{"NewGCMWithTagSize", Func, 11},
     		{"NewOFB", Func, 0},
     		{"Stream", Type, 0},
    @@ -672,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
     		{"Unmarshal", Func, 0},
     		{"UnmarshalCompressed", Func, 15},
     	},
    +	"crypto/fips140": {
    +		{"Enabled", Func, 24},
    +	},
    +	"crypto/hkdf": {
    +		{"Expand", Func, 24},
    +		{"Extract", Func, 24},
    +		{"Key", Func, 24},
    +	},
     	"crypto/hmac": {
     		{"Equal", Func, 1},
     		{"New", Func, 0},
    @@ -682,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
     		{"Size", Const, 0},
     		{"Sum", Func, 2},
     	},
    +	"crypto/mlkem": {
    +		{"(*DecapsulationKey1024).Bytes", Method, 24},
    +		{"(*DecapsulationKey1024).Decapsulate", Method, 24},
    +		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
    +		{"(*DecapsulationKey768).Bytes", Method, 24},
    +		{"(*DecapsulationKey768).Decapsulate", Method, 24},
    +		{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
    +		{"(*EncapsulationKey1024).Bytes", Method, 24},
    +		{"(*EncapsulationKey1024).Encapsulate", Method, 24},
    +		{"(*EncapsulationKey768).Bytes", Method, 24},
    +		{"(*EncapsulationKey768).Encapsulate", Method, 24},
    +		{"CiphertextSize1024", Const, 24},
    +		{"CiphertextSize768", Const, 24},
    +		{"DecapsulationKey1024", Type, 24},
    +		{"DecapsulationKey768", Type, 24},
    +		{"EncapsulationKey1024", Type, 24},
    +		{"EncapsulationKey768", Type, 24},
    +		{"EncapsulationKeySize1024", Const, 24},
    +		{"EncapsulationKeySize768", Const, 24},
    +		{"GenerateKey1024", Func, 24},
    +		{"GenerateKey768", Func, 24},
    +		{"NewDecapsulationKey1024", Func, 24},
    +		{"NewDecapsulationKey768", Func, 24},
    +		{"NewEncapsulationKey1024", Func, 24},
    +		{"NewEncapsulationKey768", Func, 24},
    +		{"SeedSize", Const, 24},
    +		{"SharedKeySize", Const, 24},
    +	},
    +	"crypto/pbkdf2": {
    +		{"Key", Func, 24},
    +	},
     	"crypto/rand": {
     		{"Int", Func, 0},
     		{"Prime", Func, 0},
     		{"Read", Func, 0},
     		{"Reader", Var, 0},
    +		{"Text", Func, 24},
     	},
     	"crypto/rc4": {
     		{"(*Cipher).Reset", Method, 0},
    @@ -765,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
     		{"Sum224", Func, 2},
     		{"Sum256", Func, 2},
     	},
    +	"crypto/sha3": {
    +		{"(*SHA3).AppendBinary", Method, 24},
    +		{"(*SHA3).BlockSize", Method, 24},
    +		{"(*SHA3).MarshalBinary", Method, 24},
    +		{"(*SHA3).Reset", Method, 24},
    +		{"(*SHA3).Size", Method, 24},
    +		{"(*SHA3).Sum", Method, 24},
    +		{"(*SHA3).UnmarshalBinary", Method, 24},
    +		{"(*SHA3).Write", Method, 24},
    +		{"(*SHAKE).AppendBinary", Method, 24},
    +		{"(*SHAKE).BlockSize", Method, 24},
    +		{"(*SHAKE).MarshalBinary", Method, 24},
    +		{"(*SHAKE).Read", Method, 24},
    +		{"(*SHAKE).Reset", Method, 24},
    +		{"(*SHAKE).UnmarshalBinary", Method, 24},
    +		{"(*SHAKE).Write", Method, 24},
    +		{"New224", Func, 24},
    +		{"New256", Func, 24},
    +		{"New384", Func, 24},
    +		{"New512", Func, 24},
    +		{"NewCSHAKE128", Func, 24},
    +		{"NewCSHAKE256", Func, 24},
    +		{"NewSHAKE128", Func, 24},
    +		{"NewSHAKE256", Func, 24},
    +		{"SHA3", Type, 24},
    +		{"SHAKE", Type, 24},
    +		{"Sum224", Func, 24},
    +		{"Sum256", Func, 24},
    +		{"Sum384", Func, 24},
    +		{"Sum512", Func, 24},
    +		{"SumSHAKE128", Func, 24},
    +		{"SumSHAKE256", Func, 24},
    +	},
     	"crypto/sha512": {
     		{"BlockSize", Const, 0},
     		{"New", Func, 0},
    @@ -787,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"ConstantTimeEq", Func, 0},
     		{"ConstantTimeLessOrEq", Func, 2},
     		{"ConstantTimeSelect", Func, 0},
    +		{"WithDataIndependentTiming", Func, 24},
     		{"XORBytes", Func, 20},
     	},
     	"crypto/tls": {
    @@ -820,6 +901,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
     		{"(*Dialer).Dial", Method, 15},
     		{"(*Dialer).DialContext", Method, 15},
    +		{"(*ECHRejectionError).Error", Method, 23},
     		{"(*QUICConn).Close", Method, 21},
     		{"(*QUICConn).ConnectionState", Method, 21},
     		{"(*QUICConn).HandleData", Method, 21},
    @@ -827,6 +909,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*QUICConn).SendSessionTicket", Method, 21},
     		{"(*QUICConn).SetTransportParameters", Method, 21},
     		{"(*QUICConn).Start", Method, 21},
    +		{"(*QUICConn).StoreSession", Method, 23},
     		{"(*SessionState).Bytes", Method, 21},
     		{"(AlertError).Error", Method, 21},
     		{"(ClientAuthType).String", Method, 15},
    @@ -861,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"ClientHelloInfo", Type, 4},
     		{"ClientHelloInfo.CipherSuites", Field, 4},
     		{"ClientHelloInfo.Conn", Field, 8},
    +		{"ClientHelloInfo.Extensions", Field, 24},
     		{"ClientHelloInfo.ServerName", Field, 4},
     		{"ClientHelloInfo.SignatureSchemes", Field, 8},
     		{"ClientHelloInfo.SupportedCurves", Field, 4},
    @@ -877,6 +961,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"Config.ClientSessionCache", Field, 3},
     		{"Config.CurvePreferences", Field, 3},
     		{"Config.DynamicRecordSizingDisabled", Field, 7},
    +		{"Config.EncryptedClientHelloConfigList", Field, 23},
    +		{"Config.EncryptedClientHelloKeys", Field, 24},
    +		{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
     		{"Config.GetCertificate", Field, 4},
     		{"Config.GetClientCertificate", Field, 8},
     		{"Config.GetConfigForClient", Field, 8},
    @@ -902,6 +989,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"ConnectionState", Type, 0},
     		{"ConnectionState.CipherSuite", Field, 0},
     		{"ConnectionState.DidResume", Field, 1},
    +		{"ConnectionState.ECHAccepted", Field, 23},
     		{"ConnectionState.HandshakeComplete", Field, 0},
     		{"ConnectionState.NegotiatedProtocol", Field, 0},
     		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
    @@ -925,7 +1013,13 @@ var PackageSymbols = map[string][]Symbol{
     		{"ECDSAWithP384AndSHA384", Const, 8},
     		{"ECDSAWithP521AndSHA512", Const, 8},
     		{"ECDSAWithSHA1", Const, 10},
    +		{"ECHRejectionError", Type, 23},
    +		{"ECHRejectionError.RetryConfigList", Field, 23},
     		{"Ed25519", Const, 13},
    +		{"EncryptedClientHelloKey", Type, 24},
    +		{"EncryptedClientHelloKey.Config", Field, 24},
    +		{"EncryptedClientHelloKey.PrivateKey", Field, 24},
    +		{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
     		{"InsecureCipherSuites", Func, 14},
     		{"Listen", Func, 0},
     		{"LoadX509KeyPair", Func, 0},
    @@ -943,6 +1037,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"ParseSessionState", Func, 21},
     		{"QUICClient", Func, 21},
     		{"QUICConfig", Type, 21},
    +		{"QUICConfig.EnableSessionEvents", Field, 23},
     		{"QUICConfig.TLSConfig", Field, 21},
     		{"QUICConn", Type, 21},
     		{"QUICEncryptionLevel", Type, 21},
    @@ -954,16 +1049,20 @@ var PackageSymbols = map[string][]Symbol{
     		{"QUICEvent.Data", Field, 21},
     		{"QUICEvent.Kind", Field, 21},
     		{"QUICEvent.Level", Field, 21},
    +		{"QUICEvent.SessionState", Field, 23},
     		{"QUICEvent.Suite", Field, 21},
     		{"QUICEventKind", Type, 21},
     		{"QUICHandshakeDone", Const, 21},
     		{"QUICNoEvent", Const, 21},
     		{"QUICRejectedEarlyData", Const, 21},
    +		{"QUICResumeSession", Const, 23},
     		{"QUICServer", Func, 21},
     		{"QUICSessionTicketOptions", Type, 21},
     		{"QUICSessionTicketOptions.EarlyData", Field, 21},
    +		{"QUICSessionTicketOptions.Extra", Field, 23},
     		{"QUICSetReadSecret", Const, 21},
     		{"QUICSetWriteSecret", Const, 21},
    +		{"QUICStoreSession", Const, 23},
     		{"QUICTransportParameters", Const, 21},
     		{"QUICTransportParametersRequired", Const, 21},
     		{"QUICWriteData", Const, 21},
    @@ -1019,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"VersionTLS12", Const, 2},
     		{"VersionTLS13", Const, 12},
     		{"X25519", Const, 8},
    +		{"X25519MLKEM768", Const, 24},
     		{"X509KeyPair", Func, 0},
     	},
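
The crypto/tls rows add Encrypted Client Hello (Go 1.23) and the X25519MLKEM768 hybrid key exchange (Go 1.24). A hypothetical client-side configuration sketch; `echConfigList` stands in for an opaque ECHConfigList obtained out of band (for example from DNS) and is not part of this diff:

```go
package main

import "crypto/tls"

// echConfigList is a placeholder for an ECHConfigList fetched out of band.
var echConfigList []byte

func main() {
	cfg := &tls.Config{
		// Prefer the hybrid post-quantum key exchange, falling back to X25519.
		CurvePreferences: []tls.CurveID{tls.X25519MLKEM768, tls.X25519},
		// Opt the client into Encrypted Client Hello.
		EncryptedClientHelloConfigList: echConfigList,
	}
	_ = cfg
}
```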
     	"crypto/x509": {
    @@ -1036,13 +1136,19 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Certificate).Verify", Method, 0},
     		{"(*Certificate).VerifyHostname", Method, 0},
     		{"(*CertificateRequest).CheckSignature", Method, 5},
    +		{"(*OID).UnmarshalBinary", Method, 23},
    +		{"(*OID).UnmarshalText", Method, 23},
     		{"(*RevocationList).CheckSignatureFrom", Method, 19},
     		{"(CertificateInvalidError).Error", Method, 0},
     		{"(ConstraintViolationError).Error", Method, 0},
     		{"(HostnameError).Error", Method, 0},
     		{"(InsecureAlgorithmError).Error", Method, 6},
    +		{"(OID).AppendBinary", Method, 24},
    +		{"(OID).AppendText", Method, 24},
     		{"(OID).Equal", Method, 22},
     		{"(OID).EqualASN1OID", Method, 22},
    +		{"(OID).MarshalBinary", Method, 23},
    +		{"(OID).MarshalText", Method, 23},
     		{"(OID).String", Method, 22},
     		{"(PublicKeyAlgorithm).String", Method, 10},
     		{"(SignatureAlgorithm).String", Method, 6},
    @@ -1067,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
     		{"Certificate.Extensions", Field, 2},
     		{"Certificate.ExtraExtensions", Field, 2},
     		{"Certificate.IPAddresses", Field, 1},
    +		{"Certificate.InhibitAnyPolicy", Field, 24},
    +		{"Certificate.InhibitAnyPolicyZero", Field, 24},
    +		{"Certificate.InhibitPolicyMapping", Field, 24},
    +		{"Certificate.InhibitPolicyMappingZero", Field, 24},
     		{"Certificate.IsCA", Field, 0},
     		{"Certificate.Issuer", Field, 0},
     		{"Certificate.IssuingCertificateURL", Field, 2},
    @@ -1083,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Certificate.PermittedURIDomains", Field, 10},
     		{"Certificate.Policies", Field, 22},
     		{"Certificate.PolicyIdentifiers", Field, 0},
    +		{"Certificate.PolicyMappings", Field, 24},
     		{"Certificate.PublicKey", Field, 0},
     		{"Certificate.PublicKeyAlgorithm", Field, 0},
     		{"Certificate.Raw", Field, 0},
    @@ -1090,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"Certificate.RawSubject", Field, 0},
     		{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
     		{"Certificate.RawTBSCertificate", Field, 0},
    +		{"Certificate.RequireExplicitPolicy", Field, 24},
    +		{"Certificate.RequireExplicitPolicyZero", Field, 24},
     		{"Certificate.SerialNumber", Field, 0},
     		{"Certificate.Signature", Field, 0},
     		{"Certificate.SignatureAlgorithm", Field, 0},
    @@ -1181,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"NameConstraintsWithoutSANs", Const, 10},
     		{"NameMismatch", Const, 8},
     		{"NewCertPool", Func, 0},
    +		{"NoValidChains", Const, 24},
     		{"NotAuthorizedToSign", Const, 0},
     		{"OID", Type, 22},
     		{"OIDFromInts", Func, 22},
    @@ -1196,11 +1310,15 @@ var PackageSymbols = map[string][]Symbol{
     		{"ParseCertificates", Func, 0},
     		{"ParseDERCRL", Func, 0},
     		{"ParseECPrivateKey", Func, 1},
    +		{"ParseOID", Func, 23},
     		{"ParsePKCS1PrivateKey", Func, 0},
     		{"ParsePKCS1PublicKey", Func, 10},
     		{"ParsePKCS8PrivateKey", Func, 0},
     		{"ParsePKIXPublicKey", Func, 0},
     		{"ParseRevocationList", Func, 19},
    +		{"PolicyMapping", Type, 24},
    +		{"PolicyMapping.IssuerDomainPolicy", Field, 24},
    +		{"PolicyMapping.SubjectDomainPolicy", Field, 24},
     		{"PublicKeyAlgorithm", Type, 0},
     		{"PureEd25519", Const, 13},
     		{"RSA", Const, 0},
    @@ -1247,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"UnknownPublicKeyAlgorithm", Const, 0},
     		{"UnknownSignatureAlgorithm", Const, 0},
     		{"VerifyOptions", Type, 0},
    +		{"VerifyOptions.CertificatePolicies", Field, 24},
     		{"VerifyOptions.CurrentTime", Field, 0},
     		{"VerifyOptions.DNSName", Field, 0},
     		{"VerifyOptions.Intermediates", Field, 0},
    @@ -1957,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*File).DynString", Method, 1},
     		{"(*File).DynValue", Method, 21},
     		{"(*File).DynamicSymbols", Method, 4},
    +		{"(*File).DynamicVersionNeeds", Method, 24},
    +		{"(*File).DynamicVersions", Method, 24},
     		{"(*File).ImportedLibraries", Method, 0},
     		{"(*File).ImportedSymbols", Method, 0},
     		{"(*File).Section", Method, 0},
    @@ -2030,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"(Type).String", Method, 0},
     		{"(Version).GoString", Method, 0},
     		{"(Version).String", Method, 0},
    +		{"(VersionIndex).Index", Method, 24},
    +		{"(VersionIndex).IsHidden", Method, 24},
     		{"ARM_MAGIC_TRAMP_NUMBER", Const, 0},
     		{"COMPRESS_HIOS", Const, 6},
     		{"COMPRESS_HIPROC", Const, 6},
    @@ -2222,6 +2345,19 @@ var PackageSymbols = map[string][]Symbol{
     		{"DynFlag", Type, 0},
     		{"DynFlag1", Type, 21},
     		{"DynTag", Type, 0},
    +		{"DynamicVersion", Type, 24},
    +		{"DynamicVersion.Deps", Field, 24},
    +		{"DynamicVersion.Flags", Field, 24},
    +		{"DynamicVersion.Index", Field, 24},
    +		{"DynamicVersion.Name", Field, 24},
    +		{"DynamicVersionDep", Type, 24},
    +		{"DynamicVersionDep.Dep", Field, 24},
    +		{"DynamicVersionDep.Flags", Field, 24},
    +		{"DynamicVersionDep.Index", Field, 24},
    +		{"DynamicVersionFlag", Type, 24},
    +		{"DynamicVersionNeed", Type, 24},
    +		{"DynamicVersionNeed.Name", Field, 24},
    +		{"DynamicVersionNeed.Needs", Field, 24},
     		{"EI_ABIVERSION", Const, 0},
     		{"EI_CLASS", Const, 0},
     		{"EI_DATA", Const, 0},
    @@ -2541,6 +2677,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"PT_NOTE", Const, 0},
     		{"PT_NULL", Const, 0},
     		{"PT_OPENBSD_BOOTDATA", Const, 16},
    +		{"PT_OPENBSD_NOBTCFI", Const, 23},
     		{"PT_OPENBSD_RANDOMIZE", Const, 16},
     		{"PT_OPENBSD_WXNEEDED", Const, 16},
     		{"PT_PAX_FLAGS", Const, 16},
    @@ -3620,13 +3757,16 @@ var PackageSymbols = map[string][]Symbol{
     		{"STT_COMMON", Const, 0},
     		{"STT_FILE", Const, 0},
     		{"STT_FUNC", Const, 0},
    +		{"STT_GNU_IFUNC", Const, 23},
     		{"STT_HIOS", Const, 0},
     		{"STT_HIPROC", Const, 0},
     		{"STT_LOOS", Const, 0},
     		{"STT_LOPROC", Const, 0},
     		{"STT_NOTYPE", Const, 0},
     		{"STT_OBJECT", Const, 0},
    +		{"STT_RELC", Const, 23},
     		{"STT_SECTION", Const, 0},
    +		{"STT_SRELC", Const, 23},
     		{"STT_TLS", Const, 0},
     		{"STV_DEFAULT", Const, 0},
     		{"STV_HIDDEN", Const, 0},
    @@ -3696,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"SymType", Type, 0},
     		{"SymVis", Type, 0},
     		{"Symbol", Type, 0},
    +		{"Symbol.HasVersion", Field, 24},
     		{"Symbol.Info", Field, 0},
     		{"Symbol.Library", Field, 13},
     		{"Symbol.Name", Field, 0},
    @@ -3704,8 +3845,13 @@ var PackageSymbols = map[string][]Symbol{
     		{"Symbol.Size", Field, 0},
     		{"Symbol.Value", Field, 0},
     		{"Symbol.Version", Field, 13},
    +		{"Symbol.VersionIndex", Field, 24},
     		{"Type", Type, 0},
    +		{"VER_FLG_BASE", Const, 24},
    +		{"VER_FLG_INFO", Const, 24},
    +		{"VER_FLG_WEAK", Const, 24},
     		{"Version", Type, 0},
    +		{"VersionIndex", Type, 24},
     	},
     	"debug/gosym": {
     		{"(*DecodingError).Error", Method, 0},
    @@ -4431,8 +4577,10 @@ var PackageSymbols = map[string][]Symbol{
     		{"FS", Type, 16},
     	},
     	"encoding": {
    +		{"BinaryAppender", Type, 24},
     		{"BinaryMarshaler", Type, 2},
     		{"BinaryUnmarshaler", Type, 2},
    +		{"TextAppender", Type, 24},
     		{"TextMarshaler", Type, 2},
     		{"TextUnmarshaler", Type, 2},
     	},
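
encoding.TextAppender and encoding.BinaryAppender describe the append-style marshaling methods that many types in this table gain in Go 1.24 (netip.Addr, time.Time, big.Int, and others). A small sketch using netip.Addr, whose (Addr).AppendText appears further down in this table:

```go
package main

import (
	"encoding"
	"fmt"
	"net/netip"
)

// appendField appends v's textual form to buf without the intermediate
// allocation that MarshalText would require.
func appendField(buf []byte, v encoding.TextAppender) []byte {
	buf, err := v.AppendText(buf)
	if err != nil {
		panic(err)
	}
	return buf
}

func main() {
	buf := []byte("addr=")
	buf = appendField(buf, netip.MustParseAddr("192.0.2.1"))
	fmt.Println(string(buf)) // addr=192.0.2.1
}
```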
    @@ -4544,11 +4692,14 @@ var PackageSymbols = map[string][]Symbol{
     		{"URLEncoding", Var, 0},
     	},
     	"encoding/binary": {
    +		{"Append", Func, 23},
     		{"AppendByteOrder", Type, 19},
     		{"AppendUvarint", Func, 19},
     		{"AppendVarint", Func, 19},
     		{"BigEndian", Var, 0},
     		{"ByteOrder", Type, 0},
    +		{"Decode", Func, 23},
    +		{"Encode", Func, 23},
     		{"LittleEndian", Var, 0},
     		{"MaxVarintLen16", Const, 0},
     		{"MaxVarintLen32", Const, 0},
    @@ -5308,6 +5459,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"ParenExpr.Rparen", Field, 0},
     		{"ParenExpr.X", Field, 0},
     		{"Pkg", Const, 0},
    +		{"Preorder", Func, 23},
     		{"Print", Func, 0},
     		{"RECV", Const, 0},
     		{"RangeStmt", Type, 0},
    @@ -5898,7 +6050,12 @@ var PackageSymbols = map[string][]Symbol{
     	},
     	"go/types": {
     		{"(*Alias).Obj", Method, 22},
    +		{"(*Alias).Origin", Method, 23},
    +		{"(*Alias).Rhs", Method, 23},
    +		{"(*Alias).SetTypeParams", Method, 23},
     		{"(*Alias).String", Method, 22},
    +		{"(*Alias).TypeArgs", Method, 23},
    +		{"(*Alias).TypeParams", Method, 23},
     		{"(*Alias).Underlying", Method, 22},
     		{"(*ArgumentError).Error", Method, 18},
     		{"(*ArgumentError).Unwrap", Method, 18},
    @@ -5943,6 +6100,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Func).Pkg", Method, 5},
     		{"(*Func).Pos", Method, 5},
     		{"(*Func).Scope", Method, 5},
    +		{"(*Func).Signature", Method, 23},
     		{"(*Func).String", Method, 5},
     		{"(*Func).Type", Method, 5},
     		{"(*Info).ObjectOf", Method, 5},
    @@ -5952,13 +6110,16 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Interface).Complete", Method, 5},
     		{"(*Interface).Embedded", Method, 5},
     		{"(*Interface).EmbeddedType", Method, 11},
    +		{"(*Interface).EmbeddedTypes", Method, 24},
     		{"(*Interface).Empty", Method, 5},
     		{"(*Interface).ExplicitMethod", Method, 5},
    +		{"(*Interface).ExplicitMethods", Method, 24},
     		{"(*Interface).IsComparable", Method, 18},
     		{"(*Interface).IsImplicit", Method, 18},
     		{"(*Interface).IsMethodSet", Method, 18},
     		{"(*Interface).MarkImplicit", Method, 18},
     		{"(*Interface).Method", Method, 5},
    +		{"(*Interface).Methods", Method, 24},
     		{"(*Interface).NumEmbeddeds", Method, 5},
     		{"(*Interface).NumExplicitMethods", Method, 5},
     		{"(*Interface).NumMethods", Method, 5},
    @@ -5979,9 +6140,11 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*MethodSet).At", Method, 5},
     		{"(*MethodSet).Len", Method, 5},
     		{"(*MethodSet).Lookup", Method, 5},
    +		{"(*MethodSet).Methods", Method, 24},
     		{"(*MethodSet).String", Method, 5},
     		{"(*Named).AddMethod", Method, 5},
     		{"(*Named).Method", Method, 5},
    +		{"(*Named).Methods", Method, 24},
     		{"(*Named).NumMethods", Method, 5},
     		{"(*Named).Obj", Method, 5},
     		{"(*Named).Origin", Method, 18},
    @@ -6022,6 +6185,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Pointer).String", Method, 5},
     		{"(*Pointer).Underlying", Method, 5},
     		{"(*Scope).Child", Method, 5},
    +		{"(*Scope).Children", Method, 24},
     		{"(*Scope).Contains", Method, 5},
     		{"(*Scope).End", Method, 5},
     		{"(*Scope).Innermost", Method, 5},
    @@ -6057,6 +6221,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*StdSizes).Offsetsof", Method, 5},
     		{"(*StdSizes).Sizeof", Method, 5},
     		{"(*Struct).Field", Method, 5},
    +		{"(*Struct).Fields", Method, 24},
     		{"(*Struct).NumFields", Method, 5},
     		{"(*Struct).String", Method, 5},
     		{"(*Struct).Tag", Method, 5},
    @@ -6068,8 +6233,10 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Tuple).Len", Method, 5},
     		{"(*Tuple).String", Method, 5},
     		{"(*Tuple).Underlying", Method, 5},
    +		{"(*Tuple).Variables", Method, 24},
     		{"(*TypeList).At", Method, 18},
     		{"(*TypeList).Len", Method, 18},
    +		{"(*TypeList).Types", Method, 24},
     		{"(*TypeName).Exported", Method, 5},
     		{"(*TypeName).Id", Method, 5},
     		{"(*TypeName).IsAlias", Method, 9},
    @@ -6087,9 +6254,11 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*TypeParam).Underlying", Method, 18},
     		{"(*TypeParamList).At", Method, 18},
     		{"(*TypeParamList).Len", Method, 18},
    +		{"(*TypeParamList).TypeParams", Method, 24},
     		{"(*Union).Len", Method, 18},
     		{"(*Union).String", Method, 18},
     		{"(*Union).Term", Method, 18},
    +		{"(*Union).Terms", Method, 24},
     		{"(*Union).Underlying", Method, 18},
     		{"(*Var).Anonymous", Method, 5},
     		{"(*Var).Embedded", Method, 11},
    @@ -6360,10 +6529,12 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Hash).WriteByte", Method, 14},
     		{"(*Hash).WriteString", Method, 14},
     		{"Bytes", Func, 19},
    +		{"Comparable", Func, 24},
     		{"Hash", Type, 14},
     		{"MakeSeed", Func, 14},
     		{"Seed", Type, 14},
     		{"String", Func, 19},
    +		{"WriteComparable", Func, 24},
     	},
     	"html": {
     		{"EscapeString", Func, 0},
    @@ -6948,6 +7119,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"FormatFileInfo", Func, 21},
     		{"Glob", Func, 16},
     		{"GlobFS", Type, 16},
    +		{"Lstat", Func, 25},
     		{"ModeAppend", Const, 16},
     		{"ModeCharDevice", Const, 16},
     		{"ModeDevice", Const, 16},
    @@ -6972,6 +7144,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"ReadDirFile", Type, 16},
     		{"ReadFile", Func, 16},
     		{"ReadFileFS", Type, 16},
    +		{"ReadLink", Func, 25},
    +		{"ReadLinkFS", Type, 25},
     		{"SkipAll", Var, 20},
     		{"SkipDir", Var, 16},
     		{"Stat", Func, 16},
    @@ -6992,6 +7166,12 @@ var PackageSymbols = map[string][]Symbol{
     		{"TempFile", Func, 0},
     		{"WriteFile", Func, 0},
     	},
    +	"iter": {
    +		{"Pull", Func, 23},
    +		{"Pull2", Func, 23},
    +		{"Seq", Type, 23},
    +		{"Seq2", Type, 23},
    +	},
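
The new iter package (Go 1.23) defines the Seq/Seq2 push-iterator types and the Pull/Pull2 adapters that the maps and slices additions below build on. A minimal sketch:

```go
package main

import (
	"fmt"
	"iter"
)

// evens returns a push iterator over the first n even numbers.
func evens(n int) iter.Seq[int] {
	return func(yield func(int) bool) {
		for i := 0; i < n; i++ {
			if !yield(2 * i) {
				return
			}
		}
	}
}

func main() {
	// range-over-func consumes the Seq directly.
	for v := range evens(3) {
		fmt.Println(v) // 0, 2, 4
	}

	// Pull converts the push iterator into a next/stop pair.
	next, stop := iter.Pull(evens(3))
	defer stop()
	if v, ok := next(); ok {
		fmt.Println(v) // 0
	}
}
```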
     	"log": {
     		{"(*Logger).Fatal", Method, 0},
     		{"(*Logger).Fatalf", Method, 0},
    @@ -7044,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*JSONHandler).WithGroup", Method, 21},
     		{"(*Level).UnmarshalJSON", Method, 21},
     		{"(*Level).UnmarshalText", Method, 21},
    +		{"(*LevelVar).AppendText", Method, 24},
     		{"(*LevelVar).Level", Method, 21},
     		{"(*LevelVar).MarshalText", Method, 21},
     		{"(*LevelVar).Set", Method, 21},
    @@ -7072,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(Attr).Equal", Method, 21},
     		{"(Attr).String", Method, 21},
     		{"(Kind).String", Method, 21},
    +		{"(Level).AppendText", Method, 24},
     		{"(Level).Level", Method, 21},
     		{"(Level).MarshalJSON", Method, 21},
     		{"(Level).MarshalText", Method, 21},
    @@ -7102,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Debug", Func, 21},
     		{"DebugContext", Func, 21},
     		{"Default", Func, 21},
    +		{"DiscardHandler", Var, 24},
     		{"Duration", Func, 21},
     		{"DurationValue", Func, 21},
     		{"Error", Func, 21},
    @@ -7222,11 +7405,16 @@ var PackageSymbols = map[string][]Symbol{
     		{"Writer", Type, 0},
     	},
     	"maps": {
    +		{"All", Func, 23},
     		{"Clone", Func, 21},
    +		{"Collect", Func, 23},
     		{"Copy", Func, 21},
     		{"DeleteFunc", Func, 21},
     		{"Equal", Func, 21},
     		{"EqualFunc", Func, 21},
    +		{"Insert", Func, 23},
    +		{"Keys", Func, 23},
    +		{"Values", Func, 23},
     	},
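
The maps additions (All, Keys, Values, Collect, Insert) are the iterator bridges for maps added in Go 1.23. For example:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// Keys and Values return iter.Seq views over the map.
	keys := slices.Sorted(maps.Keys(m))
	fmt.Println(keys) // [a b]

	// Collect materializes an iter.Seq2 back into a fresh map.
	clone := maps.Collect(maps.All(m))
	fmt.Println(len(clone)) // 2
}
```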
     	"math": {
     		{"Abs", Func, 0},
    @@ -7332,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Float).Acc", Method, 5},
     		{"(*Float).Add", Method, 5},
     		{"(*Float).Append", Method, 5},
    +		{"(*Float).AppendText", Method, 24},
     		{"(*Float).Cmp", Method, 5},
     		{"(*Float).Copy", Method, 5},
     		{"(*Float).Float32", Method, 5},
    @@ -7378,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Int).And", Method, 0},
     		{"(*Int).AndNot", Method, 0},
     		{"(*Int).Append", Method, 6},
    +		{"(*Int).AppendText", Method, 24},
     		{"(*Int).Binomial", Method, 0},
     		{"(*Int).Bit", Method, 0},
     		{"(*Int).BitLen", Method, 0},
    @@ -7434,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Int).Xor", Method, 0},
     		{"(*Rat).Abs", Method, 0},
     		{"(*Rat).Add", Method, 0},
    +		{"(*Rat).AppendText", Method, 24},
     		{"(*Rat).Cmp", Method, 0},
     		{"(*Rat).Denom", Method, 0},
     		{"(*Rat).Float32", Method, 4},
    @@ -7616,10 +7807,13 @@ var PackageSymbols = map[string][]Symbol{
     		{"Zipf", Type, 0},
     	},
     	"math/rand/v2": {
    +		{"(*ChaCha8).AppendBinary", Method, 24},
     		{"(*ChaCha8).MarshalBinary", Method, 22},
    +		{"(*ChaCha8).Read", Method, 23},
     		{"(*ChaCha8).Seed", Method, 22},
     		{"(*ChaCha8).Uint64", Method, 22},
     		{"(*ChaCha8).UnmarshalBinary", Method, 22},
    +		{"(*PCG).AppendBinary", Method, 24},
     		{"(*PCG).MarshalBinary", Method, 22},
     		{"(*PCG).Seed", Method, 22},
     		{"(*PCG).Uint64", Method, 22},
    @@ -7636,6 +7830,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Rand).NormFloat64", Method, 22},
     		{"(*Rand).Perm", Method, 22},
     		{"(*Rand).Shuffle", Method, 22},
    +		{"(*Rand).Uint", Method, 23},
     		{"(*Rand).Uint32", Method, 22},
     		{"(*Rand).Uint32N", Method, 22},
     		{"(*Rand).Uint64", Method, 22},
    @@ -7663,6 +7858,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Rand", Type, 22},
     		{"Shuffle", Func, 22},
     		{"Source", Type, 22},
    +		{"Uint", Func, 23},
     		{"Uint32", Func, 22},
     		{"Uint32N", Func, 22},
     		{"Uint64", Func, 22},
    @@ -7743,6 +7939,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*DNSError).Error", Method, 0},
     		{"(*DNSError).Temporary", Method, 0},
     		{"(*DNSError).Timeout", Method, 0},
    +		{"(*DNSError).Unwrap", Method, 23},
     		{"(*Dialer).Dial", Method, 1},
     		{"(*Dialer).DialContext", Method, 7},
     		{"(*Dialer).MultipathTCP", Method, 21},
    @@ -7809,6 +8006,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*TCPConn).RemoteAddr", Method, 0},
     		{"(*TCPConn).SetDeadline", Method, 0},
     		{"(*TCPConn).SetKeepAlive", Method, 0},
    +		{"(*TCPConn).SetKeepAliveConfig", Method, 23},
     		{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
     		{"(*TCPConn).SetLinger", Method, 0},
     		{"(*TCPConn).SetNoDelay", Method, 0},
    @@ -7883,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*UnixListener).SyscallConn", Method, 10},
     		{"(Flags).String", Method, 0},
     		{"(HardwareAddr).String", Method, 0},
    +		{"(IP).AppendText", Method, 24},
     		{"(IP).DefaultMask", Method, 0},
     		{"(IP).Equal", Method, 0},
     		{"(IP).IsGlobalUnicast", Method, 0},
    @@ -7922,6 +8121,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"DNSError.IsTimeout", Field, 0},
     		{"DNSError.Name", Field, 0},
     		{"DNSError.Server", Field, 0},
    +		{"DNSError.UnwrapErr", Field, 23},
     		{"DefaultResolver", Var, 8},
     		{"Dial", Func, 0},
     		{"DialIP", Func, 0},
    @@ -7937,6 +8137,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Dialer.DualStack", Field, 2},
     		{"Dialer.FallbackDelay", Field, 5},
     		{"Dialer.KeepAlive", Field, 3},
    +		{"Dialer.KeepAliveConfig", Field, 23},
     		{"Dialer.LocalAddr", Field, 1},
     		{"Dialer.Resolver", Field, 8},
     		{"Dialer.Timeout", Field, 1},
    @@ -7989,10 +8190,16 @@ var PackageSymbols = map[string][]Symbol{
     		{"Interfaces", Func, 0},
     		{"InvalidAddrError", Type, 0},
     		{"JoinHostPort", Func, 0},
    +		{"KeepAliveConfig", Type, 23},
    +		{"KeepAliveConfig.Count", Field, 23},
    +		{"KeepAliveConfig.Enable", Field, 23},
    +		{"KeepAliveConfig.Idle", Field, 23},
    +		{"KeepAliveConfig.Interval", Field, 23},
     		{"Listen", Func, 0},
     		{"ListenConfig", Type, 11},
     		{"ListenConfig.Control", Field, 11},
     		{"ListenConfig.KeepAlive", Field, 13},
    +		{"ListenConfig.KeepAliveConfig", Field, 23},
     		{"ListenIP", Func, 0},
     		{"ListenMulticastUDP", Func, 0},
     		{"ListenPacket", Func, 0},
    @@ -8075,12 +8282,16 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*MaxBytesError).Error", Method, 19},
     		{"(*ProtocolError).Error", Method, 0},
     		{"(*ProtocolError).Is", Method, 21},
    +		{"(*Protocols).SetHTTP1", Method, 24},
    +		{"(*Protocols).SetHTTP2", Method, 24},
    +		{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
     		{"(*Request).AddCookie", Method, 0},
     		{"(*Request).BasicAuth", Method, 4},
     		{"(*Request).Clone", Method, 13},
     		{"(*Request).Context", Method, 7},
     		{"(*Request).Cookie", Method, 0},
     		{"(*Request).Cookies", Method, 0},
    +		{"(*Request).CookiesNamed", Method, 23},
     		{"(*Request).FormFile", Method, 0},
     		{"(*Request).FormValue", Method, 0},
     		{"(*Request).MultipartReader", Method, 0},
    @@ -8133,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{
     		{"(Header).Values", Method, 14},
     		{"(Header).Write", Method, 0},
     		{"(Header).WriteSubset", Method, 0},
    +		{"(Protocols).HTTP1", Method, 24},
    +		{"(Protocols).HTTP2", Method, 24},
    +		{"(Protocols).String", Method, 24},
    +		{"(Protocols).UnencryptedHTTP2", Method, 24},
     		{"AllowQuerySemicolons", Func, 17},
     		{"CanonicalHeaderKey", Func, 0},
     		{"Client", Type, 0},
    @@ -8148,7 +8363,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"Cookie.HttpOnly", Field, 0},
     		{"Cookie.MaxAge", Field, 0},
     		{"Cookie.Name", Field, 0},
    +		{"Cookie.Partitioned", Field, 23},
     		{"Cookie.Path", Field, 0},
    +		{"Cookie.Quoted", Field, 23},
     		{"Cookie.Raw", Field, 0},
     		{"Cookie.RawExpires", Field, 0},
     		{"Cookie.SameSite", Field, 11},
    @@ -8193,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{
     		{"FileSystem", Type, 0},
     		{"Flusher", Type, 0},
     		{"Get", Func, 0},
    +		{"HTTP2Config", Type, 24},
    +		{"HTTP2Config.CountError", Field, 24},
    +		{"HTTP2Config.MaxConcurrentStreams", Field, 24},
    +		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
    +		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
    +		{"HTTP2Config.MaxReadFrameSize", Field, 24},
    +		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
    +		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
    +		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
    +		{"HTTP2Config.PingTimeout", Field, 24},
    +		{"HTTP2Config.SendPingTimeout", Field, 24},
    +		{"HTTP2Config.WriteByteTimeout", Field, 24},
     		{"Handle", Func, 0},
     		{"HandleFunc", Func, 0},
     		{"Handler", Type, 0},
    @@ -8225,12 +8454,15 @@ var PackageSymbols = map[string][]Symbol{
     		{"NoBody", Var, 8},
     		{"NotFound", Func, 0},
     		{"NotFoundHandler", Func, 0},
    +		{"ParseCookie", Func, 23},
     		{"ParseHTTPVersion", Func, 0},
    +		{"ParseSetCookie", Func, 23},
     		{"ParseTime", Func, 1},
     		{"Post", Func, 0},
     		{"PostForm", Func, 0},
     		{"ProtocolError", Type, 0},
     		{"ProtocolError.ErrorString", Field, 0},
    +		{"Protocols", Type, 24},
     		{"ProxyFromEnvironment", Func, 0},
     		{"ProxyURL", Func, 0},
     		{"PushOptions", Type, 8},
    @@ -8252,6 +8484,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Request.Host", Field, 0},
     		{"Request.Method", Field, 0},
     		{"Request.MultipartForm", Field, 0},
    +		{"Request.Pattern", Field, 23},
     		{"Request.PostForm", Field, 1},
     		{"Request.Proto", Field, 0},
     		{"Request.ProtoMajor", Field, 0},
    @@ -8299,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{
     		{"Server.ConnState", Field, 3},
     		{"Server.DisableGeneralOptionsHandler", Field, 20},
     		{"Server.ErrorLog", Field, 3},
    +		{"Server.HTTP2", Field, 24},
     		{"Server.Handler", Field, 0},
     		{"Server.IdleTimeout", Field, 8},
     		{"Server.MaxHeaderBytes", Field, 0},
    +		{"Server.Protocols", Field, 24},
     		{"Server.ReadHeaderTimeout", Field, 8},
     		{"Server.ReadTimeout", Field, 0},
     		{"Server.TLSConfig", Field, 0},
    @@ -8391,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{
     		{"Transport.ExpectContinueTimeout", Field, 6},
     		{"Transport.ForceAttemptHTTP2", Field, 13},
     		{"Transport.GetProxyConnectHeader", Field, 16},
    +		{"Transport.HTTP2", Field, 24},
     		{"Transport.IdleConnTimeout", Field, 7},
     		{"Transport.MaxConnsPerHost", Field, 11},
     		{"Transport.MaxIdleConns", Field, 7},
     		{"Transport.MaxIdleConnsPerHost", Field, 0},
     		{"Transport.MaxResponseHeaderBytes", Field, 7},
     		{"Transport.OnProxyConnectResponse", Field, 20},
    +		{"Transport.Protocols", Field, 24},
     		{"Transport.Proxy", Field, 0},
     		{"Transport.ProxyConnectHeader", Field, 8},
     		{"Transport.ReadBufferSize", Field, 13},
    @@ -8453,6 +8690,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"DefaultRemoteAddr", Const, 0},
     		{"NewRecorder", Func, 0},
     		{"NewRequest", Func, 7},
    +		{"NewRequestWithContext", Func, 23},
     		{"NewServer", Func, 0},
     		{"NewTLSServer", Func, 0},
     		{"NewUnstartedServer", Func, 0},
    @@ -8583,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*AddrPort).UnmarshalText", Method, 18},
     		{"(*Prefix).UnmarshalBinary", Method, 18},
     		{"(*Prefix).UnmarshalText", Method, 18},
    +		{"(Addr).AppendBinary", Method, 24},
    +		{"(Addr).AppendText", Method, 24},
     		{"(Addr).AppendTo", Method, 18},
     		{"(Addr).As16", Method, 18},
     		{"(Addr).As4", Method, 18},
    @@ -8613,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"(Addr).WithZone", Method, 18},
     		{"(Addr).Zone", Method, 18},
     		{"(AddrPort).Addr", Method, 18},
    +		{"(AddrPort).AppendBinary", Method, 24},
    +		{"(AddrPort).AppendText", Method, 24},
     		{"(AddrPort).AppendTo", Method, 18},
     		{"(AddrPort).Compare", Method, 22},
     		{"(AddrPort).IsValid", Method, 18},
    @@ -8621,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"(AddrPort).Port", Method, 18},
     		{"(AddrPort).String", Method, 18},
     		{"(Prefix).Addr", Method, 18},
    +		{"(Prefix).AppendBinary", Method, 24},
    +		{"(Prefix).AppendText", Method, 24},
     		{"(Prefix).AppendTo", Method, 18},
     		{"(Prefix).Bits", Method, 18},
     		{"(Prefix).Contains", Method, 18},
    @@ -8805,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Error).Temporary", Method, 6},
     		{"(*Error).Timeout", Method, 6},
     		{"(*Error).Unwrap", Method, 13},
    +		{"(*URL).AppendBinary", Method, 24},
     		{"(*URL).EscapedFragment", Method, 15},
     		{"(*URL).EscapedPath", Method, 5},
     		{"(*URL).Hostname", Method, 8},
    @@ -8904,6 +9149,19 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*ProcessState).SysUsage", Method, 0},
     		{"(*ProcessState).SystemTime", Method, 0},
     		{"(*ProcessState).UserTime", Method, 0},
    +		{"(*Root).Chmod", Method, 25},
    +		{"(*Root).Chown", Method, 25},
    +		{"(*Root).Close", Method, 24},
    +		{"(*Root).Create", Method, 24},
    +		{"(*Root).FS", Method, 24},
    +		{"(*Root).Lstat", Method, 24},
    +		{"(*Root).Mkdir", Method, 24},
    +		{"(*Root).Name", Method, 24},
    +		{"(*Root).Open", Method, 24},
    +		{"(*Root).OpenFile", Method, 24},
    +		{"(*Root).OpenRoot", Method, 24},
    +		{"(*Root).Remove", Method, 24},
    +		{"(*Root).Stat", Method, 24},
     		{"(*SyscallError).Error", Method, 0},
     		{"(*SyscallError).Timeout", Method, 10},
     		{"(*SyscallError).Unwrap", Method, 13},
    @@ -8917,6 +9175,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Chown", Func, 0},
     		{"Chtimes", Func, 0},
     		{"Clearenv", Func, 0},
    +		{"CopyFS", Func, 23},
     		{"Create", Func, 0},
     		{"CreateTemp", Func, 16},
     		{"DevNull", Const, 0},
    @@ -8996,6 +9255,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"O_WRONLY", Const, 0},
     		{"Open", Func, 0},
     		{"OpenFile", Func, 0},
    +		{"OpenInRoot", Func, 24},
    +		{"OpenRoot", Func, 24},
     		{"PathError", Type, 0},
     		{"PathError.Err", Field, 0},
     		{"PathError.Op", Field, 0},
    @@ -9017,6 +9278,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Remove", Func, 0},
     		{"RemoveAll", Func, 0},
     		{"Rename", Func, 0},
    +		{"Root", Type, 24},
     		{"SEEK_CUR", Const, 0},
     		{"SEEK_END", Const, 0},
     		{"SEEK_SET", Const, 0},
    @@ -9150,6 +9412,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"IsLocal", Func, 20},
     		{"Join", Func, 0},
     		{"ListSeparator", Const, 0},
    +		{"Localize", Func, 23},
     		{"Match", Func, 0},
     		{"Rel", Func, 0},
     		{"Separator", Const, 0},
    @@ -9232,6 +9495,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"(Value).Pointer", Method, 0},
     		{"(Value).Recv", Method, 0},
     		{"(Value).Send", Method, 0},
    +		{"(Value).Seq", Method, 23},
    +		{"(Value).Seq2", Method, 23},
     		{"(Value).Set", Method, 0},
     		{"(Value).SetBool", Method, 0},
     		{"(Value).SetBytes", Method, 0},
    @@ -9314,6 +9579,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"SelectSend", Const, 1},
     		{"SendDir", Const, 0},
     		{"Slice", Const, 0},
    +		{"SliceAt", Func, 23},
     		{"SliceHeader", Type, 0},
     		{"SliceHeader.Cap", Field, 0},
     		{"SliceHeader.Data", Field, 0},
    @@ -9354,6 +9620,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Zero", Func, 0},
     	},
     	"regexp": {
    +		{"(*Regexp).AppendText", Method, 24},
     		{"(*Regexp).Copy", Method, 6},
     		{"(*Regexp).Expand", Method, 0},
     		{"(*Regexp).ExpandString", Method, 0},
    @@ -9534,6 +9801,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*StackRecord).Stack", Method, 0},
     		{"(*TypeAssertionError).Error", Method, 0},
     		{"(*TypeAssertionError).RuntimeError", Method, 0},
    +		{"(Cleanup).Stop", Method, 24},
    +		{"AddCleanup", Func, 24},
     		{"BlockProfile", Func, 1},
     		{"BlockProfileRecord", Type, 1},
     		{"BlockProfileRecord.Count", Field, 1},
    @@ -9544,6 +9813,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Caller", Func, 0},
     		{"Callers", Func, 0},
     		{"CallersFrames", Func, 7},
    +		{"Cleanup", Type, 24},
     		{"Compiler", Const, 0},
     		{"Error", Type, 0},
     		{"Frame", Type, 7},
    @@ -9655,6 +9925,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"BuildSetting", Type, 18},
     		{"BuildSetting.Key", Field, 18},
     		{"BuildSetting.Value", Field, 18},
    +		{"CrashOptions", Type, 23},
     		{"FreeOSMemory", Func, 1},
     		{"GCStats", Type, 1},
     		{"GCStats.LastGC", Field, 1},
    @@ -9672,6 +9943,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"PrintStack", Func, 0},
     		{"ReadBuildInfo", Func, 12},
     		{"ReadGCStats", Func, 1},
    +		{"SetCrashOutput", Func, 23},
     		{"SetGCPercent", Func, 1},
     		{"SetMaxStack", Func, 2},
     		{"SetMaxThreads", Func, 2},
    @@ -9742,10 +10014,15 @@ var PackageSymbols = map[string][]Symbol{
     		{"WithRegion", Func, 11},
     	},
     	"slices": {
    +		{"All", Func, 23},
    +		{"AppendSeq", Func, 23},
    +		{"Backward", Func, 23},
     		{"BinarySearch", Func, 21},
     		{"BinarySearchFunc", Func, 21},
    +		{"Chunk", Func, 23},
     		{"Clip", Func, 21},
     		{"Clone", Func, 21},
    +		{"Collect", Func, 23},
     		{"Compact", Func, 21},
     		{"CompactFunc", Func, 21},
     		{"Compare", Func, 21},
    @@ -9767,11 +10044,16 @@ var PackageSymbols = map[string][]Symbol{
     		{"MaxFunc", Func, 21},
     		{"Min", Func, 21},
     		{"MinFunc", Func, 21},
    +		{"Repeat", Func, 23},
     		{"Replace", Func, 21},
     		{"Reverse", Func, 21},
     		{"Sort", Func, 21},
     		{"SortFunc", Func, 21},
     		{"SortStableFunc", Func, 21},
    +		{"Sorted", Func, 23},
    +		{"SortedFunc", Func, 23},
    +		{"SortedStableFunc", Func, 23},
    +		{"Values", Func, 23},
     	},
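
The slices additions (All, Values, Collect, Sorted, Chunk, and friends) are the slice-side iterator helpers from Go 1.23. For example:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	s := []int{3, 1, 2, 5, 4}

	// Values yields the elements; Sorted collects an iterator into a sorted slice.
	sorted := slices.Sorted(slices.Values(s))
	fmt.Println(sorted) // [1 2 3 4 5]

	// Chunk yields successive sub-slices of length at most 2.
	for c := range slices.Chunk(sorted, 2) {
		fmt.Println(c) // [1 2], [3 4], [5]
	}
}
```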
     	"sort": {
     		{"(Float64Slice).Len", Method, 0},
    @@ -9894,6 +10176,8 @@ var PackageSymbols = map[string][]Symbol{
     		{"EqualFold", Func, 0},
     		{"Fields", Func, 0},
     		{"FieldsFunc", Func, 0},
    +		{"FieldsFuncSeq", Func, 24},
    +		{"FieldsSeq", Func, 24},
     		{"HasPrefix", Func, 0},
     		{"HasSuffix", Func, 0},
     		{"Index", Func, 0},
    @@ -9906,6 +10190,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"LastIndexAny", Func, 0},
     		{"LastIndexByte", Func, 5},
     		{"LastIndexFunc", Func, 0},
    +		{"Lines", Func, 24},
     		{"Map", Func, 0},
     		{"NewReader", Func, 0},
     		{"NewReplacer", Func, 0},
    @@ -9917,7 +10202,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"Split", Func, 0},
     		{"SplitAfter", Func, 0},
     		{"SplitAfterN", Func, 0},
    +		{"SplitAfterSeq", Func, 24},
     		{"SplitN", Func, 0},
    +		{"SplitSeq", Func, 24},
     		{"Title", Func, 0},
     		{"ToLower", Func, 0},
     		{"ToLowerSpecial", Func, 0},
    @@ -9936,10 +10223,14 @@ var PackageSymbols = map[string][]Symbol{
     		{"TrimSpace", Func, 0},
     		{"TrimSuffix", Func, 1},
     	},
    +	"structs": {
    +		{"HostLayout", Type, 23},
    +	},
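
The new structs package (Go 1.23) holds marker types; HostLayout is embedded as a zero-size blank field to pin a struct to the host platform's layout rules, as in this sketch:

```go
package main

import "structs"

// cDevice mirrors a struct shared with host APIs; the zero-sized HostLayout
// field marks it as requiring host-conforming field layout.
type cDevice struct {
	_      structs.HostLayout
	Vendor uint16
	Device uint16
	Flags  uint32
}

func main() {
	_ = cDevice{Vendor: 0x8086}
}
```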
     	"sync": {
     		{"(*Cond).Broadcast", Method, 0},
     		{"(*Cond).Signal", Method, 0},
     		{"(*Cond).Wait", Method, 0},
    +		{"(*Map).Clear", Method, 23},
     		{"(*Map).CompareAndDelete", Method, 20},
     		{"(*Map).CompareAndSwap", Method, 20},
     		{"(*Map).Delete", Method, 9},
    @@ -9986,13 +10277,17 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Bool).Store", Method, 19},
     		{"(*Bool).Swap", Method, 19},
     		{"(*Int32).Add", Method, 19},
    +		{"(*Int32).And", Method, 23},
     		{"(*Int32).CompareAndSwap", Method, 19},
     		{"(*Int32).Load", Method, 19},
    +		{"(*Int32).Or", Method, 23},
     		{"(*Int32).Store", Method, 19},
     		{"(*Int32).Swap", Method, 19},
     		{"(*Int64).Add", Method, 19},
    +		{"(*Int64).And", Method, 23},
     		{"(*Int64).CompareAndSwap", Method, 19},
     		{"(*Int64).Load", Method, 19},
    +		{"(*Int64).Or", Method, 23},
     		{"(*Int64).Store", Method, 19},
     		{"(*Int64).Swap", Method, 19},
     		{"(*Pointer).CompareAndSwap", Method, 19},
    @@ -10000,18 +10295,24 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*Pointer).Store", Method, 19},
     		{"(*Pointer).Swap", Method, 19},
     		{"(*Uint32).Add", Method, 19},
    +		{"(*Uint32).And", Method, 23},
     		{"(*Uint32).CompareAndSwap", Method, 19},
     		{"(*Uint32).Load", Method, 19},
    +		{"(*Uint32).Or", Method, 23},
     		{"(*Uint32).Store", Method, 19},
     		{"(*Uint32).Swap", Method, 19},
     		{"(*Uint64).Add", Method, 19},
    +		{"(*Uint64).And", Method, 23},
     		{"(*Uint64).CompareAndSwap", Method, 19},
     		{"(*Uint64).Load", Method, 19},
    +		{"(*Uint64).Or", Method, 23},
     		{"(*Uint64).Store", Method, 19},
     		{"(*Uint64).Swap", Method, 19},
     		{"(*Uintptr).Add", Method, 19},
    +		{"(*Uintptr).And", Method, 23},
     		{"(*Uintptr).CompareAndSwap", Method, 19},
     		{"(*Uintptr).Load", Method, 19},
    +		{"(*Uintptr).Or", Method, 23},
     		{"(*Uintptr).Store", Method, 19},
     		{"(*Uintptr).Swap", Method, 19},
     		{"(*Value).CompareAndSwap", Method, 17},
    @@ -10023,6 +10324,11 @@ var PackageSymbols = map[string][]Symbol{
     		{"AddUint32", Func, 0},
     		{"AddUint64", Func, 0},
     		{"AddUintptr", Func, 0},
    +		{"AndInt32", Func, 23},
    +		{"AndInt64", Func, 23},
    +		{"AndUint32", Func, 23},
    +		{"AndUint64", Func, 23},
    +		{"AndUintptr", Func, 23},
     		{"Bool", Type, 19},
     		{"CompareAndSwapInt32", Func, 0},
     		{"CompareAndSwapInt64", Func, 0},
    @@ -10038,6 +10344,11 @@ var PackageSymbols = map[string][]Symbol{
     		{"LoadUint32", Func, 0},
     		{"LoadUint64", Func, 0},
     		{"LoadUintptr", Func, 0},
    +		{"OrInt32", Func, 23},
    +		{"OrInt64", Func, 23},
    +		{"OrUint32", Func, 23},
    +		{"OrUint64", Func, 23},
    +		{"OrUintptr", Func, 23},
     		{"Pointer", Type, 19},
     		{"StoreInt32", Func, 0},
     		{"StoreInt64", Func, 0},
    @@ -16200,6 +16511,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"WSAEACCES", Const, 2},
     		{"WSAECONNABORTED", Const, 9},
     		{"WSAECONNRESET", Const, 3},
    +		{"WSAENOPROTOOPT", Const, 23},
     		{"WSAEnumProtocols", Func, 2},
     		{"WSAID_CONNECTEX", Var, 1},
     		{"WSAIoctl", Func, 0},
    @@ -16308,7 +16620,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"ValueOf", Func, 0},
     	},
     	"testing": {
    +		{"(*B).Chdir", Method, 24},
     		{"(*B).Cleanup", Method, 14},
    +		{"(*B).Context", Method, 24},
     		{"(*B).Elapsed", Method, 20},
     		{"(*B).Error", Method, 0},
     		{"(*B).Errorf", Method, 0},
    @@ -16320,6 +16634,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*B).Helper", Method, 9},
     		{"(*B).Log", Method, 0},
     		{"(*B).Logf", Method, 0},
    +		{"(*B).Loop", Method, 24},
     		{"(*B).Name", Method, 8},
     		{"(*B).ReportAllocs", Method, 1},
     		{"(*B).ReportMetric", Method, 13},
    @@ -16337,7 +16652,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*B).StopTimer", Method, 0},
     		{"(*B).TempDir", Method, 15},
     		{"(*F).Add", Method, 18},
    +		{"(*F).Chdir", Method, 24},
     		{"(*F).Cleanup", Method, 18},
    +		{"(*F).Context", Method, 24},
     		{"(*F).Error", Method, 18},
     		{"(*F).Errorf", Method, 18},
     		{"(*F).Fail", Method, 18},
    @@ -16358,7 +16675,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"(*F).TempDir", Method, 18},
     		{"(*M).Run", Method, 4},
     		{"(*PB).Next", Method, 3},
    +		{"(*T).Chdir", Method, 24},
     		{"(*T).Cleanup", Method, 14},
    +		{"(*T).Context", Method, 24},
     		{"(*T).Deadline", Method, 15},
     		{"(*T).Error", Method, 0},
     		{"(*T).Errorf", Method, 0},
    @@ -16440,9 +16759,11 @@ var PackageSymbols = map[string][]Symbol{
     	},
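
The testing rows add (*B).Loop, (*T).Context, and (*T).Chdir (Go 1.24). A sketch of the first two; parseInput is a placeholder for the code under test:

```go
package example

import "testing"

func BenchmarkParse(b *testing.B) {
	// b.Loop replaces the classic "for i := 0; i < b.N; i++" pattern and keeps
	// setup outside the measured region.
	for b.Loop() {
		parseInput("42")
	}
}

func TestWithContext(t *testing.T) {
	// t.Context returns a context that is canceled when the test finishes.
	ctx := t.Context()
	_ = ctx
}

func parseInput(s string) {}
```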
     	"testing/fstest": {
     		{"(MapFS).Glob", Method, 16},
    +		{"(MapFS).Lstat", Method, 25},
     		{"(MapFS).Open", Method, 16},
     		{"(MapFS).ReadDir", Method, 16},
     		{"(MapFS).ReadFile", Method, 16},
    +		{"(MapFS).ReadLink", Method, 25},
     		{"(MapFS).Stat", Method, 16},
     		{"(MapFS).Sub", Method, 16},
     		{"MapFS", Type, 16},
    @@ -16849,7 +17170,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"(Time).Add", Method, 0},
     		{"(Time).AddDate", Method, 0},
     		{"(Time).After", Method, 0},
    +		{"(Time).AppendBinary", Method, 24},
     		{"(Time).AppendFormat", Method, 5},
    +		{"(Time).AppendText", Method, 24},
     		{"(Time).Before", Method, 0},
     		{"(Time).Clock", Method, 0},
     		{"(Time).Compare", Method, 20},
    @@ -17284,6 +17607,7 @@ var PackageSymbols = map[string][]Symbol{
     		{"Encode", Func, 0},
     		{"EncodeRune", Func, 0},
     		{"IsSurrogate", Func, 0},
    +		{"RuneLen", Func, 23},
     	},
     	"unicode/utf8": {
     		{"AppendRune", Func, 18},
    @@ -17306,6 +17630,11 @@ var PackageSymbols = map[string][]Symbol{
     		{"ValidRune", Func, 1},
     		{"ValidString", Func, 0},
     	},
    +	"unique": {
    +		{"(Handle).Value", Method, 23},
    +		{"Handle", Type, 23},
    +		{"Make", Func, 23},
    +	},
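
The unique package (Go 1.23) interns comparable values: equal inputs yield the same Handle, so comparisons become cheap. For example:

```go
package main

import (
	"fmt"
	"unique"
)

func main() {
	// Make canonicalizes comparable values; equal values share one handle.
	h1 := unique.Make("europe/berlin")
	h2 := unique.Make("europe/" + "berlin")
	fmt.Println(h1 == h2, h1.Value()) // true europe/berlin
}
```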
     	"unsafe": {
     		{"Add", Func, 0},
     		{"Alignof", Func, 0},
    @@ -17317,4 +17646,9 @@ var PackageSymbols = map[string][]Symbol{
     		{"String", Func, 0},
     		{"StringData", Func, 0},
     	},
    +	"weak": {
    +		{"(Pointer).Value", Method, 24},
    +		{"Make", Func, 24},
    +		{"Pointer", Type, 24},
    +	},
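
The weak package (Go 1.24) provides weak pointers that do not keep their referent alive. A sketch; the timing of collection is up to the garbage collector, so the final result is only "normally" nil:

```go
package main

import (
	"fmt"
	"runtime"
	"weak"
)

func main() {
	data := new([1 << 20]byte)

	// wp does not keep data alive; Value returns nil once data is collected.
	wp := weak.Make(data)

	fmt.Println(wp.Value() != nil) // true: data is kept reachable below
	runtime.KeepAlive(data)        // data is reachable at least until here

	runtime.GC()
	fmt.Println(wp.Value()) // normally <nil> once the referent is collected
}
```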
     }
    diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    index 98904017f2..3d96d3bf68 100644
    --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    @@ -6,7 +6,7 @@
     
     // Package stdlib provides a table of all exported symbols in the
     // standard library, along with the version at which they first
    -// appeared.
    +// appeared. It also provides the import graph of std packages.
     package stdlib
     
     import (
    diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
    deleted file mode 100644
    index ff9437a36c..0000000000
    --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
    +++ /dev/null
    @@ -1,137 +0,0 @@
    -// Copyright 2023 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -// package tokeninternal provides access to some internal features of the token
    -// package.
    -package tokeninternal
    -
    -import (
    -	"fmt"
    -	"go/token"
    -	"sort"
    -	"sync"
    -	"unsafe"
    -)
    -
    -// GetLines returns the table of line-start offsets from a token.File.
    -func GetLines(file *token.File) []int {
    -	// token.File has a Lines method on Go 1.21 and later.
    -	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
    -		return file.Lines()
    -	}
    -
    -	// This declaration must match that of token.File.
    -	// This creates a risk of dependency skew.
    -	// For now we check that the size of the two
    -	// declarations is the same, on the (fragile) assumption
    -	// that future changes would add fields.
    -	type tokenFile119 struct {
    -		_     string
    -		_     int
    -		_     int
    -		mu    sync.Mutex // we're not complete monsters
    -		lines []int
    -		_     []struct{}
    -	}
    -
    -	if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
    -		panic("unexpected token.File size")
    -	}
    -	var ptr *tokenFile119
    -	type uP = unsafe.Pointer
    -	*(*uP)(uP(&ptr)) = uP(file)
    -	ptr.mu.Lock()
    -	defer ptr.mu.Unlock()
    -	return ptr.lines
    -}
    -
    -// AddExistingFiles adds the specified files to the FileSet if they
    -// are not already present. It panics if any pair of files in the
    -// resulting FileSet would overlap.
    -func AddExistingFiles(fset *token.FileSet, files []*token.File) {
    -	// Punch through the FileSet encapsulation.
    -	type tokenFileSet struct {
    -		// This type remained essentially consistent from go1.16 to go1.21.
    -		mutex sync.RWMutex
    -		base  int
    -		files []*token.File
    -		_     *token.File // changed to atomic.Pointer[token.File] in go1.19
    -	}
    -
    -	// If the size of token.FileSet changes, this will fail to compile.
    -	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
    -	var _ [-delta * delta]int
    -
    -	type uP = unsafe.Pointer
    -	var ptr *tokenFileSet
    -	*(*uP)(uP(&ptr)) = uP(fset)
    -	ptr.mutex.Lock()
    -	defer ptr.mutex.Unlock()
    -
    -	// Merge and sort.
    -	newFiles := append(ptr.files, files...)
    -	sort.Slice(newFiles, func(i, j int) bool {
    -		return newFiles[i].Base() < newFiles[j].Base()
    -	})
    -
    -	// Reject overlapping files.
    -	// Discard adjacent identical files.
    -	out := newFiles[:0]
    -	for i, file := range newFiles {
    -		if i > 0 {
    -			prev := newFiles[i-1]
    -			if file == prev {
    -				continue
    -			}
    -			if prev.Base()+prev.Size()+1 > file.Base() {
    -				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
    -					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
    -					file.Name(), file.Base(), file.Base()+file.Size()))
    -			}
    -		}
    -		out = append(out, file)
    -	}
    -	newFiles = out
    -
    -	ptr.files = newFiles
    -
    -	// Advance FileSet.Base().
    -	if len(newFiles) > 0 {
    -		last := newFiles[len(newFiles)-1]
    -		newBase := last.Base() + last.Size() + 1
    -		if ptr.base < newBase {
    -			ptr.base = newBase
    -		}
    -	}
    -}
    -
    -// FileSetFor returns a new FileSet containing a sequence of new Files with
    -// the same base, size, and line as the input files, for use in APIs that
    -// require a FileSet.
    -//
    -// Precondition: the input files must be non-overlapping, and sorted in order
    -// of their Base.
    -func FileSetFor(files ...*token.File) *token.FileSet {
    -	fset := token.NewFileSet()
    -	for _, f := range files {
    -		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
    -		lines := GetLines(f)
    -		f2.SetLines(lines)
    -	}
    -	return fset
    -}
    -
    -// CloneFileSet creates a new FileSet holding all files in fset. It does not
    -// create copies of the token.Files in fset: they are added to the resulting
    -// FileSet unmodified.
    -func CloneFileSet(fset *token.FileSet) *token.FileSet {
    -	var files []*token.File
    -	fset.Iterate(func(f *token.File) bool {
    -		files = append(files, f)
    -		return true
    -	})
    -	newFileSet := token.NewFileSet()
    -	AddExistingFiles(newFileSet, files)
    -	return newFileSet
    -}
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
    new file mode 100644
    index 0000000000..cdae2b8e81
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
    @@ -0,0 +1,68 @@
    +// Copyright 2021 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package typeparams contains common utilities for writing tools that
    +// interact with generic Go code, as introduced with Go 1.18. It
    +// supplements the standard library APIs. Notably, the StructuralTerms
    +// API computes a minimal representation of the structural
    +// restrictions on a type parameter.
    +//
    +// An external version of these APIs is available in the
    +// golang.org/x/exp/typeparams module.
    +package typeparams
    +
    +import (
    +	"go/ast"
    +	"go/token"
    +	"go/types"
    +)
    +
    +// UnpackIndexExpr extracts data from AST nodes that represent index
    +// expressions.
    +//
    +// For an ast.IndexExpr, the resulting indices slice will contain exactly one
    +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
    +// number of index expressions.
    +//
    +// For nodes that don't represent index expressions, the first return value of
    +// UnpackIndexExpr will be nil.
    +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
    +	switch e := n.(type) {
    +	case *ast.IndexExpr:
    +		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
    +	case *ast.IndexListExpr:
    +		return e.X, e.Lbrack, e.Indices, e.Rbrack
    +	}
    +	return nil, token.NoPos, nil, token.NoPos
    +}
    +
    +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
    +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
    +// will panic.
    +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
    +	switch len(indices) {
    +	case 0:
    +		panic("empty indices")
    +	case 1:
    +		return &ast.IndexExpr{
    +			X:      x,
    +			Lbrack: lbrack,
    +			Index:  indices[0],
    +			Rbrack: rbrack,
    +		}
    +	default:
    +		return &ast.IndexListExpr{
    +			X:       x,
    +			Lbrack:  lbrack,
    +			Indices: indices,
    +			Rbrack:  rbrack,
    +		}
    +	}
    +}
    +
    +// IsTypeParam reports whether t is a type parameter (or an alias of one).
    +func IsTypeParam(t types.Type) bool {
    +	_, ok := types.Unalias(t).(*types.TypeParam)
    +	return ok
    +}
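
UnpackIndexExpr and PackIndexExpr give one shape to both ast.IndexExpr and ast.IndexListExpr. A usage sketch; it assumes the helpers are importable, e.g. via the golang.org/x/exp/typeparams module that the package comment above points to:

```go
package main

import (
	"fmt"
	"go/parser"

	"golang.org/x/exp/typeparams" // assumed external mirror of these helpers
)

func main() {
	// "m[K]" parses as an *ast.IndexExpr and "m[K, V]" as an *ast.IndexListExpr;
	// UnpackIndexExpr flattens both into one (x, indices) shape.
	for _, src := range []string{"m[K]", "m[K, V]"} {
		expr, err := parser.ParseExpr(src)
		if err != nil {
			panic(err)
		}
		x, _, indices, _ := typeparams.UnpackIndexExpr(expr)
		fmt.Println(src, x != nil, len(indices)) // m[K] true 1, then m[K, V] true 2
	}
}
```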
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
    new file mode 100644
    index 0000000000..27a2b17929
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
    @@ -0,0 +1,155 @@
    +// Copyright 2022 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typeparams
    +
    +import (
    +	"fmt"
    +	"go/types"
    +)
    +
    +// CoreType returns the core type of T or nil if T does not have a core type.
    +//
    +// See https://go.dev/ref/spec#Core_types for the definition of a core type.
    +func CoreType(T types.Type) types.Type {
    +	U := T.Underlying()
    +	if _, ok := U.(*types.Interface); !ok {
    +		return U // for non-interface types,
    +	}
    +
    +	terms, err := NormalTerms(U)
    +	if len(terms) == 0 || err != nil {
    +		// len(terms) -> empty type set of interface.
    +		// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
    +		return nil // no core type.
    +	}
    +
    +	U = terms[0].Type().Underlying()
    +	var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
    +	for identical = 1; identical < len(terms); identical++ {
    +		if !types.Identical(U, terms[identical].Type().Underlying()) {
    +			break
    +		}
    +	}
    +
    +	if identical == len(terms) {
    +		// https://go.dev/ref/spec#Core_types
    +		// "There is a single type U which is the underlying type of all types in the type set of T"
    +		return U
    +	}
    +	ch, ok := U.(*types.Chan)
    +	if !ok {
    +		return nil // no core type as identical < len(terms) and U is not a channel.
    +	}
    +	// https://go.dev/ref/spec#Core_types
    +	// "the type chan E if T contains only bidirectional channels, or the type chan<- E or
    +	// <-chan E depending on the direction of the directional channels present."
    +	for chans := identical; chans < len(terms); chans++ {
    +		curr, ok := terms[chans].Type().Underlying().(*types.Chan)
    +		if !ok {
    +			return nil
    +		}
    +		if !types.Identical(ch.Elem(), curr.Elem()) {
    +			return nil // channel elements are not identical.
    +		}
    +		if ch.Dir() == types.SendRecv {
    +			// ch is bidirectional. We can safely always use curr's direction.
    +			ch = curr
    +		} else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
    +			// ch and curr are not bidirectional and not the same direction.
    +			return nil
    +		}
    +	}
    +	return ch
    +}
    +
    +// NormalTerms returns a slice of terms representing the normalized structural
    +// type restrictions of a type, if any.
    +//
    +// For all types other than *types.TypeParam, *types.Interface, and
    +// *types.Union, this is just a single term with Tilde() == false and
    +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
    +// below.
    +//
    +// Structural type restrictions of a type parameter are created via
    +// non-interface types embedded in its constraint interface (directly, or via a
    +// chain of interface embeddings). For example, in the declaration type
    +// T[P interface{~int; m()}] int the structural restriction of the type
    +// parameter P is ~int.
    +//
    +// With interface embedding and unions, the specification of structural type
    +// restrictions may be arbitrarily complex. For example, consider the
    +// following:
    +//
    +//	type A interface{ ~string|~[]byte }
    +//
    +//	type B interface{ int|string }
    +//
    +//	type C interface { ~string|~int }
    +//
    +//	type T[P interface{ A|B; C }] int
    +//
    +// In this example, the structural type restriction of P is ~string|int: A|B
    +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
    +// which when intersected with C (~string|~int) yields ~string|int.
    +//
    +// NormalTerms computes these expansions and reductions, producing a
    +// "normalized" form of the embeddings. A structural restriction is normalized
    +// if it is a single union containing no interface terms, and is minimal in the
    +// sense that removing any term changes the set of types satisfying the
    +// constraint. It is left as a proof for the reader that, modulo sorting, there
    +// is exactly one such normalized form.
    +//
    +// Because the minimal representation always takes this form, NormalTerms
    +// returns a slice of tilde terms corresponding to the terms of the union in
    +// the normalized structural restriction. An error is returned if the type is
    +// invalid, exceeds complexity bounds, or has an empty type set. In the latter
    +// case, NormalTerms returns ErrEmptyTypeSet.
    +//
    +// NormalTerms makes no guarantees about the order of terms, except that it
    +// is deterministic.
    +func NormalTerms(T types.Type) ([]*types.Term, error) {
    +	// typeSetOf(T) == typeSetOf(Unalias(T))
    +	typ := types.Unalias(T)
    +	if named, ok := typ.(*types.Named); ok {
    +		typ = named.Underlying()
    +	}
    +	switch typ := typ.(type) {
    +	case *types.TypeParam:
    +		return StructuralTerms(typ)
    +	case *types.Union:
    +		return UnionTermSet(typ)
    +	case *types.Interface:
    +		return InterfaceTermSet(typ)
    +	default:
    +		return []*types.Term{types.NewTerm(false, T)}, nil
    +	}
    +}
    +
    +// Deref returns the type of the variable pointed to by t,
    +// if t's core type is a pointer; otherwise it returns t.
    +//
    +// Do not assume that Deref(T)==T implies T is not a pointer:
    +// consider "type T *T", for example.
    +//
    +// TODO(adonovan): ideally this would live in typesinternal, but that
    +// creates an import cycle. Move there when we melt this package down.
    +func Deref(t types.Type) types.Type {
    +	if ptr, ok := CoreType(t).(*types.Pointer); ok {
    +		return ptr.Elem()
    +	}
    +	return t
    +}
    +
    +// MustDeref returns the type of the variable pointed to by t.
    +// It panics if t's core type is not a pointer.
    +//
    +// TODO(adonovan): ideally this would live in typesinternal, but that
    +// creates an import cycle. Move there when we melt this package down.
    +func MustDeref(t types.Type) types.Type {
    +	if ptr, ok := CoreType(t).(*types.Pointer); ok {
    +		return ptr.Elem()
    +	}
    +	panic(fmt.Sprintf("%v is not a pointer", t))
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
    new file mode 100644
    index 0000000000..0ade5c2949
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
    @@ -0,0 +1,131 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typeparams
    +
    +import (
    +	"go/types"
    +
    +	"golang.org/x/tools/internal/aliases"
    +)
    +
    +// Free is a memoization of the set of free type parameters within a
    +// type. It makes a sequence of calls to [Free.Has] for overlapping
    +// types more efficient. The zero value is ready for use.
    +//
    +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
    +type Free struct {
    +	seen map[types.Type]bool
    +}
    +
    +// Has reports whether the specified type has a free type parameter.
    +func (w *Free) Has(typ types.Type) (res bool) {
    +	// detect cycles
    +	if x, ok := w.seen[typ]; ok {
    +		return x
    +	}
    +	if w.seen == nil {
    +		w.seen = make(map[types.Type]bool)
    +	}
    +	w.seen[typ] = false
    +	defer func() {
    +		w.seen[typ] = res
    +	}()
    +
    +	switch t := typ.(type) {
    +	case nil, *types.Basic: // TODO(gri) should nil be handled here?
    +		break
    +
    +	case *types.Alias:
    +		if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
    +			return true // This is an uninstantiated Alias.
    +		}
    +		// The expansion of an alias can have free type parameters,
    +		// whether or not the alias itself has type parameters:
    +		//
    +		//   func _[K comparable]() {
    +		//     type Set      = map[K]bool // free(Set)      = {K}
    +	//     type MapTo[V] = map[K]V    // free(MapTo[foo]) = {K}
    +		//   }
    +		//
    +		// So, we must Unalias.
    +		return w.Has(types.Unalias(t))
    +
    +	case *types.Array:
    +		return w.Has(t.Elem())
    +
    +	case *types.Slice:
    +		return w.Has(t.Elem())
    +
    +	case *types.Struct:
    +		for i, n := 0, t.NumFields(); i < n; i++ {
    +			if w.Has(t.Field(i).Type()) {
    +				return true
    +			}
    +		}
    +
    +	case *types.Pointer:
    +		return w.Has(t.Elem())
    +
    +	case *types.Tuple:
    +		n := t.Len()
    +		for i := 0; i < n; i++ {
    +			if w.Has(t.At(i).Type()) {
    +				return true
    +			}
    +		}
    +
    +	case *types.Signature:
    +		// t.tparams may not be nil if we are looking at a signature
    +		// of a generic function type (or an interface method) that is
    +		// part of the type we're testing. We don't care about these type
    +		// parameters.
    +		// Similarly, the receiver of a method may declare (rather than
    +		// use) type parameters, we don't care about those either.
    +		// Thus, we only need to look at the input and result parameters.
    +		return w.Has(t.Params()) || w.Has(t.Results())
    +
    +	case *types.Interface:
    +		for i, n := 0, t.NumMethods(); i < n; i++ {
    +			if w.Has(t.Method(i).Type()) {
    +				return true
    +			}
    +		}
    +		terms, err := InterfaceTermSet(t)
    +		if err != nil {
    +			return false // ill typed
    +		}
    +		for _, term := range terms {
    +			if w.Has(term.Type()) {
    +				return true
    +			}
    +		}
    +
    +	case *types.Map:
    +		return w.Has(t.Key()) || w.Has(t.Elem())
    +
    +	case *types.Chan:
    +		return w.Has(t.Elem())
    +
    +	case *types.Named:
    +		args := t.TypeArgs()
    +		if params := t.TypeParams(); params.Len() > args.Len() {
    +			return true // this is an uninstantiated named type.
    +		}
    +		for i, n := 0, args.Len(); i < n; i++ {
    +			if w.Has(args.At(i)) {
    +				return true
    +			}
    +		}
    +		return w.Has(t.Underlying()) // recurse for types local to parameterized functions
    +
    +	case *types.TypeParam:
    +		return true
    +
    +	default:
    +		panic(t) // unreachable
    +	}
    +
    +	return false
    +}
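    +
    +// exampleFreeHas is an illustrative sketch (not part of the upstream file):
    +// given a type parameter P obtained from a checked package, []P has a free
    +// type parameter while []int does not. Reusing one Free value lets repeated
    +// queries over overlapping types share work.
    +func exampleFreeHas(p *types.TypeParam) (bool, bool) {
    +	var free Free // the zero value is ready for use
    +	inSlice := free.Has(types.NewSlice(p))                     // true: []P mentions P
    +	inGround := free.Has(types.NewSlice(types.Typ[types.Int])) // false: []int is ground
    +	return inSlice, inGround
    +}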
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
    new file mode 100644
    index 0000000000..f49802b8ef
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
    @@ -0,0 +1,218 @@
    +// Copyright 2021 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typeparams
    +
    +import (
    +	"errors"
    +	"fmt"
    +	"go/types"
    +	"os"
    +	"strings"
    +)
    +
    +//go:generate go run copytermlist.go
    +
    +const debug = false
    +
    +var ErrEmptyTypeSet = errors.New("empty type set")
    +
    +// StructuralTerms returns a slice of terms representing the normalized
    +// structural type restrictions of a type parameter, if any.
    +//
    +// Structural type restrictions of a type parameter are created via
    +// non-interface types embedded in its constraint interface (directly, or via a
    +// chain of interface embeddings). For example, in the declaration
    +//
    +//	type T[P interface{~int; m()}] int
    +//
    +// the structural restriction of the type parameter P is ~int.
    +//
    +// With interface embedding and unions, the specification of structural type
    +// restrictions may be arbitrarily complex. For example, consider the
    +// following:
    +//
    +//	type A interface{ ~string|~[]byte }
    +//
    +//	type B interface{ int|string }
    +//
    +//	type C interface { ~string|~int }
    +//
    +//	type T[P interface{ A|B; C }] int
    +//
    +// In this example, the structural type restriction of P is ~string|int: A|B
    +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
    +// which when intersected with C (~string|~int) yields ~string|int.
    +//
    +// StructuralTerms computes these expansions and reductions, producing a
    +// "normalized" form of the embeddings. A structural restriction is normalized
    +// if it is a single union containing no interface terms, and is minimal in the
    +// sense that removing any term changes the set of types satisfying the
    +// constraint. It is left to the reader to verify that, modulo sorting, there
    +// is exactly one such normalized form.
    +//
    +// Because the minimal representation always takes this form, StructuralTerms
    +// returns a slice of tilde terms corresponding to the terms of the union in
    +// the normalized structural restriction. An error is returned if the
    +// constraint interface is invalid, exceeds complexity bounds, or has an empty
    +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
    +//
    +// StructuralTerms makes no guarantees about the order of terms, except that it
    +// is deterministic.
    +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
    +	constraint := tparam.Constraint()
    +	if constraint == nil {
    +		return nil, fmt.Errorf("%s has nil constraint", tparam)
    +	}
    +	iface, _ := constraint.Underlying().(*types.Interface)
    +	if iface == nil {
    +		return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
    +	}
    +	return InterfaceTermSet(iface)
    +}
    +
    +// InterfaceTermSet computes the normalized terms for a constraint interface,
    +// returning an error if the term set cannot be computed or is empty. In the
    +// latter case, the error will be ErrEmptyTypeSet.
    +//
    +// See the documentation of StructuralTerms for more information on
    +// normalization.
    +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
    +	return computeTermSet(iface)
    +}
    +
    +// UnionTermSet computes the normalized terms for a union, returning an error
    +// if the term set cannot be computed or is empty. In the latter case, the
    +// error will be ErrEmptyTypeSet.
    +//
    +// See the documentation of StructuralTerms for more information on
    +// normalization.
    +func UnionTermSet(union *types.Union) ([]*types.Term, error) {
    +	return computeTermSet(union)
    +}
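    +
    +// exampleUnionTermSet is an illustrative sketch (not part of the upstream
    +// API): the union int | ~int normalizes to the single term ~int, because
    +// the set {int} is contained in the set of all types whose underlying type
    +// is int.
    +func exampleUnionTermSet() ([]*types.Term, error) {
    +	u := types.NewUnion([]*types.Term{
    +		types.NewTerm(false, types.Typ[types.Int]), // int
    +		types.NewTerm(true, types.Typ[types.Int]),  // ~int absorbs int
    +	})
    +	return UnionTermSet(u) // expected: a single ~int term
    +}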
    +
    +func computeTermSet(typ types.Type) ([]*types.Term, error) {
    +	tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
    +	if err != nil {
    +		return nil, err
    +	}
    +	if tset.terms.isEmpty() {
    +		return nil, ErrEmptyTypeSet
    +	}
    +	if tset.terms.isAll() {
    +		return nil, nil
    +	}
    +	var terms []*types.Term
    +	for _, term := range tset.terms {
    +		terms = append(terms, types.NewTerm(term.tilde, term.typ))
    +	}
    +	return terms, nil
    +}
    +
    +// A termSet holds the normalized set of terms for a given type.
    +//
    +// The name termSet is intentionally distinct from 'type set': a type set is
    +// all types that implement a type (and includes method restrictions), whereas
    +// a term set just represents the structural restrictions on a type.
    +type termSet struct {
    +	complete bool
    +	terms    termlist
    +}
    +
    +func indentf(depth int, format string, args ...any) {
    +	fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
    +}
    +
    +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
    +	if t == nil {
    +		panic("nil type")
    +	}
    +
    +	if debug {
    +		indentf(depth, "%s", t.String())
    +		defer func() {
    +			if err != nil {
    +				indentf(depth, "=> %s", err)
    +			} else {
    +				indentf(depth, "=> %s", res.terms.String())
    +			}
    +		}()
    +	}
    +
    +	const maxTermCount = 100
    +	if tset, ok := seen[t]; ok {
    +		if !tset.complete {
    +			return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
    +		}
    +		return tset, nil
    +	}
    +
    +	// Mark the current type as seen to avoid infinite recursion.
    +	tset := new(termSet)
    +	defer func() {
    +		tset.complete = true
    +	}()
    +	seen[t] = tset
    +
    +	switch u := t.Underlying().(type) {
    +	case *types.Interface:
    +		// The term set of an interface is the intersection of the term sets of its
    +		// embedded types.
    +		tset.terms = allTermlist
    +		for i := 0; i < u.NumEmbeddeds(); i++ {
    +			embedded := u.EmbeddedType(i)
    +			if _, ok := embedded.Underlying().(*types.TypeParam); ok {
    +				return nil, fmt.Errorf("invalid embedded type %T", embedded)
    +			}
    +			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
    +			if err != nil {
    +				return nil, err
    +			}
    +			tset.terms = tset.terms.intersect(tset2.terms)
    +		}
    +	case *types.Union:
    +		// The term set of a union is the union of term sets of its terms.
    +		tset.terms = nil
    +		for i := 0; i < u.Len(); i++ {
    +			t := u.Term(i)
    +			var terms termlist
    +			switch t.Type().Underlying().(type) {
    +			case *types.Interface:
    +				tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
    +				if err != nil {
    +					return nil, err
    +				}
    +				terms = tset2.terms
    +			case *types.TypeParam, *types.Union:
    +				// A stand-alone type parameter or union is not permitted as union
    +				// term.
    +				return nil, fmt.Errorf("invalid union term %T", t)
    +			default:
    +				if t.Type() == types.Typ[types.Invalid] {
    +					continue
    +				}
    +				terms = termlist{{t.Tilde(), t.Type()}}
    +			}
    +			tset.terms = tset.terms.union(terms)
    +			if len(tset.terms) > maxTermCount {
    +				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
    +			}
    +		}
    +	case *types.TypeParam:
    +		panic("unreachable")
    +	default:
    +		// For all other types, the term set is just a single non-tilde term
    +		// holding the type itself.
    +		if u != types.Typ[types.Invalid] {
    +			tset.terms = termlist{{false, t}}
    +		}
    +	}
    +	return tset, nil
    +}
    +
    +// under is a facade for the go/types internal function of the same name. It is
    +// used by typeterm.go.
    +func under(t types.Type) types.Type {
    +	return t.Underlying()
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    new file mode 100644
    index 0000000000..cbd12f8013
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    @@ -0,0 +1,163 @@
    +// Copyright 2021 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Code generated by copytermlist.go DO NOT EDIT.
    +
    +package typeparams
    +
    +import (
    +	"bytes"
    +	"go/types"
    +)
    +
    +// A termlist represents the type set represented by the union
    +// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
    +// A termlist is in normal form if all terms are disjoint.
    +// termlist operations don't require the operands to be in
    +// normal form.
    +type termlist []*term
    +
    +// allTermlist represents the set of all types.
    +// It is in normal form.
    +var allTermlist = termlist{new(term)}
    +
    +// String prints the termlist exactly (without normalization).
    +func (xl termlist) String() string {
    +	if len(xl) == 0 {
    +		return "∅"
    +	}
    +	var buf bytes.Buffer
    +	for i, x := range xl {
    +		if i > 0 {
    +			buf.WriteString(" | ")
    +		}
    +		buf.WriteString(x.String())
    +	}
    +	return buf.String()
    +}
    +
    +// isEmpty reports whether the termlist xl represents the empty set of types.
    +func (xl termlist) isEmpty() bool {
    +	// If there's a non-nil term, the entire list is not empty.
    +	// If the termlist is in normal form, this requires at most
    +	// one iteration.
    +	for _, x := range xl {
    +		if x != nil {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +// isAll reports whether the termlist xl represents the set of all types.
    +func (xl termlist) isAll() bool {
    +	// If there's a 𝓤 term, the entire list is 𝓤.
    +	// If the termlist is in normal form, this requires at most
    +	// one iteration.
    +	for _, x := range xl {
    +		if x != nil && x.typ == nil {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// norm returns the normal form of xl.
    +func (xl termlist) norm() termlist {
    +	// Quadratic algorithm, but good enough for now.
    +	// TODO(gri) fix asymptotic performance
    +	used := make([]bool, len(xl))
    +	var rl termlist
    +	for i, xi := range xl {
    +		if xi == nil || used[i] {
    +			continue
    +		}
    +		for j := i + 1; j < len(xl); j++ {
    +			xj := xl[j]
    +			if xj == nil || used[j] {
    +				continue
    +			}
    +			if u1, u2 := xi.union(xj); u2 == nil {
    +				// If we encounter a 𝓤 term, the entire list is 𝓤.
    +				// Exit early.
    +				// (Note that this is not just an optimization;
    +				// if we continue, we may end up with a 𝓤 term
    +				// and other terms and the result would not be
    +				// in normal form.)
    +				if u1.typ == nil {
    +					return allTermlist
    +				}
    +				xi = u1
    +				used[j] = true // xj is now unioned into xi - ignore it in future iterations
    +			}
    +		}
    +		rl = append(rl, xi)
    +	}
    +	return rl
    +}
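    +
    +// exampleNorm is an illustrative sketch (not part of the generated file):
    +// normalizing the list ~int | int | string merges int into ~int, leaving
    +// two disjoint terms.
    +func exampleNorm() termlist {
    +	xl := termlist{
    +		{true, types.Typ[types.Int]},     // ~int
    +		{false, types.Typ[types.Int]},    // int (absorbed by ~int)
    +		{false, types.Typ[types.String]}, // string
    +	}
    +	return xl.norm() // ~int | string
    +}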
    +
    +// union returns the union xl ∪ yl.
    +func (xl termlist) union(yl termlist) termlist {
    +	return append(xl, yl...).norm()
    +}
    +
    +// intersect returns the intersection xl ∩ yl.
    +func (xl termlist) intersect(yl termlist) termlist {
    +	if xl.isEmpty() || yl.isEmpty() {
    +		return nil
    +	}
    +
    +	// Quadratic algorithm, but good enough for now.
    +	// TODO(gri) fix asymptotic performance
    +	var rl termlist
    +	for _, x := range xl {
    +		for _, y := range yl {
    +			if r := x.intersect(y); r != nil {
    +				rl = append(rl, r)
    +			}
    +		}
    +	}
    +	return rl.norm()
    +}
    +
    +// equal reports whether xl and yl represent the same type set.
    +func (xl termlist) equal(yl termlist) bool {
    +	// TODO(gri) this should be more efficient
    +	return xl.subsetOf(yl) && yl.subsetOf(xl)
    +}
    +
    +// includes reports whether t ∈ xl.
    +func (xl termlist) includes(t types.Type) bool {
    +	for _, x := range xl {
    +		if x.includes(t) {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// supersetOf reports whether y ⊆ xl.
    +func (xl termlist) supersetOf(y *term) bool {
    +	for _, x := range xl {
    +		if y.subsetOf(x) {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// subsetOf reports whether xl ⊆ yl.
    +func (xl termlist) subsetOf(yl termlist) bool {
    +	if yl.isEmpty() {
    +		return xl.isEmpty()
    +	}
    +
    +	// each term x of xl must be a subset of yl
    +	for _, x := range xl {
    +		if !yl.supersetOf(x) {
    +			return false // x is not a subset of yl
    +		}
    +	}
    +	return true
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    new file mode 100644
    index 0000000000..7350bb702a
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    @@ -0,0 +1,169 @@
    +// Copyright 2021 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Code generated by copytermlist.go DO NOT EDIT.
    +
    +package typeparams
    +
    +import "go/types"
    +
    +// A term describes elementary type sets:
    +//
    +//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
    +//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
    +//	 T:  &term{false, T}  == {T}                    // set of type T
    +//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
    +type term struct {
    +	tilde bool // valid if typ != nil
    +	typ   types.Type
    +}
    +
    +func (x *term) String() string {
    +	switch {
    +	case x == nil:
    +		return "∅"
    +	case x.typ == nil:
    +		return "𝓤"
    +	case x.tilde:
    +		return "~" + x.typ.String()
    +	default:
    +		return x.typ.String()
    +	}
    +}
    +
    +// equal reports whether x and y represent the same type set.
    +func (x *term) equal(y *term) bool {
    +	// easy cases
    +	switch {
    +	case x == nil || y == nil:
    +		return x == y
    +	case x.typ == nil || y.typ == nil:
    +		return x.typ == y.typ
    +	}
    +	// ∅ ⊂ x, y ⊂ 𝓤
    +
    +	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
    +}
    +
    +// union returns the union x ∪ y: zero, one, or two non-nil terms.
    +func (x *term) union(y *term) (_, _ *term) {
    +	// easy cases
    +	switch {
    +	case x == nil && y == nil:
    +		return nil, nil // ∅ ∪ ∅ == ∅
    +	case x == nil:
    +		return y, nil // ∅ ∪ y == y
    +	case y == nil:
    +		return x, nil // x ∪ ∅ == x
    +	case x.typ == nil:
    +		return x, nil // 𝓤 ∪ y == 𝓤
    +	case y.typ == nil:
    +		return y, nil // x ∪ 𝓤 == 𝓤
    +	}
    +	// ∅ ⊂ x, y ⊂ 𝓤
    +
    +	if x.disjoint(y) {
    +		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
    +	}
    +	// x.typ == y.typ
    +
    +	// ~t ∪ ~t == ~t
    +	// ~t ∪  T == ~t
    +	//  T ∪ ~t == ~t
    +	//  T ∪  T ==  T
    +	if x.tilde || !y.tilde {
    +		return x, nil
    +	}
    +	return y, nil
    +}
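    +
    +// exampleTermUnion is an illustrative sketch (not part of the generated
    +// file): ~int ∪ int collapses to the single term ~int, whereas int ∪ string
    +// stays as two disjoint terms.
    +func exampleTermUnion() {
    +	tildeInt := &term{true, types.Typ[types.Int]}
    +	plainInt := &term{false, types.Typ[types.Int]}
    +	str := &term{false, types.Typ[types.String]}
    +
    +	a, b := tildeInt.union(plainInt) // a == ~int, b == nil
    +	c, d := plainInt.union(str)      // c == int, d == string (disjoint)
    +	_, _, _, _ = a, b, c, d
    +}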
    +
    +// intersect returns the intersection x ∩ y.
    +func (x *term) intersect(y *term) *term {
    +	// easy cases
    +	switch {
    +	case x == nil || y == nil:
    +		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
    +	case x.typ == nil:
    +		return y // 𝓤 ∩ y == y
    +	case y.typ == nil:
    +		return x // x ∩ 𝓤 == x
    +	}
    +	// ∅ ⊂ x, y ⊂ 𝓤
    +
    +	if x.disjoint(y) {
    +		return nil // x ∩ y == ∅ if x and y are disjoint
    +	}
    +	// x.typ == y.typ
    +
    +	// ~t ∩ ~t == ~t
    +	// ~t ∩  T ==  T
    +	//  T ∩ ~t ==  T
    +	//  T ∩  T ==  T
    +	if !x.tilde || y.tilde {
    +		return x
    +	}
    +	return y
    +}
    +
    +// includes reports whether t ∈ x.
    +func (x *term) includes(t types.Type) bool {
    +	// easy cases
    +	switch {
    +	case x == nil:
    +		return false // t ∈ ∅ == false
    +	case x.typ == nil:
    +		return true // t ∈ 𝓤 == true
    +	}
    +	// ∅ ⊂ x ⊂ 𝓤
    +
    +	u := t
    +	if x.tilde {
    +		u = under(u)
    +	}
    +	return types.Identical(x.typ, u)
    +}
    +
    +// subsetOf reports whether x ⊆ y.
    +func (x *term) subsetOf(y *term) bool {
    +	// easy cases
    +	switch {
    +	case x == nil:
    +		return true // ∅ ⊆ y == true
    +	case y == nil:
    +		return false // x ⊆ ∅ == false since x != ∅
    +	case y.typ == nil:
    +		return true // x ⊆ 𝓤 == true
    +	case x.typ == nil:
    +		return false // 𝓤 ⊆ y == false since y != 𝓤
    +	}
    +	// ∅ ⊂ x, y ⊂ 𝓤
    +
    +	if x.disjoint(y) {
    +		return false // x ⊆ y == false if x ∩ y == ∅
    +	}
    +	// x.typ == y.typ
    +
    +	// ~t ⊆ ~t == true
    +	// ~t ⊆  T == false
    +	//  T ⊆ ~t == true
    +	//  T ⊆  T == true
    +	return !x.tilde || y.tilde
    +}
    +
    +// disjoint reports whether x ∩ y == ∅.
    +// x.typ and y.typ must not be nil.
    +func (x *term) disjoint(y *term) bool {
    +	if debug && (x.typ == nil || y.typ == nil) {
    +		panic("invalid argument(s)")
    +	}
    +	ux := x.typ
    +	if y.tilde {
    +		ux = under(ux)
    +	}
    +	uy := y.typ
    +	if x.tilde {
    +		uy = under(uy)
    +	}
    +	return !types.Identical(ux, uy)
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go
    new file mode 100644
    index 0000000000..4957f02164
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go
    @@ -0,0 +1,133 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"fmt"
    +	"go/types"
    +
    +	"golang.org/x/tools/go/types/typeutil"
    +)
    +
    +// ForEachElement calls f for type T and each type reachable from its
    +// type through reflection. It does this by recursively stripping off
    +// type constructors; in addition, for each named type N, the type *N
    +// is added to the result as it may have additional methods.
    +//
    +// The caller must provide an initially empty set used to de-duplicate
    +// identical types, potentially across multiple calls to ForEachElement.
    +// (Its final value holds all the elements seen, matching the arguments
    +// passed to f.)
    +//
    +// TODO(adonovan): share/harmonize with go/callgraph/rta.
    +func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
    +	var visit func(T types.Type, skip bool)
    +	visit = func(T types.Type, skip bool) {
    +		if !skip {
    +			if seen, _ := rtypes.Set(T, true).(bool); seen {
    +				return // de-dup
    +			}
    +
    +			f(T) // notify caller of new element type
    +		}
    +
    +		// Recursion over signatures of each method.
    +		tmset := msets.MethodSet(T)
    +		for i := 0; i < tmset.Len(); i++ {
    +			sig := tmset.At(i).Type().(*types.Signature)
    +			// It is tempting to call visit(sig, false)
    +			// but, as noted in golang.org/cl/65450043,
    +			// the Signature.Recv field is ignored by
    +			// types.Identical and typeutil.Map, which
    +			// is confusing at best.
    +			//
    +			// More importantly, the true signature rtype
    +			// reachable from a method using reflection
    +			// has no receiver but an extra ordinary parameter.
    +			// For the Read method of io.Reader we want:
    +			//   func(Reader, []byte) (int, error)
    +			// but here sig is:
    +			//   func([]byte) (int, error)
    +			// with .Recv = Reader (though it is hard to
    +			// notice because it doesn't affect Signature.String
    +			// or types.Identical).
    +			//
    +			// TODO(adonovan): construct and visit the correct
    +			// non-method signature with an extra parameter
    +			// (though since unnamed func types have no methods
    +			// there is essentially no actual demand for this).
    +			//
    +			// TODO(adonovan): document whether or not it is
    +			// safe to skip non-exported methods (as RTA does).
    +			visit(sig.Params(), true)  // skip the Tuple
    +			visit(sig.Results(), true) // skip the Tuple
    +		}
    +
    +		switch T := T.(type) {
    +		case *types.Alias:
    +			visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
    +
    +		case *types.Basic:
    +			// nop
    +
    +		case *types.Interface:
    +			// nop---handled by recursion over method set.
    +
    +		case *types.Pointer:
    +			visit(T.Elem(), false)
    +
    +		case *types.Slice:
    +			visit(T.Elem(), false)
    +
    +		case *types.Chan:
    +			visit(T.Elem(), false)
    +
    +		case *types.Map:
    +			visit(T.Key(), false)
    +			visit(T.Elem(), false)
    +
    +		case *types.Signature:
    +			if T.Recv() != nil {
    +				panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
    +			}
    +			visit(T.Params(), true)  // skip the Tuple
    +			visit(T.Results(), true) // skip the Tuple
    +
    +		case *types.Named:
    +			// A pointer-to-named type can be derived from a named
    +			// type via reflection.  It may have methods too.
    +			visit(types.NewPointer(T), false)
    +
    +			// Consider 'type T struct{S}' where S has methods.
    +			// Reflection provides no way to get from T to struct{S},
    +			// only to S, so the method set of struct{S} is unwanted,
    +			// so set 'skip' flag during recursion.
    +			visit(T.Underlying(), true) // skip the unnamed type
    +
    +		case *types.Array:
    +			visit(T.Elem(), false)
    +
    +		case *types.Struct:
    +			for i, n := 0, T.NumFields(); i < n; i++ {
    +				// TODO(adonovan): document whether or not
    +				// it is safe to skip non-exported fields.
    +				visit(T.Field(i).Type(), false)
    +			}
    +
    +		case *types.Tuple:
    +			for i, n := 0, T.Len(); i < n; i++ {
    +				visit(T.At(i).Type(), false)
    +			}
    +
    +		case *types.TypeParam, *types.Union:
    +			// ForEachElement must not be called on parameterized types.
    +			panic(T)
    +
    +		default:
    +			panic(T)
    +		}
    +	}
    +	visit(T, false)
    +}
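    +
    +// exampleForEachElement is an illustrative sketch (not part of the upstream
    +// API): it collects every type reachable from *[]T. For a named, ground
    +// (non-parameterized) T this visits *[]T, []T, T, and *T, plus any types
    +// reachable from T's method signatures.
    +func exampleForEachElement(T types.Type) []types.Type {
    +	var (
    +		rtypes typeutil.Map            // de-duplication set (zero value is ready)
    +		msets  typeutil.MethodSetCache // method-set cache (zero value is ready)
    +		out    []types.Type
    +	)
    +	ForEachElement(&rtypes, &msets, types.NewPointer(types.NewSlice(T)), func(t types.Type) {
    +		out = append(out, t)
    +	})
    +	return out
    +}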
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
    index 834e05381c..235a6defc4 100644
    --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
    @@ -838,7 +838,7 @@ const (
     	// InvalidCap occurs when an argument to the cap built-in function is not of
     	// supported type.
     	//
    -	// See https://golang.org/ref/spec#Lengthand_capacity for information on
    +	// See https://golang.org/ref/spec#Length_and_capacity for information on
     	// which underlying types are supported as arguments to cap and len.
     	//
     	// Example:
    @@ -859,7 +859,7 @@ const (
     	// InvalidCopy occurs when the arguments are not of slice type or do not
     	// have compatible type.
     	//
    -	// See https://golang.org/ref/spec#Appendingand_copying_slices for more
    +	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
     	// information on the type requirements for the copy built-in.
     	//
     	// Example:
    @@ -897,7 +897,7 @@ const (
     	// InvalidLen occurs when an argument to the len built-in function is not of
     	// supported type.
     	//
    -	// See https://golang.org/ref/spec#Lengthand_capacity for information on
    +	// See https://golang.org/ref/spec#Length_and_capacity for information on
     	// which underlying types are supported as arguments to cap and len.
     	//
     	// Example:
    @@ -914,7 +914,7 @@ const (
     
     	// InvalidMake occurs when make is called with an unsupported type argument.
     	//
    -	// See https://golang.org/ref/spec#Makingslices_maps_and_channels for
    +	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
     	// information on the types that may be created using make.
     	//
     	// Example:
    @@ -966,7 +966,7 @@ const (
     	//  var _ = string(x)
     	InvalidConversion
     
    -	// InvalidUntypedConversion occurs when an there is no valid implicit
    +	// InvalidUntypedConversion occurs when there is no valid implicit
     	// conversion from an untyped value satisfying the type constraints of the
     	// context in which it is used.
     	//
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
    new file mode 100644
    index 0000000000..b64f714eb3
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
    @@ -0,0 +1,46 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"go/ast"
    +	"go/types"
    +	"strconv"
    +)
    +
    +// FileQualifier returns a [types.Qualifier] function that qualifies
    +// imported symbols appropriately based on the import environment of a given
    +// file.
    +// If the same package is imported multiple times, the last appearance is
    +// recorded.
    +func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
    +	// Construct mapping of import paths to their defined names.
    +	// It is only necessary to look at renaming imports.
    +	imports := make(map[string]string)
    +	for _, imp := range f.Imports {
    +		if imp.Name != nil && imp.Name.Name != "_" {
    +			path, _ := strconv.Unquote(imp.Path.Value)
    +			imports[path] = imp.Name.Name
    +		}
    +	}
    +
    +	// Define qualifier to replace full package paths with names of the imports.
    +	return func(p *types.Package) string {
    +		if p == nil || p == pkg {
    +			return ""
    +		}
    +
    +		if name, ok := imports[p.Path()]; ok {
    +			if name == "." {
    +				return ""
    +			} else {
    +				return name
    +			}
    +		}
    +
    +		// If there is no local renaming, fall back to the package name.
    +		return p.Name()
    +	}
    +}
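    +
    +// exampleFileQualifier is an illustrative sketch (not part of the upstream
    +// API): it renders a type using the import names in effect for file f, so
    +// that a renaming import such as `foo "example.com/bar"` yields "foo.T"
    +// rather than "bar.T".
    +func exampleFileQualifier(f *ast.File, pkg *types.Package, t types.Type) string {
    +	return types.TypeString(t, FileQualifier(f, pkg))
    +}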
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
    index fea7c8b75e..8352ea7617 100644
    --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
    @@ -6,20 +6,21 @@ package typesinternal
     
     import (
     	"go/types"
    -
    -	"golang.org/x/tools/internal/aliases"
     )
     
     // ReceiverNamed returns the named type (if any) associated with the
     // type of recv, which may be of the form N or *N, or aliases thereof.
     // It also reports whether a Pointer was present.
    +//
    +// The named result may be nil if recv is from a method on an
    +// anonymous interface or struct type, or in ill-typed code.
     func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
     	t := recv.Type()
    -	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
    +	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
     		isPtr = true
     		t = ptr.Elem()
     	}
    -	named, _ = aliases.Unalias(t).(*types.Named)
    +	named, _ = types.Unalias(t).(*types.Named)
     	return
     }
     
    @@ -36,7 +37,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
     // indirection from the type, regardless of named types (analogous to
     // a LOAD instruction).
     func Unpointer(t types.Type) types.Type {
    -	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
    +	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
     		return ptr.Elem()
     	}
     	return t
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
    index 7c77c2fbc0..edf0347ec3 100644
    --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
    @@ -11,6 +11,8 @@ import (
     	"go/types"
     	"reflect"
     	"unsafe"
    +
    +	"golang.org/x/tools/internal/aliases"
     )
     
     func SetUsesCgo(conf *types.Config) bool {
    @@ -30,12 +32,14 @@ func SetUsesCgo(conf *types.Config) bool {
     	return true
     }
     
    -// ReadGo116ErrorData extracts additional information from types.Error values
    +// ErrorCodeStartEnd extracts additional information from types.Error values
     // generated by Go version 1.16 and later: the error code, start position, and
     // end position. If all positions are valid, start <= err.Pos <= end.
     //
     // If the data could not be read, the final result parameter will be false.
    -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
    +//
    +// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
    +func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
     	var data [3]int
     	// By coincidence all of these fields are ints, which simplifies things.
     	v := reflect.ValueOf(err)
    @@ -48,3 +52,78 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
     	}
     	return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
     }
    +
    +// NameRelativeTo returns a types.Qualifier that qualifies members of
    +// all packages other than pkg, using only the package name.
    +// (By contrast, [types.RelativeTo] uses the complete package path,
    +// which is often excessive.)
    +//
    +// If pkg is nil, it is equivalent to [*types.Package.Name].
    +func NameRelativeTo(pkg *types.Package) types.Qualifier {
    +	return func(other *types.Package) string {
    +		if pkg != nil && pkg == other {
    +			return "" // same package; unqualified
    +		}
    +		return other.Name()
    +	}
    +}
    +
    +// A NamedOrAlias is a [types.Type] that is named (as
    +// defined by the spec) and capable of bearing type parameters: it
    +// abstracts aliases ([types.Alias]) and defined types
    +// ([types.Named]).
    +//
    +// Every type declared by an explicit "type" declaration is a
    +// NamedOrAlias. (Built-in type symbols may additionally
    +// have type [types.Basic], which is not a NamedOrAlias,
    +// though the spec regards them as "named".)
    +//
    +// NamedOrAlias cannot expose the Origin method, because
    +// [types.Alias.Origin] and [types.Named.Origin] have different
    +// (covariant) result types; use [Origin] instead.
    +type NamedOrAlias interface {
    +	types.Type
    +	Obj() *types.TypeName
    +	// TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
    +}
    +
    +// TypeParams is a light shim around t.TypeParams().
    +// (go/types.Alias).TypeParams requires >= 1.23.
    +func TypeParams(t NamedOrAlias) *types.TypeParamList {
    +	switch t := t.(type) {
    +	case *types.Alias:
    +		return aliases.TypeParams(t)
    +	case *types.Named:
    +		return t.TypeParams()
    +	}
    +	return nil
    +}
    +
    +// TypeArgs is a light shim around t.TypeArgs().
    +// (go/types.Alias).TypeArgs requires >= 1.23.
    +func TypeArgs(t NamedOrAlias) *types.TypeList {
    +	switch t := t.(type) {
    +	case *types.Alias:
    +		return aliases.TypeArgs(t)
    +	case *types.Named:
    +		return t.TypeArgs()
    +	}
    +	return nil
    +}
    +
    +// Origin returns the generic type of the Named or Alias type t if it
    +// is instantiated, otherwise it returns t.
    +func Origin(t NamedOrAlias) NamedOrAlias {
    +	switch t := t.(type) {
    +	case *types.Alias:
    +		return aliases.Origin(t)
    +	case *types.Named:
    +		return t.Origin()
    +	}
    +	return t
    +}
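    +
    +// exampleOrigin is an illustrative sketch (not part of the upstream API):
    +// for an instantiated named or alias type such as List[int], Origin yields
    +// the generic List declaration, whose TypeParams lists the declared
    +// parameters, while TypeArgs on the instance lists the supplied arguments.
    +func exampleOrigin(inst NamedOrAlias) (*types.TypeParamList, *types.TypeList) {
    +	return TypeParams(Origin(inst)), TypeArgs(inst)
    +}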
    +
    +// IsPackageLevel reports whether obj is a package-level symbol.
    +func IsPackageLevel(obj types.Object) bool {
    +	return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
    +}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
    new file mode 100644
    index 0000000000..e5da049511
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
    @@ -0,0 +1,40 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
    +// this API that actually does something.
    +
    +import "go/types"
    +
    +type VarKind uint8
    +
    +const (
    +	_          VarKind = iota // (not meaningful)
    +	PackageVar                // a package-level variable
    +	LocalVar                  // a local variable
    +	RecvVar                   // a method receiver variable
    +	ParamVar                  // a function parameter variable
    +	ResultVar                 // a function result variable
    +	FieldVar                  // a struct field
    +)
    +
    +func (kind VarKind) String() string {
    +	return [...]string{
    +		0:          "VarKind(0)",
    +		PackageVar: "PackageVar",
    +		LocalVar:   "LocalVar",
    +		RecvVar:    "RecvVar",
    +		ParamVar:   "ParamVar",
    +		ResultVar:  "ResultVar",
    +		FieldVar:   "FieldVar",
    +	}[kind]
    +}
    +
    +// GetVarKind returns an invalid VarKind.
    +func GetVarKind(v *types.Var) VarKind { return 0 }
    +
    +// SetVarKind has no effect.
    +func SetVarKind(v *types.Var, kind VarKind) {}
    diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
    new file mode 100644
    index 0000000000..d272949c17
    --- /dev/null
    +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
    @@ -0,0 +1,392 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"fmt"
    +	"go/ast"
    +	"go/token"
    +	"go/types"
    +	"strings"
    +)
    +
    +// ZeroString returns the string representation of the zero value for any type t.
    +// The boolean result indicates whether the type is or contains an invalid type
    +// or a non-basic (constraint) interface type.
    +//
    +// Even for invalid input types, ZeroString may return a partially correct
    +// string representation. The caller should use the returned isValid boolean
    +// to determine the validity of the expression.
    +//
    +// When assigning to a wider type (such as 'any'), it's the caller's
    +// responsibility to handle any necessary type conversions.
    +//
    +// This string can be used on the right-hand side of an assignment where the
    +// left-hand side has that explicit type.
    +// References to named types are qualified by an appropriate (optional)
    +// qualifier function.
    +// Exception: This does not apply to tuples. Their string representation is
    +// informational only and cannot be used in an assignment.
    +//
    +// See [ZeroExpr] for a variant that returns an [ast.Expr].
    +func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
    +	switch t := t.(type) {
    +	case *types.Basic:
    +		switch {
    +		case t.Info()&types.IsBoolean != 0:
    +			return "false", true
    +		case t.Info()&types.IsNumeric != 0:
    +			return "0", true
    +		case t.Info()&types.IsString != 0:
    +			return `""`, true
    +		case t.Kind() == types.UnsafePointer:
    +			fallthrough
    +		case t.Kind() == types.UntypedNil:
    +			return "nil", true
    +		case t.Kind() == types.Invalid:
    +			return "invalid", false
    +		default:
    +			panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
    +		}
    +
    +	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
    +		return "nil", true
    +
    +	case *types.Interface:
    +		if !t.IsMethodSet() {
    +			return "invalid", false
    +		}
    +		return "nil", true
    +
    +	case *types.Named:
    +		switch under := t.Underlying().(type) {
    +		case *types.Struct, *types.Array:
    +			return types.TypeString(t, qual) + "{}", true
    +		default:
    +			return ZeroString(under, qual)
    +		}
    +
    +	case *types.Alias:
    +		switch t.Underlying().(type) {
    +		case *types.Struct, *types.Array:
    +			return types.TypeString(t, qual) + "{}", true
    +		default:
    +			// A type parameter can have an alias, but an alias type's
    +			// underlying type can never be a type parameter.
    +			// Use types.Unalias to preserve the type parameter information
    +			// instead of calling Underlying(), which would go right through
    +			// to the type parameter's underlying type, which is always an
    +			// interface.
    +			return ZeroString(types.Unalias(t), qual)
    +		}
    +
    +	case *types.Array, *types.Struct:
    +		return types.TypeString(t, qual) + "{}", true
    +
    +	case *types.TypeParam:
    +		// Assumes func new is not shadowed.
    +		return "*new(" + types.TypeString(t, qual) + ")", true
    +
    +	case *types.Tuple:
    +		// Tuples are not normal values.
    +		// We currently format them as "(t[0], ..., t[n])"; this could change.
    +		isValid := true
    +		components := make([]string, t.Len())
    +		for i := 0; i < t.Len(); i++ {
    +			comp, ok := ZeroString(t.At(i).Type(), qual)
    +
    +			components[i] = comp
    +			isValid = isValid && ok
    +		}
    +		return "(" + strings.Join(components, ", ") + ")", isValid
    +
    +	case *types.Union:
    +		// Variables of these types cannot be created, so it makes
    +		// no sense to ask for their zero value.
    +		panic(fmt.Sprintf("invalid type for a variable: %v", t))
    +
    +	default:
    +		panic(t) // unreachable.
    +	}
    +}
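    +
    +// exampleZeroString is an illustrative sketch (not part of the upstream
    +// API): it builds the text of a variable declaration initialized to the
    +// type's zero value, e.g. "var n int = 0". The name parameter is a
    +// hypothetical variable name supplied by the caller.
    +func exampleZeroString(name string, t types.Type, qual types.Qualifier) (string, bool) {
    +	zero, ok := ZeroString(t, qual)
    +	return fmt.Sprintf("var %s %s = %s", name, types.TypeString(t, qual), zero), ok
    +}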
    +
    +// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
    +// The boolean result indicates whether the type is or contains an invalid type
    +// or a non-basic (constraint) interface type.
    +//
    +// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
    +// representation. The caller should use the returned isValid boolean to determine
    +// the validity of the expression.
    +//
    +// This function is designed for types suitable for variables and should not be
    +// used with Tuple or Union types. References to named types are qualified by an
    +// appropriate (optional) qualifier function.
    +//
    +// See [ZeroString] for a variant that returns a string.
    +func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
    +	switch t := t.(type) {
    +	case *types.Basic:
    +		switch {
    +		case t.Info()&types.IsBoolean != 0:
    +			return &ast.Ident{Name: "false"}, true
    +		case t.Info()&types.IsNumeric != 0:
    +			return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
    +		case t.Info()&types.IsString != 0:
    +			return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
    +		case t.Kind() == types.UnsafePointer:
    +			fallthrough
    +		case t.Kind() == types.UntypedNil:
    +			return ast.NewIdent("nil"), true
    +		case t.Kind() == types.Invalid:
    +			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
    +		default:
    +			panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
    +		}
    +
    +	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
    +		return ast.NewIdent("nil"), true
    +
    +	case *types.Interface:
    +		if !t.IsMethodSet() {
    +			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
    +		}
    +		return ast.NewIdent("nil"), true
    +
    +	case *types.Named:
    +		switch under := t.Underlying().(type) {
    +		case *types.Struct, *types.Array:
    +			return &ast.CompositeLit{
    +				Type: TypeExpr(t, qual),
    +			}, true
    +		default:
    +			return ZeroExpr(under, qual)
    +		}
    +
    +	case *types.Alias:
    +		switch t.Underlying().(type) {
    +		case *types.Struct, *types.Array:
    +			return &ast.CompositeLit{
    +				Type: TypeExpr(t, qual),
    +			}, true
    +		default:
    +			return ZeroExpr(types.Unalias(t), qual)
    +		}
    +
    +	case *types.Array, *types.Struct:
    +		return &ast.CompositeLit{
    +			Type: TypeExpr(t, qual),
    +		}, true
    +
    +	case *types.TypeParam:
    +		return &ast.StarExpr{ // *new(T)
    +			X: &ast.CallExpr{
    +				// Assumes func new is not shadowed.
    +				Fun: ast.NewIdent("new"),
    +				Args: []ast.Expr{
    +					ast.NewIdent(t.Obj().Name()),
    +				},
    +			},
    +		}, true
    +
    +	case *types.Tuple:
    +		// Unlike ZeroString, there is no ast.Expr that can express a tuple
    +		// as "(t[0], ..., t[n])".
    +		panic(fmt.Sprintf("invalid type for a variable: %v", t))
    +
    +	case *types.Union:
    +		// Variables of these types cannot be created, so it makes
    +		// no sense to ask for their zero value.
    +		panic(fmt.Sprintf("invalid type for a variable: %v", t))
    +
    +	default:
    +		panic(t) // unreachable.
    +	}
    +}
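    +
    +// exampleZeroAssign is an illustrative sketch (not part of the upstream
    +// API): it produces the AST for the statement "lhs = <zero value of t>",
    +// the kind of node a code-generation or quick-fix tool might insert.
    +func exampleZeroAssign(lhs ast.Expr, t types.Type, qual types.Qualifier) (*ast.AssignStmt, bool) {
    +	rhs, ok := ZeroExpr(t, qual)
    +	return &ast.AssignStmt{
    +		Lhs: []ast.Expr{lhs},
    +		Tok: token.ASSIGN,
    +		Rhs: []ast.Expr{rhs},
    +	}, ok
    +}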
    +
    +// IsZeroExpr uses simple syntactic heuristics to report whether expr
    +// is an obvious zero value, such as 0, "", nil, or false.
    +// It cannot do better without type information.
    +func IsZeroExpr(expr ast.Expr) bool {
    +	switch e := expr.(type) {
    +	case *ast.BasicLit:
    +		return e.Value == "0" || e.Value == `""`
    +	case *ast.Ident:
    +		return e.Name == "nil" || e.Name == "false"
    +	default:
    +		return false
    +	}
    +}
    +
    +// TypeExpr returns syntax for the specified type. References to named types
    +// are qualified by an appropriate (optional) qualifier function.
    +// It may panic for types such as Tuple or Union.
    +func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
    +	switch t := t.(type) {
    +	case *types.Basic:
    +		switch t.Kind() {
    +		case types.UnsafePointer:
    +			return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
    +		default:
    +			return ast.NewIdent(t.Name())
    +		}
    +
    +	case *types.Pointer:
    +		return &ast.UnaryExpr{
    +			Op: token.MUL,
    +			X:  TypeExpr(t.Elem(), qual),
    +		}
    +
    +	case *types.Array:
    +		return &ast.ArrayType{
    +			Len: &ast.BasicLit{
    +				Kind:  token.INT,
    +				Value: fmt.Sprintf("%d", t.Len()),
    +			},
    +			Elt: TypeExpr(t.Elem(), qual),
    +		}
    +
    +	case *types.Slice:
    +		return &ast.ArrayType{
    +			Elt: TypeExpr(t.Elem(), qual),
    +		}
    +
    +	case *types.Map:
    +		return &ast.MapType{
    +			Key:   TypeExpr(t.Key(), qual),
    +			Value: TypeExpr(t.Elem(), qual),
    +		}
    +
    +	case *types.Chan:
    +		dir := ast.ChanDir(t.Dir())
    +		if t.Dir() == types.SendRecv {
    +			dir = ast.SEND | ast.RECV
    +		}
    +		return &ast.ChanType{
    +			Dir:   dir,
    +			Value: TypeExpr(t.Elem(), qual),
    +		}
    +
    +	case *types.Signature:
    +		var params []*ast.Field
    +		for i := 0; i < t.Params().Len(); i++ {
    +			params = append(params, &ast.Field{
    +				Type: TypeExpr(t.Params().At(i).Type(), qual),
    +				Names: []*ast.Ident{
    +					{
    +						Name: t.Params().At(i).Name(),
    +					},
    +				},
    +			})
    +		}
    +		if t.Variadic() {
    +			last := params[len(params)-1]
    +			last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
    +		}
    +		var returns []*ast.Field
    +		for i := 0; i < t.Results().Len(); i++ {
    +			returns = append(returns, &ast.Field{
    +				Type: TypeExpr(t.Results().At(i).Type(), qual),
    +			})
    +		}
    +		return &ast.FuncType{
    +			Params: &ast.FieldList{
    +				List: params,
    +			},
    +			Results: &ast.FieldList{
    +				List: returns,
    +			},
    +		}
    +
    +	case *types.TypeParam:
    +		pkgName := qual(t.Obj().Pkg())
    +		if pkgName == "" || t.Obj().Pkg() == nil {
    +			return ast.NewIdent(t.Obj().Name())
    +		}
    +		return &ast.SelectorExpr{
    +			X:   ast.NewIdent(pkgName),
    +			Sel: ast.NewIdent(t.Obj().Name()),
    +		}
    +
    +	// types.TypeParam also implements interface NamedOrAlias. To differentiate,
    +	// the TypeParam case needs to come before the NamedOrAlias case.
    +	// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
    +	// NamedOrAlias.
    +	case NamedOrAlias:
    +		var expr ast.Expr = ast.NewIdent(t.Obj().Name())
    +		if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
    +			expr = &ast.SelectorExpr{
    +				X:   ast.NewIdent(pkgName),
    +				Sel: expr.(*ast.Ident),
    +			}
    +		}
    +
    +		// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
    +		// typesinternal.NamedOrAlias.
    +		if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
    +			if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
    +				var indices []ast.Expr
    +				for i := range typeArgs.Len() {
    +					indices = append(indices, TypeExpr(typeArgs.At(i), qual))
    +				}
    +				expr = &ast.IndexListExpr{
    +					X:       expr,
    +					Indices: indices,
    +				}
    +			}
    +		}
    +
    +		return expr
    +
    +	case *types.Struct:
    +		return ast.NewIdent(t.String())
    +
    +	case *types.Interface:
    +		return ast.NewIdent(t.String())
    +
    +	case *types.Union:
    +		if t.Len() == 0 {
    +			panic("Union type should have at least one term")
    +		}
    +		// As in go/ast, the returned expression puts the last term in the
    +		// Y field at the topmost level of the BinaryExpr.
    +		// For union of type "float32 | float64 | int64", the structure looks
    +		// similar to:
    +		// {
    +		// 	X: {
    +		// 		X: float32,
    +		// 		Op: |
    +		// 		Y: float64,
    +		// 	}
    +		// 	Op: |,
    +		// 	Y: int64,
    +		// }
    +		var union ast.Expr
    +		for i := range t.Len() {
    +			term := t.Term(i)
    +			termExpr := TypeExpr(term.Type(), qual)
    +			if term.Tilde() {
    +				termExpr = &ast.UnaryExpr{
    +					Op: token.TILDE,
    +					X:  termExpr,
    +				}
    +			}
    +			if i == 0 {
    +				union = termExpr
    +			} else {
    +				union = &ast.BinaryExpr{
    +					X:  union,
    +					Op: token.OR,
    +					Y:  termExpr,
    +				}
    +			}
    +		}
    +		return union
    +
    +	case *types.Tuple:
    +		panic("invalid input type types.Tuple")
    +
    +	default:
    +		panic("unreachable")
    +	}
    +}
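    +
    +// exampleTypeExpr is an illustrative sketch (not part of the upstream API):
    +// it converts the type map[string][]int back into syntax; the resulting
    +// node can be printed with go/printer or go/format.
    +func exampleTypeExpr(qual types.Qualifier) ast.Expr {
    +	t := types.NewMap(types.Typ[types.String], types.NewSlice(types.Typ[types.Int]))
    +	return TypeExpr(t, qual) // an *ast.MapType with Key "string" and Value "[]int"
    +}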
    diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
    deleted file mode 100644
    index 377bf7a53b..0000000000
    --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -package versions
    -
    -// toolchain is maximum version (<1.22) that the go toolchain used
    -// to build the current tool is known to support.
    -//
    -// When a tool is built with >=1.22, the value of toolchain is unused.
    -//
    -// x/tools does not support building with go <1.18. So we take this
    -// as the minimum possible maximum.
    -var toolchain string = Go1_18
    diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
    deleted file mode 100644
    index f65beed9d8..0000000000
    --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.19
    -// +build go1.19
    -
    -package versions
    -
    -func init() {
    -	if Compare(toolchain, Go1_19) < 0 {
    -		toolchain = Go1_19
    -	}
    -}
    diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
    deleted file mode 100644
    index 1a9efa126c..0000000000
    --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.20
    -// +build go1.20
    -
    -package versions
    -
    -func init() {
    -	if Compare(toolchain, Go1_20) < 0 {
    -		toolchain = Go1_20
    -	}
    -}
    diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
    deleted file mode 100644
    index b7ef216dfe..0000000000
    --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.21
    -// +build go1.21
    -
    -package versions
    -
    -func init() {
    -	if Compare(toolchain, Go1_21) < 0 {
    -		toolchain = Go1_21
    -	}
    -}
    diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
    index 562eef21fa..0fc10ce4eb 100644
    --- a/vendor/golang.org/x/tools/internal/versions/types.go
    +++ b/vendor/golang.org/x/tools/internal/versions/types.go
    @@ -5,15 +5,29 @@
     package versions
     
     import (
    +	"go/ast"
     	"go/types"
     )
     
    -// GoVersion returns the Go version of the type package.
    -// It returns zero if no version can be determined.
    -func GoVersion(pkg *types.Package) string {
    -	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
    -	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
    -		return pkg.GoVersion()
    +// FileVersion returns a file's Go version.
    +// The reported version is an unknown Future version if a
    +// version cannot be determined.
    +func FileVersion(info *types.Info, file *ast.File) string {
    +	// In tools built with Go >= 1.22, the Go version of a file
    +	// follows a cascade of sources:
    +	// 1) types.Info.FileVersions, which follows the cascade:
    +	//   1.a) file version (ast.File.GoVersion),
    +	//   1.b) the package version (types.Config.GoVersion), or
    +	// 2) is some unknown Future version.
    +	//
    +	// File versions require a valid package version to be provided to types
    +	// in Config.GoVersion. Config.GoVersion is either from the package's module
    +	// or the toolchain (go run). This value should be provided by go/packages
    +	// or unitchecker.Config.GoVersion.
    +	if v := info.FileVersions[file]; IsValid(v) {
    +		return v
     	}
    -	return ""
    +	// Note: we could instead return runtime.Version() [if valid].
    +	// This would act as a max version on what a tool can support.
    +	return Future
     }
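    The consolidated FileVersion above only works when types.Info.FileVersions has
    been populated by the type checker, which in turn requires types.Config.GoVersion
    to be set. A minimal standalone sketch of that flow (outside x/tools, assuming
    Go 1.22+; the package name and source are illustrative only):

        package main

        import (
            "fmt"
            "go/ast"
            "go/importer"
            "go/parser"
            "go/token"
            "go/types"
        )

        func main() {
            const src = "package p\n\nfunc f() {}\n"

            fset := token.NewFileSet()
            file, err := parser.ParseFile(fset, "p.go", src, 0)
            if err != nil {
                panic(err)
            }

            // With Config.GoVersion set, Check records a per-file version in
            // Info.FileVersions, which FileVersion consults before falling back
            // to the unknown Future version.
            conf := types.Config{Importer: importer.Default(), GoVersion: "go1.22"}
            info := &types.Info{FileVersions: make(map[*ast.File]string)}
            if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
                panic(err)
            }
            fmt.Println(info.FileVersions[file]) // "go1.22"
        }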
    diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
    deleted file mode 100644
    index b4345d3349..0000000000
    --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -// Copyright 2023 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build !go1.22
    -// +build !go1.22
    -
    -package versions
    -
    -import (
    -	"go/ast"
    -	"go/types"
    -)
    -
    -// FileVersion returns a language version (<=1.21) derived from runtime.Version()
    -// or an unknown future version.
    -func FileVersion(info *types.Info, file *ast.File) string {
    -	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
    -	// available. We use a go version derived from the toolchain used to
    -	// compile the tool by default.
    -	// This will be <= go1.21. We take this as the maximum version that
    -	// this tool can support.
    -	//
    -	// There are no features currently in x/tools that need to tell fine grained
    -	// differences for versions <1.22.
    -	return toolchain
    -}
    -
    -// InitFileVersions is a noop when compiled with this Go version.
    -func InitFileVersions(*types.Info) {}
    diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
    deleted file mode 100644
    index e8180632a5..0000000000
    --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
    +++ /dev/null
    @@ -1,41 +0,0 @@
    -// Copyright 2023 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.22
    -// +build go1.22
    -
    -package versions
    -
    -import (
    -	"go/ast"
    -	"go/types"
    -)
    -
    -// FileVersions returns a file's Go version.
    -// The reported version is an unknown Future version if a
    -// version cannot be determined.
    -func FileVersion(info *types.Info, file *ast.File) string {
    -	// In tools built with Go >= 1.22, the Go version of a file
    -	// follow a cascades of sources:
    -	// 1) types.Info.FileVersion, which follows the cascade:
    -	//   1.a) file version (ast.File.GoVersion),
    -	//   1.b) the package version (types.Config.GoVersion), or
    -	// 2) is some unknown Future version.
    -	//
    -	// File versions require a valid package version to be provided to types
    -	// in Config.GoVersion. Config.GoVersion is either from the package's module
    -	// or the toolchain (go run). This value should be provided by go/packages
    -	// or unitchecker.Config.GoVersion.
    -	if v := info.FileVersions[file]; IsValid(v) {
    -		return v
    -	}
    -	// Note: we could instead return runtime.Version() [if valid].
    -	// This would act as a max version on what a tool can support.
    -	return Future
    -}
    -
    -// InitFileVersions initializes info to record Go versions for Go files.
    -func InitFileVersions(info *types.Info) {
    -	info.FileVersions = make(map[*ast.File]string)
    -}
    diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
    index b5e38c6628..6818b2de30 100644
    --- a/vendor/google.golang.org/api/googleapi/googleapi.go
    +++ b/vendor/google.golang.org/api/googleapi/googleapi.go
    @@ -200,7 +200,17 @@ var WithDataWrapper = MarshalStyle(true)
     // WithoutDataWrapper marshals JSON without a {"data": ...} wrapper.
     var WithoutDataWrapper = MarshalStyle(false)
     
    +// JSONReader is like JSONBuffer, but returns an io.Reader instead.
     func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
    +	buf, err := wrap.JSONBuffer(v)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return buf, nil
    +}
    +
    +// JSONBuffer encodes the body and wraps it if needed.
    +func (wrap MarshalStyle) JSONBuffer(v interface{}) (*bytes.Buffer, error) {
     	buf := new(bytes.Buffer)
     	if wrap {
     		buf.Write([]byte(`{"data": `))
    @@ -259,6 +269,20 @@ func ChunkSize(size int) MediaOption {
     	return chunkSizeOption(size)
     }
     
    +type chunkTransferTimeoutOption time.Duration
    +
    +func (cd chunkTransferTimeoutOption) setOptions(o *MediaOptions) {
    +	o.ChunkTransferTimeout = time.Duration(cd)
    +}
    +
    +// ChunkTransferTimeout returns a MediaOption which sets a per-chunk
    +// transfer timeout for resumable uploads. If a single chunk has been
    +// attempting to upload for longer than this time, the in-flight request is
    +// canceled and retried. The default is no per-chunk timeout.
    +func ChunkTransferTimeout(timeout time.Duration) MediaOption {
    +	return chunkTransferTimeoutOption(timeout)
    +}
    +
     type chunkRetryDeadlineOption time.Duration
     
     func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) {
    @@ -283,6 +307,7 @@ type MediaOptions struct {
     	ForceEmptyContentType bool
     	ChunkSize             int
     	ChunkRetryDeadline    time.Duration
    +	ChunkTransferTimeout  time.Duration
     }
     
     // ProcessMediaOptions stores options from opts in a MediaOptions.
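    The new ChunkTransferTimeout option composes with the existing chunk options
    through ProcessMediaOptions; a hedged sketch (the call sites that enforce the
    timeout in the resumable-upload path are not part of this hunk):

        package main

        import (
            "fmt"
            "time"

            "google.golang.org/api/googleapi"
        )

        func main() {
            opts := []googleapi.MediaOption{
                googleapi.ChunkSize(8 << 20),                     // 8 MiB chunks
                googleapi.ChunkRetryDeadline(30 * time.Second),   // retry deadline per chunk
                googleapi.ChunkTransferTimeout(10 * time.Second), // new: cancel and retry a slow chunk attempt
            }
            mo := googleapi.ProcessMediaOptions(opts)
            fmt.Println(mo.ChunkSize, mo.ChunkRetryDeadline, mo.ChunkTransferTimeout)
        }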
    diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
    index b648930985..86861e2438 100644
    --- a/vendor/google.golang.org/api/internal/creds.go
    +++ b/vendor/google.golang.org/api/internal/creds.go
    @@ -15,6 +15,9 @@ import (
     	"os"
     	"time"
     
    +	"cloud.google.com/go/auth"
    +	"cloud.google.com/go/auth/credentials"
    +	"cloud.google.com/go/auth/oauth2adapt"
     	"golang.org/x/oauth2"
     	"google.golang.org/api/internal/cert"
     	"google.golang.org/api/internal/impersonate"
    @@ -27,6 +30,9 @@ const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
     // Creds returns credential information obtained from DialSettings, or if none, then
     // it returns default credential information.
     func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
    +	if ds.IsNewAuthLibraryEnabled() {
    +		return credsNewAuth(ds)
    +	}
     	creds, err := baseCreds(ctx, ds)
     	if err != nil {
     		return nil, err
    @@ -37,6 +43,106 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
     	return creds, nil
     }
     
    +// AuthCreds returns [cloud.google.com/go/auth.Credentials] based on credentials
    +// options provided via [option.ClientOption], including legacy oauth2/google
    +// options. If there are no applicable options, then it returns the result of
    +// [cloud.google.com/go/auth/credentials.DetectDefault].
    +func AuthCreds(ctx context.Context, settings *DialSettings) (*auth.Credentials, error) {
    +	if settings.AuthCredentials != nil {
    +		return settings.AuthCredentials, nil
    +	}
    +	// Support oauth2/google options
    +	var oauth2Creds *google.Credentials
    +	if settings.InternalCredentials != nil {
    +		oauth2Creds = settings.InternalCredentials
    +	} else if settings.Credentials != nil {
    +		oauth2Creds = settings.Credentials
    +	} else if settings.TokenSource != nil {
    +		oauth2Creds = &google.Credentials{TokenSource: settings.TokenSource}
    +	}
    +	if oauth2Creds != nil {
    +		return oauth2adapt.AuthCredentialsFromOauth2Credentials(oauth2Creds), nil
    +	}
    +
    +	return detectDefaultFromDialSettings(settings)
    +}
    +
    +// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport.
    +// The OAuth2 transport and endpoint will be configured for mTLS if applicable.
    +func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) {
    +	clientCertSource, err := getClientCertificateSource(settings)
    +	if err != nil {
    +		return "", nil, err
    +	}
    +	tokenURL := oAuth2Endpoint(clientCertSource)
    +	var oauth2Client *http.Client
    +	if clientCertSource != nil {
    +		tlsConfig := &tls.Config{
    +			GetClientCertificate: clientCertSource,
    +		}
    +		oauth2Client = customHTTPClient(tlsConfig)
    +	} else {
    +		oauth2Client = oauth2.NewClient(ctx, nil)
    +	}
    +	return tokenURL, oauth2Client, nil
    +}
    +
    +func credsNewAuth(settings *DialSettings) (*google.Credentials, error) {
    +	// Preserve old options behavior
    +	if settings.InternalCredentials != nil {
    +		return settings.InternalCredentials, nil
    +	} else if settings.Credentials != nil {
    +		return settings.Credentials, nil
    +	} else if settings.TokenSource != nil {
    +		return &google.Credentials{TokenSource: settings.TokenSource}, nil
    +	}
    +
    +	if settings.AuthCredentials != nil {
    +		return oauth2adapt.Oauth2CredentialsFromAuthCredentials(settings.AuthCredentials), nil
    +	}
    +
    +	creds, err := detectDefaultFromDialSettings(settings)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil
    +}
    +
    +func detectDefaultFromDialSettings(settings *DialSettings) (*auth.Credentials, error) {
    +	var useSelfSignedJWT bool
    +	var aud string
    +	var scopes []string
    +	// If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT.
    +	if settings.EnableJwtWithScope || len(settings.Audiences) > 0 {
    +		useSelfSignedJWT = true
    +	}
    +
    +	if len(settings.Scopes) > 0 {
    +		scopes = make([]string, len(settings.Scopes))
    +		copy(scopes, settings.Scopes)
    +	}
    +	if len(settings.Audiences) > 0 {
    +		aud = settings.Audiences[0]
    +	}
    +	// Only default scopes if user did not also set an audience.
    +	if len(settings.Scopes) == 0 && aud == "" && len(settings.DefaultScopes) > 0 {
    +		scopes = make([]string, len(settings.DefaultScopes))
    +		copy(scopes, settings.DefaultScopes)
    +	}
    +	if len(scopes) == 0 && aud == "" {
    +		aud = settings.DefaultAudience
    +	}
    +
    +	return credentials.DetectDefault(&credentials.DetectOptions{
    +		Scopes:           scopes,
    +		Audience:         aud,
    +		CredentialsFile:  settings.CredentialsFile,
    +		CredentialsJSON:  settings.CredentialsJSON,
    +		UseSelfSignedJWT: useSelfSignedJWT,
    +		Logger:           settings.Logger,
    +	})
    +}
    +
     func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
     	if ds.InternalCredentials != nil {
     		return ds.InternalCredentials, nil
    @@ -44,7 +150,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro
     	if ds.Credentials != nil {
     		return ds.Credentials, nil
     	}
    -	if ds.CredentialsJSON != nil {
    +	if len(ds.CredentialsJSON) > 0 {
     		return credentialsFromJSON(ctx, ds.CredentialsJSON, ds)
     	}
     	if ds.CredentialsFile != "" {
    @@ -89,19 +195,12 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g
     	var params google.CredentialsParams
     	params.Scopes = ds.GetScopes()
     
    -	// Determine configurations for the OAuth2 transport, which is separate from the API transport.
    -	// The OAuth2 transport and endpoint will be configured for mTLS if applicable.
    -	clientCertSource, err := getClientCertificateSource(ds)
    +	tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, ds)
     	if err != nil {
     		return nil, err
     	}
    -	params.TokenURL = oAuth2Endpoint(clientCertSource)
    -	if clientCertSource != nil {
    -		tlsConfig := &tls.Config{
    -			GetClientCertificate: clientCertSource,
    -		}
    -		ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig))
    -	}
    +	params.TokenURL = tokenURL
    +	ctx = context.WithValue(ctx, oauth2.HTTPClient, oauth2Client)
     
     	// By default, a standard OAuth 2.0 token source is created
     	cred, err := google.CredentialsFromJSONWithParams(ctx, data, params)
    @@ -226,14 +325,3 @@ func baseTransport() *http.Transport {
     		ExpectContinueTimeout: 1 * time.Second,
     	}
     }
    -
    -// ErrUniverseNotMatch composes an error string from the provided universe
    -// domain sources (DialSettings and Credentials, respectively).
    -func ErrUniverseNotMatch(settingsUD, credsUD string) error {
    -	return fmt.Errorf(
    -		"the configured universe domain (%q) does not match the universe "+
    -			"domain found in the credentials (%q). If you haven't configured "+
    -			"WithUniverseDomain explicitly, \"googleapis.com\" is the default",
    -		settingsUD,
    -		credsUD)
    -}
    diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
    index e17141a6f5..4f5b1a0ebe 100644
    --- a/vendor/google.golang.org/api/internal/settings.go
    +++ b/vendor/google.golang.org/api/internal/settings.go
    @@ -8,11 +8,13 @@ package internal
     import (
     	"crypto/tls"
     	"errors"
    +	"log/slog"
     	"net/http"
     	"os"
     	"strconv"
     	"time"
     
    +	"cloud.google.com/go/auth"
     	"golang.org/x/oauth2"
     	"golang.org/x/oauth2/google"
     	"google.golang.org/api/internal/impersonate"
    @@ -20,8 +22,10 @@ import (
     )
     
     const (
    -	newAuthLibEnVar       = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB"
    -	universeDomainDefault = "googleapis.com"
    +	newAuthLibEnvVar        = "GOOGLE_API_GO_EXPERIMENTAL_ENABLE_NEW_AUTH_LIB"
    +	newAuthLibDisabledEnVar = "GOOGLE_API_GO_EXPERIMENTAL_DISABLE_NEW_AUTH_LIB"
    +	universeDomainEnvVar    = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
    +	defaultUniverseDomain   = "googleapis.com"
     )
     
     // DialSettings holds information needed to establish a connection with a
    @@ -56,15 +60,21 @@ type DialSettings struct {
     	ImpersonationConfig           *impersonate.Config
     	EnableDirectPath              bool
     	EnableDirectPathXds           bool
    -	EnableNewAuthLibrary          bool
     	AllowNonDefaultServiceAccount bool
    -	UniverseDomain                string
     	DefaultUniverseDomain         string
    -
    +	UniverseDomain                string
    +	Logger                        *slog.Logger
     	// Google API system parameters. For more information please read:
     	// https://cloud.google.com/apis/docs/system-parameters
     	QuotaProject  string
     	RequestReason string
    +
    +	// New Auth library Options
    +	AuthCredentials      *auth.Credentials
    +	EnableNewAuthLibrary bool
    +
    +	// TODO(b/372244283): Remove after b/358175516 has been fixed
    +	EnableAsyncRefreshDryRun func()
     }
     
     // GetScopes returns the user-provided scopes, if set, or else falls back to the
    @@ -89,11 +99,17 @@ func (ds *DialSettings) HasCustomAudience() bool {
     	return len(ds.Audiences) > 0
     }
     
    +// IsNewAuthLibraryEnabled returns true if the new auth library should be used.
     func (ds *DialSettings) IsNewAuthLibraryEnabled() bool {
    +	// The disable env var is for future rollouts, to make sure there is a way to
    +	// easily disable this behaviour once it is switched on by default.
    +	if b, err := strconv.ParseBool(os.Getenv(newAuthLibDisabledEnVar)); err == nil && b {
    +		return false
    +	}
     	if ds.EnableNewAuthLibrary {
     		return true
     	}
    -	if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil {
    +	if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnvVar)); err == nil {
     		return b
     	}
     	return false
    @@ -115,7 +131,7 @@ func (ds *DialSettings) Validate() error {
     	if ds.Credentials != nil {
     		nCreds++
     	}
    -	if ds.CredentialsJSON != nil {
    +	if len(ds.CredentialsJSON) > 0 {
     		nCreds++
     	}
     	if ds.CredentialsFile != "" {
    @@ -164,35 +180,37 @@ func (ds *DialSettings) Validate() error {
     	return nil
     }
     
    -// GetDefaultUniverseDomain returns the default service domain for a given Cloud
    -// universe, as configured with internaloption.WithDefaultUniverseDomain.
    -// The default value is "googleapis.com".
    +// GetDefaultUniverseDomain returns the Google default universe domain
    +// ("googleapis.com").
     func (ds *DialSettings) GetDefaultUniverseDomain() string {
    -	if ds.DefaultUniverseDomain == "" {
    -		return universeDomainDefault
    -	}
    -	return ds.DefaultUniverseDomain
    +	return defaultUniverseDomain
     }
     
     // GetUniverseDomain returns the default service domain for a given Cloud
    -// universe, as configured with option.WithUniverseDomain.
    -// The default value is the value of GetDefaultUniverseDomain, as configured
    -// with internaloption.WithDefaultUniverseDomain.
    +// universe, with the following precedence:
    +//
    +// 1. A non-empty option.WithUniverseDomain.
    +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
    +// 3. The default value "googleapis.com".
     func (ds *DialSettings) GetUniverseDomain() string {
    -	if ds.UniverseDomain == "" {
    -		return ds.GetDefaultUniverseDomain()
    +	if ds.UniverseDomain != "" {
    +		return ds.UniverseDomain
    +	}
    +	if envUD := os.Getenv(universeDomainEnvVar); envUD != "" {
    +		return envUD
     	}
    -	return ds.UniverseDomain
    +	return defaultUniverseDomain
     }
     
    +// IsUniverseDomainGDU returns true if the universe domain is the default Google
    +// universe ("googleapis.com").
     func (ds *DialSettings) IsUniverseDomainGDU() bool {
    -	return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain()
    +	return ds.GetUniverseDomain() == defaultUniverseDomain
     }
     
     // GetUniverseDomain returns the default service domain for a given Cloud
    -// universe, from google.Credentials, for comparison with the value returned by
    -// (*DialSettings).GetUniverseDomain. This wrapper function should be removed
    -// to close [TODO(chrisdsmith): issue link here]. See details below.
    +// universe, from google.Credentials. This wrapper function should be removed
    +// to close https://github.com/googleapis/google-api-go-client/issues/2399.
     func GetUniverseDomain(creds *google.Credentials) (string, error) {
     	timer := time.NewTimer(time.Second)
     	defer timer.Stop()
    @@ -209,9 +227,10 @@ func GetUniverseDomain(creds *google.Credentials) (string, error) {
     	}()
     
     	select {
    -	case err := <-errors:
    -		// An error that is returned before the timer expires is legitimate.
    -		return "", err
    +	case <-errors:
    +		// An error that is returned before the timer expires is likely to be
    +		// connection refused. Temporarily (2024-03-21) return the GDU domain.
    +		return defaultUniverseDomain, nil
     	case res := <-results:
     		return res, nil
     	case <-timer.C: // Timer is expired.
    @@ -223,6 +242,6 @@ func GetUniverseDomain(creds *google.Credentials) (string, error) {
     		// calls to creds.GetUniverseDomain() in grpc/dial.go and http/dial.go
     		// and remove this method to close
     		// https://github.com/googleapis/google-api-go-client/issues/2399.
    -		return universeDomainDefault, nil
    +		return defaultUniverseDomain, nil
     	}
     }
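    GetUniverseDomain now resolves the universe domain in the order: explicit
    option, then the GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable, then the
    "googleapis.com" default. A minimal standalone sketch of that precedence
    (resolveUniverseDomain is a hypothetical helper, not part of the library):

        package main

        import (
            "fmt"
            "os"
        )

        // resolveUniverseDomain mirrors the precedence implemented by
        // (*DialSettings).GetUniverseDomain.
        func resolveUniverseDomain(fromOption string) string {
            if fromOption != "" {
                return fromOption // 1. option.WithUniverseDomain
            }
            if env := os.Getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN"); env != "" {
                return env // 2. environment variable
            }
            return "googleapis.com" // 3. default
        }

        func main() {
            os.Setenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN", "example.universe.test")
            fmt.Println(resolveUniverseDomain(""))          // example.universe.test
            fmt.Println(resolveUniverseDomain("my.domain")) // my.domain (option wins)
        }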
    diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
    index 7459644d70..532de375dd 100644
    --- a/vendor/google.golang.org/api/internal/version.go
    +++ b/vendor/google.golang.org/api/internal/version.go
    @@ -5,4 +5,4 @@
     package internal
     
     // Version is the current tagged release of the library.
    -const Version = "0.162.0"
    +const Version = "0.213.0"
    diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go
    index e6b5c10255..c63c0c194a 100644
    --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go
    +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go
    @@ -6,6 +6,11 @@
     package internaloption
     
     import (
    +	"context"
    +	"log/slog"
    +
    +	"cloud.google.com/go/auth"
    +	"github.com/googleapis/gax-go/v2/internallog"
     	"golang.org/x/oauth2/google"
     	"google.golang.org/api/internal"
     	"google.golang.org/api/option"
    @@ -206,9 +211,79 @@ func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) {
     	o.EnableNewAuthLibrary = bool(w)
     }
     
    +// EnableAsyncRefreshDryRun returns a ClientOption that specifies whether libraries in this
    +// module should asynchronously refresh the auth token in parallel to the sync refresh.
    +//
    +// This option can be used to determine whether refreshing the token asynchronously
    +// prior to its actual expiry works without any issues in a particular environment.
    +//
    +// The errHandler function will be called when there is an error while refreshing
    +// the token asynchronously.
    +//
    +// This is an EXPERIMENTAL option and will be removed in the future.
    +// TODO(b/372244283): Remove after b/358175516 has been fixed
    +func EnableAsyncRefreshDryRun(errHandler func()) option.ClientOption {
    +	return enableAsyncRefreshDryRun{
    +		errHandler: errHandler,
    +	}
    +}
    +
    +// TODO(b/372244283): Remove after b/358175516 has been fixed
    +type enableAsyncRefreshDryRun struct {
    +	errHandler func()
    +}
    +
    +// TODO(b/372244283): Remove after b/358175516 has been fixed
    +func (w enableAsyncRefreshDryRun) Apply(o *internal.DialSettings) {
    +	o.EnableAsyncRefreshDryRun = w.errHandler
    +}
    +
     // EmbeddableAdapter is a no-op option.ClientOption that allow libraries to
     // create their own client options by embedding this type into their own
     // client-specific option wrapper. See example for usage.
     type EmbeddableAdapter struct{}
     
     func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {}
    +
    +// GetLogger is a helper for client libraries to extract the [slog.Logger] from
    +// the provided options or return a default logger if one is not found.
    +//
    +// It should only be used internally by generated clients. This is an EXPERIMENTAL API
    +// and may be changed or removed in the future.
    +func GetLogger(opts []option.ClientOption) *slog.Logger {
    +	var ds internal.DialSettings
    +	for _, opt := range opts {
    +		opt.Apply(&ds)
    +	}
    +	return internallog.New(ds.Logger)
    +}
    +
    +// AuthCreds returns [cloud.google.com/go/auth.Credentials] using the following
    +// options provided via [option.ClientOption], including legacy oauth2/google
    +// options, in this order:
    +//
    +// * [option.WithAuthCredentials]
    +// * [option/internaloption.WithCredentials] (internal use only)
    +// * [option.WithCredentials]
    +// * [option.WithTokenSource]
    +//
    +// If there are no applicable credentials options, then it passes the
    +// following options to [cloud.google.com/go/auth/credentials.DetectDefault] and
    +// returns the result:
    +//
    +// * [option.WithAudiences]
    +// * [option.WithCredentialsFile]
    +// * [option.WithCredentialsJSON]
    +// * [option.WithScopes]
    +// * [option/internaloption.WithDefaultScopes] (internal use only)
    +// * [option/internaloption.EnableJwtWithScope] (internal use only)
    +//
    +// This function should only be used internally by generated clients. This is an
    +// EXPERIMENTAL API and may be changed or removed in the future.
    +func AuthCreds(ctx context.Context, opts []option.ClientOption) (*auth.Credentials, error) {
    +	var ds internal.DialSettings
    +	for _, opt := range opts {
    +		opt.Apply(&ds)
    +	}
    +	return internal.AuthCreds(ctx, &ds)
    +}
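    GetLogger and AuthCreds are documented as helpers for generated clients; a
    hedged sketch of the intended call pattern (the scope is an assumption for
    illustration):

        package main

        import (
            "context"
            "fmt"

            "google.golang.org/api/option"
            "google.golang.org/api/option/internaloption"
        )

        func main() {
            ctx := context.Background()
            opts := []option.ClientOption{
                option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
            }

            // Falls back to a default slog.Logger when option.WithLogger was not supplied.
            logger := internaloption.GetLogger(opts)
            logger.Info("resolving credentials")

            // Honors explicit credential options first, otherwise runs
            // credentials.DetectDefault with the scope/audience options above.
            creds, err := internaloption.AuthCreds(ctx, opts)
            if err != nil {
                fmt.Println("credential detection failed (e.g. no ADC):", err)
                return
            }
            fmt.Println(creds != nil)
        }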
    diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
    index c882c1eb48..eb54813aae 100644
    --- a/vendor/google.golang.org/api/option/option.go
    +++ b/vendor/google.golang.org/api/option/option.go
    @@ -7,8 +7,10 @@ package option
     
     import (
     	"crypto/tls"
    +	"log/slog"
     	"net/http"
     
    +	"cloud.google.com/go/auth"
     	"golang.org/x/oauth2"
     	"golang.org/x/oauth2/google"
     	"google.golang.org/api/internal"
    @@ -344,9 +346,20 @@ func WithCredentials(creds *google.Credentials) ClientOption {
     	return (*withCreds)(creds)
     }
     
    +// WithAuthCredentials returns a ClientOption that specifies an
    +// [cloud.google.com/go/auth.Credentials] to be used as the basis for
    +// authentication.
    +func WithAuthCredentials(creds *auth.Credentials) ClientOption {
    +	return withAuthCredentials{creds}
    +}
    +
    +type withAuthCredentials struct{ creds *auth.Credentials }
    +
    +func (w withAuthCredentials) Apply(o *internal.DialSettings) {
    +	o.AuthCredentials = w.creds
    +}
    +
     // WithUniverseDomain returns a ClientOption that sets the universe domain.
    -//
    -// This is an EXPERIMENTAL API and may be changed or removed in the future.
     func WithUniverseDomain(ud string) ClientOption {
     	return withUniverseDomain(ud)
     }
    @@ -356,3 +369,17 @@ type withUniverseDomain string
     func (w withUniverseDomain) Apply(o *internal.DialSettings) {
     	o.UniverseDomain = string(w)
     }
    +
    +// WithLogger returns a ClientOption that sets the logger used throughout the
    +// client library call stack. If this option is provided it takes precedence
    +// over the value set in GOOGLE_SDK_GO_LOGGING_LEVEL. Specifying this option
    +// enables logging at the provided logger's configured level.
    +func WithLogger(l *slog.Logger) ClientOption {
    +	return withLogger{l}
    +}
    +
    +type withLogger struct{ l *slog.Logger }
    +
    +func (w withLogger) Apply(o *internal.DialSettings) {
    +	o.Logger = w.l
    +}
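    A hedged sketch combining the two new options, WithAuthCredentials and
    WithLogger, with the transport-level HTTP client (the cloud-platform scope is
    an assumption for illustration):

        package main

        import (
            "context"
            "log/slog"
            "os"

            "cloud.google.com/go/auth/credentials"
            "google.golang.org/api/option"
            transporthttp "google.golang.org/api/transport/http"
        )

        func main() {
            ctx := context.Background()

            // Detect credentials with the new auth library directly...
            creds, err := credentials.DetectDefault(&credentials.DetectOptions{
                Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
            })
            if err != nil {
                panic(err)
            }

            // ...and hand them, plus a structured logger, to the client via the new options.
            client, endpoint, err := transporthttp.NewClient(ctx,
                option.WithAuthCredentials(creds),
                option.WithLogger(slog.New(slog.NewJSONHandler(os.Stderr, nil))),
            )
            if err != nil {
                panic(err)
            }
            _, _ = client, endpoint
        }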
    diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
    index 02a0857ff1..a354d223d3 100644
    --- a/vendor/google.golang.org/api/transport/grpc/dial.go
    +++ b/vendor/google.golang.org/api/transport/grpc/dial.go
    @@ -17,8 +17,11 @@ import (
     	"sync"
     	"time"
     
    +	"cloud.google.com/go/auth"
    +	"cloud.google.com/go/auth/credentials"
    +	"cloud.google.com/go/auth/grpctransport"
    +	"cloud.google.com/go/auth/oauth2adapt"
     	"cloud.google.com/go/compute/metadata"
    -	"go.opencensus.io/plugin/ocgrpc"
     	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
     	"golang.org/x/oauth2"
     	"golang.org/x/time/rate"
    @@ -49,6 +52,9 @@ var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second}
     // Assign to var for unit test replacement
     var dialContext = grpc.DialContext
     
    +// Assign to var for unit test replacement
    +var dialContextNewAuth = grpctransport.Dial
    +
     // otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
     // all dial connections to avoid the memory leak documented in
     // https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
    @@ -79,6 +85,13 @@ func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, e
     	if o.GRPCConnPool != nil {
     		return o.GRPCConnPool.Conn(), nil
     	}
    +	if o.IsNewAuthLibraryEnabled() {
    +		pool, err := dialPoolNewAuth(ctx, true, 1, o)
    +		if err != nil {
    +			return nil, err
    +		}
    +		return pool.Connection(), nil
    +	}
     	// NOTE(cbro): We removed support for option.WithGRPCConnPool (GRPCConnPoolSize)
     	// on 2020-02-12 because RoundRobin and WithBalancer are deprecated and we need to remove usages of it.
     	//
    @@ -94,6 +107,13 @@ func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.Clien
     	if err != nil {
     		return nil, err
     	}
    +	if o.IsNewAuthLibraryEnabled() {
    +		pool, err := dialPoolNewAuth(ctx, false, 1, o)
    +		if err != nil {
    +			return nil, err
    +		}
    +		return pool.Connection(), nil
    +	}
     	return dial(ctx, true, o)
     }
     
    @@ -112,6 +132,18 @@ func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error
     	if o.GRPCConnPool != nil {
     		return o.GRPCConnPool, nil
     	}
    +
    +	if o.IsNewAuthLibraryEnabled() {
    +		if o.GRPCConn != nil {
    +			return &singleConnPool{o.GRPCConn}, nil
    +		}
    +		pool, err := dialPoolNewAuth(ctx, true, o.GRPCConnPoolSize, o)
    +		if err != nil {
    +			return nil, err
    +		}
    +		return &poolAdapter{pool}, nil
    +	}
    +
     	poolSize := o.GRPCConnPoolSize
     	if o.GRPCConn != nil {
     		// WithGRPCConn is technically incompatible with WithGRPCConnectionPool.
    @@ -141,6 +173,130 @@ func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error
     	return pool, nil
     }
     
    +// dialPoolNewAuth is an adapter that calls the new auth library.
    +func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *internal.DialSettings) (grpctransport.GRPCClientConnPool, error) {
    +	// honor options if set
    +	var creds *auth.Credentials
    +	if ds.InternalCredentials != nil {
    +		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials)
    +	} else if ds.Credentials != nil {
    +		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials)
    +	} else if ds.AuthCredentials != nil {
    +		creds = ds.AuthCredentials
    +	} else if ds.TokenSource != nil {
    +		credOpts := &auth.CredentialsOptions{
    +			TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource),
    +		}
    +		if ds.QuotaProject != "" {
    +			credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
    +				return ds.QuotaProject, nil
    +			})
    +		}
    +		creds = auth.NewCredentials(credOpts)
    +	}
    +
    +	var skipValidation bool
    +	// If our clients explicitly set up the credentials, skip validation as they
    +	// are assumed correct.
    +	if ds.SkipValidation || ds.InternalCredentials != nil {
    +		skipValidation = true
    +	}
    +
    +	var aud string
    +	if len(ds.Audiences) > 0 {
    +		aud = ds.Audiences[0]
    +	}
    +	metadata := map[string]string{}
    +	if ds.QuotaProject != "" {
    +		metadata["X-goog-user-project"] = ds.QuotaProject
    +	}
    +	if ds.RequestReason != "" {
    +		metadata["X-goog-request-reason"] = ds.RequestReason
    +	}
    +
    +	// Defaults for older clients that don't set this value yet
    +	defaultEndpointTemplate := ds.DefaultEndpointTemplate
    +	if defaultEndpointTemplate == "" {
    +		defaultEndpointTemplate = ds.DefaultEndpoint
    +	}
    +
    +	pool, err := dialContextNewAuth(ctx, secure, &grpctransport.Options{
    +		DisableTelemetry:      ds.TelemetryDisabled,
    +		DisableAuthentication: ds.NoAuth,
    +		Endpoint:              ds.Endpoint,
    +		Metadata:              metadata,
    +		GRPCDialOpts:          prepareDialOptsNewAuth(ds),
    +		PoolSize:              poolSize,
    +		Credentials:           creds,
    +		ClientCertProvider:    ds.ClientCertSource,
    +		APIKey:                ds.APIKey,
    +		DetectOpts: &credentials.DetectOptions{
    +			Scopes:          ds.Scopes,
    +			Audience:        aud,
    +			CredentialsFile: ds.CredentialsFile,
    +			CredentialsJSON: ds.CredentialsJSON,
    +			Logger:          ds.Logger,
    +		},
    +		InternalOptions: &grpctransport.InternalOptions{
    +			EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount,
    +			EnableDirectPath:                ds.EnableDirectPath,
    +			EnableDirectPathXds:             ds.EnableDirectPathXds,
    +			EnableJWTWithScope:              ds.EnableJwtWithScope,
    +			DefaultAudience:                 ds.DefaultAudience,
    +			DefaultEndpointTemplate:         defaultEndpointTemplate,
    +			DefaultMTLSEndpoint:             ds.DefaultMTLSEndpoint,
    +			DefaultScopes:                   ds.DefaultScopes,
    +			SkipValidation:                  skipValidation,
    +		},
    +		UniverseDomain: ds.UniverseDomain,
    +		Logger:         ds.Logger,
    +	})
    +	return pool, err
    +}
    +
    +func prepareDialOptsNewAuth(ds *internal.DialSettings) []grpc.DialOption {
    +	var opts []grpc.DialOption
    +	if ds.UserAgent != "" {
    +		opts = append(opts, grpc.WithUserAgent(ds.UserAgent))
    +	}
    +
    +	return append(opts, ds.GRPCDialOpts...)
    +}
    +
    +// dryRunAsync is a wrapper for oauth2.TokenSource that performs a sync refresh
    +// after an async refresh. The token generated by the async refresh is not used.
    +//
    +// This is an EXPERIMENTAL feature and may be removed or changed in the future.
    +// It is a temporary struct to determine if the async refresh
    +// is working properly.
    +// TODO(b/372244283): Remove after b/358175516 has been fixed
    +type dryRunAsync struct {
    +	asyncTokenSource oauth2.TokenSource
    +	syncTokenSource  oauth2.TokenSource
    +	errHandler       func()
    +}
    +
    +// TODO(b/372244283): Remove after b/358175516 has been fixed
    +func newDryRunAsync(ts oauth2.TokenSource, errHandler func()) dryRunAsync {
    +	tp := auth.NewCachedTokenProvider(oauth2adapt.TokenProviderFromTokenSource(ts), nil)
    +	asyncTs := oauth2adapt.TokenSourceFromTokenProvider(tp)
    +	return dryRunAsync{
    +		syncTokenSource:  ts,
    +		asyncTokenSource: asyncTs,
    +		errHandler:       errHandler,
    +	}
    +}
    +
    +// Token returns a token or an error.
    +// TODO(b/372244283): Remove after b/358175516 has been fixed
    +func (async dryRunAsync) Token() (*oauth2.Token, error) {
    +	_, err := async.asyncTokenSource.Token()
    +	if err != nil {
    +		async.errHandler()
    +	}
    +	return async.syncTokenSource.Token()
    +}
    +
     func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) {
     	if o.HTTPClient != nil {
     		return nil, errors.New("unsupported HTTP client specified")
    @@ -177,19 +333,14 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
     			if err != nil {
     				return nil, err
     			}
    -			credsUniverseDomain, err := internal.GetUniverseDomain(creds)
    -			if err != nil {
    -				return nil, err
    -			}
    -			if o.TokenSource == nil {
    -				// We only validate non-tokensource creds, as TokenSource-based credentials
    -				// don't propagate universe.
    -				if o.GetUniverseDomain() != credsUniverseDomain {
    -					return nil, internal.ErrUniverseNotMatch(o.GetUniverseDomain(), credsUniverseDomain)
    -				}
    +
    +			ts := creds.TokenSource
    +			// TODO(b/372244283): Remove after b/358175516 has been fixed
    +			if o.EnableAsyncRefreshDryRun != nil {
    +				ts = newDryRunAsync(ts, o.EnableAsyncRefreshDryRun)
     			}
     			grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{
    -				TokenSource:   oauth.TokenSource{TokenSource: creds.TokenSource},
    +				TokenSource:   oauth.TokenSource{TokenSource: ts},
     				quotaProject:  internal.GetQuotaProject(creds, o.QuotaProject),
     				requestReason: o.RequestReason,
     			}))
    @@ -235,7 +386,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
     	// Add tracing, but before the other options, so that clients can override the
     	// gRPC stats handler.
     	// This assumes that gRPC options are processed in order, left to right.
    -	grpcOpts = addOCStatsHandler(grpcOpts, o)
     	grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o)
     	grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
     	if o.UserAgent != "" {
    @@ -245,13 +395,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
     	return dialContext(ctx, endpoint, grpcOpts...)
     }
     
    -func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
    -	if settings.TelemetryDisabled {
    -		return opts
    -	}
    -	return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
    -}
    -
     func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
     	if settings.TelemetryDisabled {
     		return opts
    diff --git a/vendor/google.golang.org/api/transport/grpc/pool.go b/vendor/google.golang.org/api/transport/grpc/pool.go
    index 4cf94a2771..c731293d84 100644
    --- a/vendor/google.golang.org/api/transport/grpc/pool.go
    +++ b/vendor/google.golang.org/api/transport/grpc/pool.go
    @@ -9,6 +9,7 @@ import (
     	"fmt"
     	"sync/atomic"
     
    +	"cloud.google.com/go/auth/grpctransport"
     	"google.golang.org/api/internal"
     	"google.golang.org/grpc"
     )
    @@ -90,3 +91,27 @@ func (m multiError) Error() string {
     	}
     	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
     }
    +
    +type poolAdapter struct {
    +	pool grpctransport.GRPCClientConnPool
    +}
    +
    +func (p *poolAdapter) Conn() *grpc.ClientConn {
    +	return p.pool.Connection()
    +}
    +
    +func (p *poolAdapter) Num() int {
    +	return p.pool.Len()
    +}
    +
    +func (p *poolAdapter) Close() error {
    +	return p.pool.Close()
    +}
    +
    +func (p *poolAdapter) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
    +	return p.pool.Invoke(ctx, method, args, reply, opts...)
    +}
    +
    +func (p *poolAdapter) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
    +	return p.pool.NewStream(ctx, desc, method, opts...)
    +}
    diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
    index ce5e4fa5cc..6b7ea74ba4 100644
    --- a/vendor/google.golang.org/api/transport/http/dial.go
    +++ b/vendor/google.golang.org/api/transport/http/dial.go
    @@ -15,7 +15,10 @@ import (
     	"net/http"
     	"time"
     
    -	"go.opencensus.io/plugin/ochttp"
    +	"cloud.google.com/go/auth"
    +	"cloud.google.com/go/auth/credentials"
    +	"cloud.google.com/go/auth/httptransport"
    +	"cloud.google.com/go/auth/oauth2adapt"
     	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
     	"golang.org/x/net/http2"
     	"golang.org/x/oauth2"
    @@ -23,7 +26,6 @@ import (
     	"google.golang.org/api/internal"
     	"google.golang.org/api/internal/cert"
     	"google.golang.org/api/option"
    -	"google.golang.org/api/transport/http/internal/propagation"
     )
     
     // NewClient returns an HTTP client for use communicating with a Google cloud
    @@ -43,6 +45,13 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client,
     		return settings.HTTPClient, endpoint, nil
     	}
     
    +	if settings.IsNewAuthLibraryEnabled() {
    +		client, err := newClientNewAuth(ctx, nil, settings)
    +		if err != nil {
    +			return nil, "", err
    +		}
    +		return client, endpoint, nil
    +	}
     	trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings)
     	if err != nil {
     		return nil, "", err
    @@ -50,6 +59,85 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client,
     	return &http.Client{Transport: trans}, endpoint, nil
     }
     
    +// newClientNewAuth is an adapter that calls the new auth library.
    +func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.DialSettings) (*http.Client, error) {
    +	// honor options if set
    +	var creds *auth.Credentials
    +	if ds.InternalCredentials != nil {
    +		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials)
    +	} else if ds.Credentials != nil {
    +		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials)
    +	} else if ds.AuthCredentials != nil {
    +		creds = ds.AuthCredentials
    +	} else if ds.TokenSource != nil {
    +		credOpts := &auth.CredentialsOptions{
    +			TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource),
    +		}
    +		if ds.QuotaProject != "" {
    +			credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
    +				return ds.QuotaProject, nil
    +			})
    +		}
    +		creds = auth.NewCredentials(credOpts)
    +	}
    +
    +	var skipValidation bool
    +	// If our clients explicitly set up the credentials, skip validation as they
    +	// are assumed correct.
    +	if ds.SkipValidation || ds.InternalCredentials != nil {
    +		skipValidation = true
    +	}
    +
    +	// Defaults for older clients that don't set this value yet
    +	defaultEndpointTemplate := ds.DefaultEndpointTemplate
    +	if defaultEndpointTemplate == "" {
    +		defaultEndpointTemplate = ds.DefaultEndpoint
    +	}
    +
    +	var aud string
    +	if len(ds.Audiences) > 0 {
    +		aud = ds.Audiences[0]
    +	}
    +	headers := http.Header{}
    +	if ds.QuotaProject != "" {
    +		headers.Set("X-goog-user-project", ds.QuotaProject)
    +	}
    +	if ds.RequestReason != "" {
    +		headers.Set("X-goog-request-reason", ds.RequestReason)
    +	}
    +	client, err := httptransport.NewClient(&httptransport.Options{
    +		DisableTelemetry:      ds.TelemetryDisabled,
    +		DisableAuthentication: ds.NoAuth,
    +		Headers:               headers,
    +		Endpoint:              ds.Endpoint,
    +		APIKey:                ds.APIKey,
    +		Credentials:           creds,
    +		ClientCertProvider:    ds.ClientCertSource,
    +		BaseRoundTripper:      base,
    +		DetectOpts: &credentials.DetectOptions{
    +			Scopes:          ds.Scopes,
    +			Audience:        aud,
    +			CredentialsFile: ds.CredentialsFile,
    +			CredentialsJSON: ds.CredentialsJSON,
    +			Logger:          ds.Logger,
    +		},
    +		InternalOptions: &httptransport.InternalOptions{
    +			EnableJWTWithScope:      ds.EnableJwtWithScope,
    +			DefaultAudience:         ds.DefaultAudience,
    +			DefaultEndpointTemplate: defaultEndpointTemplate,
    +			DefaultMTLSEndpoint:     ds.DefaultMTLSEndpoint,
    +			DefaultScopes:           ds.DefaultScopes,
    +			SkipValidation:          skipValidation,
    +		},
    +		UniverseDomain: ds.UniverseDomain,
    +		Logger:         ds.Logger,
    +	})
    +	if err != nil {
    +		return nil, err
    +	}
    +	return client, nil
    +}
    +
     // NewTransport creates an http.RoundTripper for use communicating with a Google
     // cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
     func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
    @@ -60,6 +148,13 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl
     	if settings.HTTPClient != nil {
     		return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
     	}
    +	if settings.IsNewAuthLibraryEnabled() {
    +		client, err := newClientNewAuth(ctx, base, settings)
    +		if err != nil {
    +			return nil, err
    +		}
    +		return client.Transport, nil
    +	}
     	return newTransport(ctx, base, settings)
     }
     
    @@ -70,10 +165,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna
     		requestReason: settings.RequestReason,
     	}
     	var trans http.RoundTripper = paramTransport
    -	// Give OpenTelemetry precedence over OpenCensus in case user configuration
    -	// causes both to write the same header (`X-Cloud-Trace-Context`).
     	trans = addOpenTelemetryTransport(trans, settings)
    -	trans = addOCTransport(trans, settings)
     	switch {
     	case settings.NoAuth:
     		// Do nothing.
    @@ -88,17 +180,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna
     		if err != nil {
     			return nil, err
     		}
    -		credsUniverseDomain, err := internal.GetUniverseDomain(creds)
    -		if err != nil {
    -			return nil, err
    -		}
    -		if settings.TokenSource == nil {
    -			// We only validate non-tokensource creds, as TokenSource-based credentials
    -			// don't propagate universe.
    -			if settings.GetUniverseDomain() != credsUniverseDomain {
    -				return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain)
    -			}
    -		}
     		paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject)
     		ts := creds.TokenSource
     		if settings.ImpersonationConfig == nil && settings.TokenSource != nil {
    @@ -225,16 +306,6 @@ func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialS
     	return otelhttp.NewTransport(trans)
     }
     
    -func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
    -	if settings.TelemetryDisabled {
    -		return trans
    -	}
    -	return &ochttp.Transport{
    -		Base:        trans,
    -		Propagation: &propagation.HTTPFormat{},
    -	}
    -}
    -
     // clonedTransport returns the given RoundTripper as a cloned *http.Transport.
     // It returns nil if the RoundTripper can't be cloned or coerced to
     // *http.Transport.
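    With this change both the HTTP and gRPC dialers route through
    cloud.google.com/go/auth whenever the new-auth path is enabled, either
    programmatically or via the experimental environment variable. A hedged sketch
    of the env-var opt-in (the scope is again an assumption):

        package main

        import (
            "context"
            "fmt"
            "os"

            "google.golang.org/api/option"
            transporthttp "google.golang.org/api/transport/http"
        )

        func main() {
            // Opt in to the new auth code path; the companion
            // GOOGLE_API_GO_EXPERIMENTAL_DISABLE_NEW_AUTH_LIB variable can force
            // it back off during rollouts.
            os.Setenv("GOOGLE_API_GO_EXPERIMENTAL_ENABLE_NEW_AUTH_LIB", "true")

            client, endpoint, err := transporthttp.NewClient(context.Background(),
                option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
            )
            if err != nil {
                fmt.Println("client construction failed (e.g. no ADC available):", err)
                return
            }
            fmt.Println(endpoint, client != nil)
        }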
    diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
    deleted file mode 100644
    index ba7512aa26..0000000000
    --- a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
    +++ /dev/null
    @@ -1,87 +0,0 @@
    -// Copyright 2018 Google LLC.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.8
    -// +build go1.8
    -
    -// Package propagation implements X-Cloud-Trace-Context header propagation used
    -// by Google Cloud products.
    -package propagation
    -
    -import (
    -	"encoding/binary"
    -	"encoding/hex"
    -	"fmt"
    -	"net/http"
    -	"strconv"
    -	"strings"
    -
    -	"go.opencensus.io/trace"
    -	"go.opencensus.io/trace/propagation"
    -)
    -
    -const (
    -	httpHeaderMaxSize = 200
    -	httpHeader        = `X-Cloud-Trace-Context`
    -)
    -
    -var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
    -
    -// HTTPFormat implements propagation.HTTPFormat to propagate
    -// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
    -type HTTPFormat struct{}
    -
    -// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
    -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
    -	h := req.Header.Get(httpHeader)
    -	// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
    -	// Return if the header is empty or missing, or if the header is unreasonably
    -	// large, to avoid making unnecessary copies of a large string.
    -	if h == "" || len(h) > httpHeaderMaxSize {
    -		return trace.SpanContext{}, false
    -	}
    -
    -	// Parse the trace id field.
    -	slash := strings.Index(h, `/`)
    -	if slash == -1 {
    -		return trace.SpanContext{}, false
    -	}
    -	tid, h := h[:slash], h[slash+1:]
    -
    -	buf, err := hex.DecodeString(tid)
    -	if err != nil {
    -		return trace.SpanContext{}, false
    -	}
    -	copy(sc.TraceID[:], buf)
    -
    -	// Parse the span id field.
    -	spanstr := h
    -	semicolon := strings.Index(h, `;`)
    -	if semicolon != -1 {
    -		spanstr, h = h[:semicolon], h[semicolon+1:]
    -	}
    -	sid, err := strconv.ParseUint(spanstr, 10, 64)
    -	if err != nil {
    -		return trace.SpanContext{}, false
    -	}
    -	binary.BigEndian.PutUint64(sc.SpanID[:], sid)
    -
    -	// Parse the options field, options field is optional.
    -	if !strings.HasPrefix(h, "o=") {
    -		return sc, true
    -	}
    -	o, err := strconv.ParseUint(h[2:], 10, 64)
    -	if err != nil {
    -		return trace.SpanContext{}, false
    -	}
    -	sc.TraceOptions = trace.TraceOptions(o)
    -	return sc, true
    -}
    -
    -// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
    -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
    -	sid := binary.BigEndian.Uint64(sc.SpanID[:])
    -	header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
    -	req.Header.Set(httpHeader, header)
    -}
    diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
    deleted file mode 100644
    index 289693613c..0000000000
    --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md
    +++ /dev/null
    @@ -1,88 +0,0 @@
    -# Contributing
    -
    -1. Sign one of the contributor license agreements below.
    -1. Get the package:
    -
    -    `go get -d google.golang.org/appengine`
    -1. Change into the checked out source:
    -
    -    `cd $GOPATH/src/google.golang.org/appengine`
    -1. Fork the repo.
    -1. Set your fork as a remote:
    -
    -    `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git`
    -1. Make changes, commit to your fork.
    -1. Send a pull request with your changes. 
    -   The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request.
    -
    -# Testing
    -
    -## Running system tests
    -
    -Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
    -
    -Run tests with `go test`:
    -
    -```
    -go test -v google.golang.org/appengine/...
    -```
    -
    -## Contributor License Agreements
    -
    -Before we can accept your pull requests you'll need to sign a Contributor
    -License Agreement (CLA):
    -
    -- **If you are an individual writing original source code** and **you own the
    -intellectual property**, then you'll need to sign an [individual CLA][indvcla].
    -- **If you work for a company that wants to allow you to contribute your work**,
    -then you'll need to sign a [corporate CLA][corpcla].
    -
    -You can sign these electronically (just scroll to the bottom). After that,
    -we'll be able to accept your pull requests.
    -
    -## Contributor Code of Conduct
    -
    -As contributors and maintainers of this project,
    -and in the interest of fostering an open and welcoming community,
    -we pledge to respect all people who contribute through reporting issues,
    -posting feature requests, updating documentation,
    -submitting pull requests or patches, and other activities.
    -
    -We are committed to making participation in this project
    -a harassment-free experience for everyone,
    -regardless of level of experience, gender, gender identity and expression,
    -sexual orientation, disability, personal appearance,
    -body size, race, ethnicity, age, religion, or nationality.
    -
    -Examples of unacceptable behavior by participants include:
    -
    -* The use of sexualized language or imagery
    -* Personal attacks
    -* Trolling or insulting/derogatory comments
    -* Public or private harassment
    -* Publishing other's private information,
    -such as physical or electronic
    -addresses, without explicit permission
    -* Other unethical or unprofessional conduct.
    -
    -Project maintainers have the right and responsibility to remove, edit, or reject
    -comments, commits, code, wiki edits, issues, and other contributions
    -that are not aligned to this Code of Conduct.
    -By adopting this Code of Conduct,
    -project maintainers commit themselves to fairly and consistently
    -applying these principles to every aspect of managing this project.
    -Project maintainers who do not follow or enforce the Code of Conduct
    -may be permanently removed from the project team.
    -
    -This code of conduct applies both within project spaces and in public spaces
    -when an individual is representing the project or its community.
    -
    -Instances of abusive, harassing, or otherwise unacceptable behavior
    -may be reported by opening an issue
    -or contacting one or more of the project maintainers.
    -
    -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
    -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
    -
    -[indvcla]: https://developers.google.com/open-source/cla/individual
    -[corpcla]: https://developers.google.com/open-source/cla/corporate
    diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
    deleted file mode 100644
    index 5ccddd9990..0000000000
    --- a/vendor/google.golang.org/appengine/README.md
    +++ /dev/null
    @@ -1,100 +0,0 @@
    -# Go App Engine packages
    -
    -[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml)
    -
    -This repository supports the Go runtime on *App Engine standard*.
    -It provides APIs for interacting with App Engine services.
    -Its canonical import path is `google.golang.org/appengine`.
    -
    -See https://cloud.google.com/appengine/docs/go/
    -for more information.
    -
    -File issue reports and feature requests on the [GitHub's issue
    -tracker](https://github.com/golang/appengine/issues).
    -
    -## Upgrading an App Engine app to the flexible environment
    -
    -This package does not work on *App Engine flexible*.
    -
    -There are many differences between the App Engine standard environment and
    -the flexible environment.
    -
    -See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading).
    -
    -## Directory structure
    -
    -The top level directory of this repository is the `appengine` package. It
    -contains the
    -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
    -packages are in subdirectories (e.g. `datastore`).
    -
    -There is an `internal` subdirectory that contains service protocol buffers,
    -plus packages required for connectivity to make API calls. App Engine apps
    -should not directly import any package under `internal`.
    -
    -## Updating from legacy (`import "appengine"`) packages
    -
    -If you're currently using the bare `appengine` packages
    -(that is, not these ones, imported via `google.golang.org/appengine`),
    -then you can use the `aefix` tool to help automate an upgrade to these packages.
    -
    -Run `go get google.golang.org/appengine/cmd/aefix` to install it.
    -
    -### 1. Update import paths
    -
    -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
    -You will need to update your code to use import paths starting with that; for instance,
    -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
    -
    -### 2. Update code using deprecated, removed or modified APIs
    -
    -Most App Engine services are available with exactly the same API.
    -A few APIs were cleaned up, and there are some differences:
    -
    -* `appengine.Context` has been replaced with the `Context` type from `context`.
    -* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
    -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
    -* `appengine.Datacenter` now takes a `context.Context` argument.
    -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
    -* `delay.Call` now returns an error.
    -* `search.FieldLoadSaver` now handles document metadata.
    -* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
    -  `context.Context` instead.
    -* `aetest` no longer declares its own Context type, and uses the standard one instead.
    -* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
    -  deprecated and unused for a long time.
    -* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
    -  Use `appengine.ModuleHostname`and `appengine.ModuleName` instead.
    -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
    -  Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
    -  feature you require is not present in the new
    -  [blobstore package](https://google.golang.org/appengine/blobstore).
    -* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
    -  Use the standard `net` package instead.
    -
    -## Key Encode/Decode compatibility to help with datastore library migrations
    -
    -Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore.
    -`EnableKeyConversion` enables automatic conversion from keys encoded with cloud.google.com/go/datastore to the google.golang.org/appengine/datastore key type.
    -
    -### Enabling key conversion
    -
    -Enable key conversion by calling `EnableKeyConversion(ctx)` in the `/_ah/start` handler for basic and manual scaling or any handler in automatic scaling.
    -
    -#### 1. Basic or manual scaling
    -
    -This start handler will enable key conversion for all handlers in the service.
    -
    -```
    -http.HandleFunc("/_ah/start", func(w http.ResponseWriter, r *http.Request) {
    -    datastore.EnableKeyConversion(appengine.NewContext(r))
    -})
    -```
    -
    -#### 2. Automatic scaling
    -
    -`/_ah/start` is not supported for automatic scaling and `/_ah/warmup` is not guaranteed to run, so you must call `datastore.EnableKeyConversion(appengine.NewContext(r))`
    -before you use code that needs key conversion.
    -
    -You may want to add this call to each of your handlers, or introduce middleware that makes it for them.
    -`EnableKeyConversion` is safe for concurrent use. Any call to it after the first is ignored.
    \ No newline at end of file
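    The removed README describes the automatic-scaling case only in prose. A minimal sketch of such middleware, assuming only the deleted package's `datastore.EnableKeyConversion` and `appengine.NewContext` (the mux and handler names are illustrative):

    ```go
    package main

    import (
        "net/http"

        "google.golang.org/appengine"
        "google.golang.org/appengine/datastore"
    )

    // withKeyConversion enables key conversion once per request before the
    // wrapped handler runs; EnableKeyConversion is safe for concurrent use
    // and ignores every call after the first.
    func withKeyConversion(next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            datastore.EnableKeyConversion(appengine.NewContext(r))
            next.ServeHTTP(w, r)
        })
    }

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok")) // placeholder handler
        })
        http.Handle("/", withKeyConversion(mux))
        appengine.Main()
    }
    ```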
    diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
    deleted file mode 100644
    index 35ba9c8967..0000000000
    --- a/vendor/google.golang.org/appengine/appengine.go
    +++ /dev/null
    @@ -1,138 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -// Package appengine provides basic functionality for Google App Engine.
    -//
    -// For more information on how to write Go apps for Google App Engine, see:
    -// https://cloud.google.com/appengine/docs/go/
    -package appengine // import "google.golang.org/appengine"
    -
    -import (
    -	"context"
    -	"net/http"
    -
    -	"github.com/golang/protobuf/proto"
    -
    -	"google.golang.org/appengine/internal"
    -)
    -
    -// The gophers party all night; the rabbits provide the beats.
    -
    -// Main is the principal entry point for an app running in App Engine.
    -//
    -// On App Engine Flexible it installs a trivial health checker if one isn't
    -// already registered, and starts listening on port 8080 (overridden by the
    -// $PORT environment variable).
    -//
    -// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
    -// for details on how to do your own health checking.
    -//
    -// On App Engine Standard it ensures the server has started and is prepared to
    -// receive requests.
    -//
    -// Main never returns.
    -//
    -// Main is designed so that the app's main package looks like this:
    -//
    -//	package main
    -//
    -//	import (
    -//	        "google.golang.org/appengine"
    -//
    -//	        _ "myapp/package0"
    -//	        _ "myapp/package1"
    -//	)
    -//
    -//	func main() {
    -//	        appengine.Main()
    -//	}
    -//
    -// The "myapp/packageX" packages are expected to register HTTP handlers
    -// in their init functions.
    -func Main() {
    -	internal.Main()
    -}
    -
    -// Middleware wraps an http handler so that it can make GAE API calls
    -var Middleware func(http.Handler) http.Handler = internal.Middleware
    -
    -// IsDevAppServer reports whether the App Engine app is running in the
    -// development App Server.
    -func IsDevAppServer() bool {
    -	return internal.IsDevAppServer()
    -}
    -
    -// IsStandard reports whether the App Engine app is running in the standard
    -// environment. This includes both the first generation runtimes (<= Go 1.9)
    -// and the second generation runtimes (>= Go 1.11).
    -func IsStandard() bool {
    -	return internal.IsStandard()
    -}
    -
    -// IsFlex reports whether the App Engine app is running in the flexible environment.
    -func IsFlex() bool {
    -	return internal.IsFlex()
    -}
    -
    -// IsAppEngine reports whether the App Engine app is running on App Engine, in either
    -// the standard or flexible environment.
    -func IsAppEngine() bool {
    -	return internal.IsAppEngine()
    -}
    -
    -// IsSecondGen reports whether the App Engine app is running on the second generation
    -// runtimes (>= Go 1.11).
    -func IsSecondGen() bool {
    -	return internal.IsSecondGen()
    -}
    -
    -// NewContext returns a context for an in-flight HTTP request.
    -// This function is cheap.
    -func NewContext(req *http.Request) context.Context {
    -	return internal.ReqContext(req)
    -}
    -
    -// WithContext returns a copy of the parent context
    -// and associates it with an in-flight HTTP request.
    -// This function is cheap.
    -func WithContext(parent context.Context, req *http.Request) context.Context {
    -	return internal.WithContext(parent, req)
    -}
    -
    -// BlobKey is a key for a blobstore blob.
    -//
    -// Conceptually, this type belongs in the blobstore package, but it lives in
    -// the appengine package to avoid a circular dependency: blobstore depends on
    -// datastore, and datastore needs to refer to the BlobKey type.
    -type BlobKey string
    -
    -// GeoPoint represents a location as latitude/longitude in degrees.
    -type GeoPoint struct {
    -	Lat, Lng float64
    -}
    -
    -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
    -func (g GeoPoint) Valid() bool {
    -	return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
    -}
    -
    -// APICallFunc defines a function type for handling an API call.
    -// See WithCallOverride.
    -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
    -
    -// WithAPICallFunc returns a copy of the parent context
    -// that will cause API calls to invoke f instead of their normal operation.
    -//
    -// This is intended for advanced users only.
    -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
    -	return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
    -}
    -
    -// APICall performs an API call.
    -//
    -// This is not intended for general use; it is exported for use in conjunction
    -// with WithAPICallFunc.
    -func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
    -	return internal.Call(ctx, service, method, in, out)
    -}
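    The `WithAPICallFunc`/`APICall` hooks documented in the deleted appengine.go are the public override points for the `internal.Call` machinery removed further below. A hedged sketch of using them to stub out service calls in a test; the function name and log output are illustrative:

    ```go
    package example

    import (
        "context"
        "log"

        "github.com/golang/protobuf/proto"
        "google.golang.org/appengine"
    )

    // withStubbedAPI returns a context whose App Engine API calls are diverted
    // to a fake instead of the real service bridge.
    func withStubbedAPI(ctx context.Context) context.Context {
        return appengine.WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
            log.Printf("stubbed call to %s.%s", service, method)
            return nil // pretend every call succeeds and leave `out` zero-valued
        })
    }
    ```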
    diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
    deleted file mode 100644
    index 6e1d041cd9..0000000000
    --- a/vendor/google.golang.org/appengine/appengine_vm.go
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -// Copyright 2015 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build !appengine
    -// +build !appengine
    -
    -package appengine
    -
    -import (
    -	"context"
    -)
    -
    -// BackgroundContext returns a context not associated with a request.
    -//
    -// Deprecated: App Engine no longer has a special background context.
    -// Just use context.Background().
    -func BackgroundContext() context.Context {
    -	return context.Background()
    -}
    diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
    deleted file mode 100644
    index 16d0772e2a..0000000000
    --- a/vendor/google.golang.org/appengine/errors.go
    +++ /dev/null
    @@ -1,46 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -// This file provides error functions for common API failure modes.
    -
    -package appengine
    -
    -import (
    -	"fmt"
    -
    -	"google.golang.org/appengine/internal"
    -)
    -
    -// IsOverQuota reports whether err represents an API call failure
    -// due to insufficient available quota.
    -func IsOverQuota(err error) bool {
    -	callErr, ok := err.(*internal.CallError)
    -	return ok && callErr.Code == 4
    -}
    -
    -// MultiError is returned by batch operations when there are errors with
    -// particular elements. Errors will be in a one-to-one correspondence with
    -// the input elements; successful elements will have a nil entry.
    -type MultiError []error
    -
    -func (m MultiError) Error() string {
    -	s, n := "", 0
    -	for _, e := range m {
    -		if e != nil {
    -			if n == 0 {
    -				s = e.Error()
    -			}
    -			n++
    -		}
    -	}
    -	switch n {
    -	case 0:
    -		return "(0 errors)"
    -	case 1:
    -		return s
    -	case 2:
    -		return s + " (and 1 other error)"
    -	}
    -	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
    -}
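    MultiError's one-error-per-element contract is easiest to see at a call site. A sketch of checking it after a batch operation, assuming batch helpers such as `datastore.GetMulti` report partial failures this way; `Entity` is a placeholder type:

    ```go
    package example

    import (
        "context"
        "log"

        "google.golang.org/appengine"
        "google.golang.org/appengine/datastore"
    )

    type Entity struct{ Name string }

    func getAll(ctx context.Context, keys []*datastore.Key) []Entity {
        dst := make([]Entity, len(keys))
        if err := datastore.GetMulti(ctx, keys, dst); err != nil {
            if me, ok := err.(appengine.MultiError); ok {
                // Errors line up with keys; nil entries succeeded.
                for i, e := range me {
                    if e != nil {
                        log.Printf("key %v: %v", keys[i], e)
                    }
                }
            } else {
                log.Printf("batch failed: %v", err)
            }
        }
        return dst
    }
    ```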
    diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
    deleted file mode 100644
    index 1202fc1a53..0000000000
    --- a/vendor/google.golang.org/appengine/identity.go
    +++ /dev/null
    @@ -1,141 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package appengine
    -
    -import (
    -	"context"
    -	"time"
    -
    -	"google.golang.org/appengine/internal"
    -	pb "google.golang.org/appengine/internal/app_identity"
    -	modpb "google.golang.org/appengine/internal/modules"
    -)
    -
    -// AppID returns the application ID for the current application.
    -// The string will be a plain application ID (e.g. "appid"), with a
    -// domain prefix for custom domain deployments (e.g. "example.com:appid").
    -func AppID(c context.Context) string { return internal.AppID(c) }
    -
    -// DefaultVersionHostname returns the standard hostname of the default version
    -// of the current application (e.g. "my-app.appspot.com"). This is suitable for
    -// use in constructing URLs.
    -func DefaultVersionHostname(c context.Context) string {
    -	return internal.DefaultVersionHostname(c)
    -}
    -
    -// ModuleName returns the module name of the current instance.
    -func ModuleName(c context.Context) string {
    -	return internal.ModuleName(c)
    -}
    -
    -// ModuleHostname returns a hostname of a module instance.
    -// If module is the empty string, it refers to the module of the current instance.
    -// If version is empty, it refers to the version of the current instance if valid,
    -// or the default version of the module of the current instance.
    -// If instance is empty, ModuleHostname returns the load-balancing hostname.
    -func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
    -	req := &modpb.GetHostnameRequest{}
    -	if module != "" {
    -		req.Module = &module
    -	}
    -	if version != "" {
    -		req.Version = &version
    -	}
    -	if instance != "" {
    -		req.Instance = &instance
    -	}
    -	res := &modpb.GetHostnameResponse{}
    -	if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
    -		return "", err
    -	}
    -	return *res.Hostname, nil
    -}
    -
    -// VersionID returns the version ID for the current application.
    -// It will be of the form "X.Y", where X is specified in app.yaml,
    -// and Y is a number generated when each version of the app is uploaded.
    -// It does not include a module name.
    -func VersionID(c context.Context) string { return internal.VersionID(c) }
    -
    -// InstanceID returns a mostly-unique identifier for this instance.
    -func InstanceID() string { return internal.InstanceID() }
    -
    -// Datacenter returns an identifier for the datacenter that the instance is running in.
    -func Datacenter(c context.Context) string { return internal.Datacenter(c) }
    -
    -// ServerSoftware returns the App Engine release version.
    -// In production, it looks like "Google App Engine/X.Y.Z".
    -// In the development appserver, it looks like "Development/X.Y".
    -func ServerSoftware() string { return internal.ServerSoftware() }
    -
    -// RequestID returns a string that uniquely identifies the request.
    -func RequestID(c context.Context) string { return internal.RequestID(c) }
    -
    -// AccessToken generates an OAuth2 access token for the specified scopes on
    -// behalf of service account of this application. This token will expire after
    -// the returned time.
    -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
    -	req := &pb.GetAccessTokenRequest{Scope: scopes}
    -	res := &pb.GetAccessTokenResponse{}
    -
    -	err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
    -	if err != nil {
    -		return "", time.Time{}, err
    -	}
    -	return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
    -}
    -
    -// Certificate represents a public certificate for the app.
    -type Certificate struct {
    -	KeyName string
    -	Data    []byte // PEM-encoded X.509 certificate
    -}
    -
    -// PublicCertificates retrieves the public certificates for the app.
    -// They can be used to verify a signature returned by SignBytes.
    -func PublicCertificates(c context.Context) ([]Certificate, error) {
    -	req := &pb.GetPublicCertificateForAppRequest{}
    -	res := &pb.GetPublicCertificateForAppResponse{}
    -	if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
    -		return nil, err
    -	}
    -	var cs []Certificate
    -	for _, pc := range res.PublicCertificateList {
    -		cs = append(cs, Certificate{
    -			KeyName: pc.GetKeyName(),
    -			Data:    []byte(pc.GetX509CertificatePem()),
    -		})
    -	}
    -	return cs, nil
    -}
    -
    -// ServiceAccount returns a string representing the service account name, in
    -// the form of an email address (typically app_id@appspot.gserviceaccount.com).
    -func ServiceAccount(c context.Context) (string, error) {
    -	req := &pb.GetServiceAccountNameRequest{}
    -	res := &pb.GetServiceAccountNameResponse{}
    -
    -	err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
    -	if err != nil {
    -		return "", err
    -	}
    -	return res.GetServiceAccountName(), err
    -}
    -
    -// SignBytes signs bytes using a private key unique to your application.
    -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
    -	req := &pb.SignForAppRequest{BytesToSign: bytes}
    -	res := &pb.SignForAppResponse{}
    -
    -	if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
    -		return "", nil, err
    -	}
    -	return res.GetKeyName(), res.GetSignatureBytes(), nil
    -}
    -
    -func init() {
    -	internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
    -	internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
    -}
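    A hedged usage sketch for the identity helpers above: look up the app's service account and mint a scoped access token (the scope string is only an example):

    ```go
    package example

    import (
        "context"
        "log"

        "google.golang.org/appengine"
    )

    func logIdentity(ctx context.Context) {
        sa, err := appengine.ServiceAccount(ctx)
        if err != nil {
            log.Printf("ServiceAccount: %v", err)
            return
        }
        _, expiry, err := appengine.AccessToken(ctx, "https://www.googleapis.com/auth/cloud-platform")
        if err != nil {
            log.Printf("AccessToken: %v", err)
            return
        }
        log.Printf("service account %s has a token valid until %s", sa, expiry)
    }
    ```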
    diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
    deleted file mode 100644
    index 0569f5dd43..0000000000
    --- a/vendor/google.golang.org/appengine/internal/api.go
    +++ /dev/null
    @@ -1,653 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build !appengine
    -// +build !appengine
    -
    -package internal
    -
    -import (
    -	"bytes"
    -	"context"
    -	"errors"
    -	"fmt"
    -	"io/ioutil"
    -	"log"
    -	"net"
    -	"net/http"
    -	"net/url"
    -	"os"
    -	"runtime"
    -	"strconv"
    -	"strings"
    -	"sync"
    -	"sync/atomic"
    -	"time"
    -
    -	"github.com/golang/protobuf/proto"
    -
    -	basepb "google.golang.org/appengine/internal/base"
    -	logpb "google.golang.org/appengine/internal/log"
    -	remotepb "google.golang.org/appengine/internal/remote_api"
    -)
    -
    -const (
    -	apiPath = "/rpc_http"
    -)
    -
    -var (
    -	// Incoming headers.
    -	ticketHeader       = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
    -	dapperHeader       = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
    -	traceHeader        = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
    -	curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
    -	userIPHeader       = http.CanonicalHeaderKey("X-AppEngine-User-IP")
    -	remoteAddrHeader   = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
    -	devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
    -
    -	// Outgoing headers.
    -	apiEndpointHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
    -	apiEndpointHeaderValue = []string{"app-engine-apis"}
    -	apiMethodHeader        = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
    -	apiMethodHeaderValue   = []string{"/VMRemoteAPI.CallRemoteAPI"}
    -	apiDeadlineHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
    -	apiContentType         = http.CanonicalHeaderKey("Content-Type")
    -	apiContentTypeValue    = []string{"application/octet-stream"}
    -	logFlushHeader         = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
    -
    -	apiHTTPClient = &http.Client{
    -		Transport: &http.Transport{
    -			Proxy:               http.ProxyFromEnvironment,
    -			Dial:                limitDial,
    -			MaxIdleConns:        1000,
    -			MaxIdleConnsPerHost: 10000,
    -			IdleConnTimeout:     90 * time.Second,
    -		},
    -	}
    -)
    -
    -func apiURL(ctx context.Context) *url.URL {
    -	host, port := "appengine.googleapis.internal", "10001"
    -	if h := os.Getenv("API_HOST"); h != "" {
    -		host = h
    -	}
    -	if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil {
    -		host = hostOverride.(string)
    -	}
    -	if p := os.Getenv("API_PORT"); p != "" {
    -		port = p
    -	}
    -	if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil {
    -		port = portOverride.(string)
    -	}
    -	return &url.URL{
    -		Scheme: "http",
    -		Host:   host + ":" + port,
    -		Path:   apiPath,
    -	}
    -}
    -
    -// Middleware wraps an http handler so that it can make GAE API calls
    -func Middleware(next http.Handler) http.Handler {
    -	return handleHTTPMiddleware(executeRequestSafelyMiddleware(next))
    -}
    -
    -func handleHTTPMiddleware(next http.Handler) http.Handler {
    -	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    -		c := &aeContext{
    -			req:       r,
    -			outHeader: w.Header(),
    -		}
    -		r = r.WithContext(withContext(r.Context(), c))
    -		c.req = r
    -
    -		stopFlushing := make(chan int)
    -
    -		// Patch up RemoteAddr so it looks reasonable.
    -		if addr := r.Header.Get(userIPHeader); addr != "" {
    -			r.RemoteAddr = addr
    -		} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
    -			r.RemoteAddr = addr
    -		} else {
    -			// Should not normally reach here, but pick a sensible default anyway.
    -			r.RemoteAddr = "127.0.0.1"
    -		}
    -		// The address in the headers will most likely be of these forms:
    -		//	123.123.123.123
    -		//	2001:db8::1
    -		// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
    -		if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
    -			// Assume the remote address is only a host; add a default port.
    -			r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
    -		}
    -
    -		if logToLogservice() {
    -			// Start goroutine responsible for flushing app logs.
    -			// This is done after adding c to ctx.m (and stopped before removing it)
    -			// because flushing logs requires making an API call.
    -			go c.logFlusher(stopFlushing)
    -		}
    -
    -		next.ServeHTTP(c, r)
    -		c.outHeader = nil // make sure header changes aren't respected any more
    -
    -		flushed := make(chan struct{})
    -		if logToLogservice() {
    -			stopFlushing <- 1 // any logging beyond this point will be dropped
    -
    -			// Flush any pending logs asynchronously.
    -			c.pendingLogs.Lock()
    -			flushes := c.pendingLogs.flushes
    -			if len(c.pendingLogs.lines) > 0 {
    -				flushes++
    -			}
    -			c.pendingLogs.Unlock()
    -			go func() {
    -				defer close(flushed)
    -				// Force a log flush, because with very short requests we
    -				// may not ever flush logs.
    -				c.flushLog(true)
    -			}()
    -			w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
    -		}
    -
    -		// Avoid nil Write call if c.Write is never called.
    -		if c.outCode != 0 {
    -			w.WriteHeader(c.outCode)
    -		}
    -		if c.outBody != nil {
    -			w.Write(c.outBody)
    -		}
    -		if logToLogservice() {
    -			// Wait for the last flush to complete before returning,
    -			// otherwise the security ticket will not be valid.
    -			<-flushed
    -		}
    -	})
    -}
    -
    -func executeRequestSafelyMiddleware(next http.Handler) http.Handler {
    -	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    -		defer func() {
    -			if x := recover(); x != nil {
    -				c := w.(*aeContext)
    -				logf(c, 4, "%s", renderPanic(x)) // 4 == critical
    -				c.outCode = 500
    -			}
    -		}()
    -
    -		next.ServeHTTP(w, r)
    -	})
    -}
    -
    -func renderPanic(x interface{}) string {
    -	buf := make([]byte, 16<<10) // 16 KB should be plenty
    -	buf = buf[:runtime.Stack(buf, false)]
    -
    -	// Remove the first few stack frames:
    -	//   this func
    -	//   the recover closure in the caller
    -	// That will root the stack trace at the site of the panic.
    -	const (
    -		skipStart  = "internal.renderPanic"
    -		skipFrames = 2
    -	)
    -	start := bytes.Index(buf, []byte(skipStart))
    -	p := start
    -	for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
    -		p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
    -		if p < 0 {
    -			break
    -		}
    -	}
    -	if p >= 0 {
    -		// buf[start:p+1] is the block to remove.
    -		// Copy buf[p+1:] over buf[start:] and shrink buf.
    -		copy(buf[start:], buf[p+1:])
    -		buf = buf[:len(buf)-(p+1-start)]
    -	}
    -
    -	// Add panic heading.
    -	head := fmt.Sprintf("panic: %v\n\n", x)
    -	if len(head) > len(buf) {
    -		// Extremely unlikely to happen.
    -		return head
    -	}
    -	copy(buf[len(head):], buf)
    -	copy(buf, head)
    -
    -	return string(buf)
    -}
    -
    -// aeContext represents the aeContext of an in-flight HTTP request.
    -// It implements the appengine.Context and http.ResponseWriter interfaces.
    -type aeContext struct {
    -	req *http.Request
    -
    -	outCode   int
    -	outHeader http.Header
    -	outBody   []byte
    -
    -	pendingLogs struct {
    -		sync.Mutex
    -		lines   []*logpb.UserAppLogLine
    -		flushes int
    -	}
    -}
    -
    -var contextKey = "holds a *context"
    -
    -// jointContext joins two contexts in a superficial way.
    -// It takes values and timeouts from a base context, and only values from another context.
    -type jointContext struct {
    -	base       context.Context
    -	valuesOnly context.Context
    -}
    -
    -func (c jointContext) Deadline() (time.Time, bool) {
    -	return c.base.Deadline()
    -}
    -
    -func (c jointContext) Done() <-chan struct{} {
    -	return c.base.Done()
    -}
    -
    -func (c jointContext) Err() error {
    -	return c.base.Err()
    -}
    -
    -func (c jointContext) Value(key interface{}) interface{} {
    -	if val := c.base.Value(key); val != nil {
    -		return val
    -	}
    -	return c.valuesOnly.Value(key)
    -}
    -
    -// fromContext returns the App Engine context or nil if ctx is not
    -// derived from an App Engine context.
    -func fromContext(ctx context.Context) *aeContext {
    -	c, _ := ctx.Value(&contextKey).(*aeContext)
    -	return c
    -}
    -
    -func withContext(parent context.Context, c *aeContext) context.Context {
    -	ctx := context.WithValue(parent, &contextKey, c)
    -	if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
    -		ctx = withNamespace(ctx, ns)
    -	}
    -	return ctx
    -}
    -
    -func toContext(c *aeContext) context.Context {
    -	return withContext(context.Background(), c)
    -}
    -
    -func IncomingHeaders(ctx context.Context) http.Header {
    -	if c := fromContext(ctx); c != nil {
    -		return c.req.Header
    -	}
    -	return nil
    -}
    -
    -func ReqContext(req *http.Request) context.Context {
    -	return req.Context()
    -}
    -
    -func WithContext(parent context.Context, req *http.Request) context.Context {
    -	return jointContext{
    -		base:       parent,
    -		valuesOnly: req.Context(),
    -	}
    -}
    -
    -// RegisterTestRequest registers the HTTP request req for testing, such that
    -// any API calls are sent to the provided URL.
    -// It should only be used by aetest package.
    -func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request {
    -	ctx := req.Context()
    -	ctx = withAPIHostOverride(ctx, apiURL.Hostname())
    -	ctx = withAPIPortOverride(ctx, apiURL.Port())
    -	ctx = WithAppIDOverride(ctx, appID)
    -
    -	// use the unregistered request as a placeholder so that withContext can read the headers
    -	c := &aeContext{req: req}
    -	c.req = req.WithContext(withContext(ctx, c))
    -	return c.req
    -}
    -
    -var errTimeout = &CallError{
    -	Detail:  "Deadline exceeded",
    -	Code:    int32(remotepb.RpcError_CANCELLED),
    -	Timeout: true,
    -}
    -
    -func (c *aeContext) Header() http.Header { return c.outHeader }
    -
    -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
    -// codes do not permit a response body (nor response entity headers such as
    -// Content-Length, Content-Type, etc).
    -func bodyAllowedForStatus(status int) bool {
    -	switch {
    -	case status >= 100 && status <= 199:
    -		return false
    -	case status == 204:
    -		return false
    -	case status == 304:
    -		return false
    -	}
    -	return true
    -}
    -
    -func (c *aeContext) Write(b []byte) (int, error) {
    -	if c.outCode == 0 {
    -		c.WriteHeader(http.StatusOK)
    -	}
    -	if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
    -		return 0, http.ErrBodyNotAllowed
    -	}
    -	c.outBody = append(c.outBody, b...)
    -	return len(b), nil
    -}
    -
    -func (c *aeContext) WriteHeader(code int) {
    -	if c.outCode != 0 {
    -		logf(c, 3, "WriteHeader called multiple times on request.") // error level
    -		return
    -	}
    -	c.outCode = code
    -}
    -
    -func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) {
    -	apiURL := apiURL(ctx)
    -	hreq := &http.Request{
    -		Method: "POST",
    -		URL:    apiURL,
    -		Header: http.Header{
    -			apiEndpointHeader: apiEndpointHeaderValue,
    -			apiMethodHeader:   apiMethodHeaderValue,
    -			apiContentType:    apiContentTypeValue,
    -			apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
    -		},
    -		Body:          ioutil.NopCloser(bytes.NewReader(body)),
    -		ContentLength: int64(len(body)),
    -		Host:          apiURL.Host,
    -	}
    -	c := fromContext(ctx)
    -	if c != nil {
    -		if info := c.req.Header.Get(dapperHeader); info != "" {
    -			hreq.Header.Set(dapperHeader, info)
    -		}
    -		if info := c.req.Header.Get(traceHeader); info != "" {
    -			hreq.Header.Set(traceHeader, info)
    -		}
    -	}
    -
    -	tr := apiHTTPClient.Transport.(*http.Transport)
    -
    -	var timedOut int32 // atomic; set to 1 if timed out
    -	t := time.AfterFunc(timeout, func() {
    -		atomic.StoreInt32(&timedOut, 1)
    -		tr.CancelRequest(hreq)
    -	})
    -	defer t.Stop()
    -	defer func() {
    -		// Check if timeout was exceeded.
    -		if atomic.LoadInt32(&timedOut) != 0 {
    -			err = errTimeout
    -		}
    -	}()
    -
    -	hresp, err := apiHTTPClient.Do(hreq)
    -	if err != nil {
    -		return nil, &CallError{
    -			Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
    -			Code:   int32(remotepb.RpcError_UNKNOWN),
    -		}
    -	}
    -	defer hresp.Body.Close()
    -	hrespBody, err := ioutil.ReadAll(hresp.Body)
    -	if hresp.StatusCode != 200 {
    -		return nil, &CallError{
    -			Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
    -			Code:   int32(remotepb.RpcError_UNKNOWN),
    -		}
    -	}
    -	if err != nil {
    -		return nil, &CallError{
    -			Detail: fmt.Sprintf("service bridge response bad: %v", err),
    -			Code:   int32(remotepb.RpcError_UNKNOWN),
    -		}
    -	}
    -	return hrespBody, nil
    -}
    -
    -func Call(ctx context.Context, service, method string, in, out proto.Message) error {
    -	if ns := NamespaceFromContext(ctx); ns != "" {
    -		if fn, ok := NamespaceMods[service]; ok {
    -			fn(in, ns)
    -		}
    -	}
    -
    -	if f, ctx, ok := callOverrideFromContext(ctx); ok {
    -		return f(ctx, service, method, in, out)
    -	}
    -
    -	// Handle already-done contexts quickly.
    -	select {
    -	case <-ctx.Done():
    -		return ctx.Err()
    -	default:
    -	}
    -
    -	c := fromContext(ctx)
    -
    -	// Apply transaction modifications if we're in a transaction.
    -	if t := transactionFromContext(ctx); t != nil {
    -		if t.finished {
    -			return errors.New("transaction aeContext has expired")
    -		}
    -		applyTransaction(in, &t.transaction)
    -	}
    -
    -	// Default RPC timeout is 60s.
    -	timeout := 60 * time.Second
    -	if deadline, ok := ctx.Deadline(); ok {
    -		timeout = deadline.Sub(time.Now())
    -	}
    -
    -	data, err := proto.Marshal(in)
    -	if err != nil {
    -		return err
    -	}
    -
    -	ticket := ""
    -	if c != nil {
    -		ticket = c.req.Header.Get(ticketHeader)
    -		if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
    -			ticket = dri
    -		}
    -	}
    -	req := &remotepb.Request{
    -		ServiceName: &service,
    -		Method:      &method,
    -		Request:     data,
    -		RequestId:   &ticket,
    -	}
    -	hreqBody, err := proto.Marshal(req)
    -	if err != nil {
    -		return err
    -	}
    -
    -	hrespBody, err := post(ctx, hreqBody, timeout)
    -	if err != nil {
    -		return err
    -	}
    -
    -	res := &remotepb.Response{}
    -	if err := proto.Unmarshal(hrespBody, res); err != nil {
    -		return err
    -	}
    -	if res.RpcError != nil {
    -		ce := &CallError{
    -			Detail: res.RpcError.GetDetail(),
    -			Code:   *res.RpcError.Code,
    -		}
    -		switch remotepb.RpcError_ErrorCode(ce.Code) {
    -		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
    -			ce.Timeout = true
    -		}
    -		return ce
    -	}
    -	if res.ApplicationError != nil {
    -		return &APIError{
    -			Service: *req.ServiceName,
    -			Detail:  res.ApplicationError.GetDetail(),
    -			Code:    *res.ApplicationError.Code,
    -		}
    -	}
    -	if res.Exception != nil || res.JavaException != nil {
    -		// This shouldn't happen, but let's be defensive.
    -		return &CallError{
    -			Detail: "service bridge returned exception",
    -			Code:   int32(remotepb.RpcError_UNKNOWN),
    -		}
    -	}
    -	return proto.Unmarshal(res.Response, out)
    -}
    -
    -func (c *aeContext) Request() *http.Request {
    -	return c.req
    -}
    -
    -func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) {
    -	// Truncate long log lines.
    -	// TODO(dsymonds): Check if this is still necessary.
    -	const lim = 8 << 10
    -	if len(*ll.Message) > lim {
    -		suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
    -		ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
    -	}
    -
    -	c.pendingLogs.Lock()
    -	c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
    -	c.pendingLogs.Unlock()
    -}
    -
    -var logLevelName = map[int64]string{
    -	0: "DEBUG",
    -	1: "INFO",
    -	2: "WARNING",
    -	3: "ERROR",
    -	4: "CRITICAL",
    -}
    -
    -func logf(c *aeContext, level int64, format string, args ...interface{}) {
    -	if c == nil {
    -		panic("not an App Engine aeContext")
    -	}
    -	s := fmt.Sprintf(format, args...)
    -	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
    -	if logToLogservice() {
    -		c.addLogLine(&logpb.UserAppLogLine{
    -			TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
    -			Level:         &level,
    -			Message:       &s,
    -		})
    -	}
    -	// Log to stdout if not deployed
    -	if !IsSecondGen() {
    -		log.Print(logLevelName[level] + ": " + s)
    -	}
    -}
    -
    -// flushLog attempts to flush any pending logs to the appserver.
    -// It should not be called concurrently.
    -func (c *aeContext) flushLog(force bool) (flushed bool) {
    -	c.pendingLogs.Lock()
    -	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
    -	n, rem := 0, 30<<20
    -	for ; n < len(c.pendingLogs.lines); n++ {
    -		ll := c.pendingLogs.lines[n]
    -		// Each log line will require about 3 bytes of overhead.
    -		nb := proto.Size(ll) + 3
    -		if nb > rem {
    -			break
    -		}
    -		rem -= nb
    -	}
    -	lines := c.pendingLogs.lines[:n]
    -	c.pendingLogs.lines = c.pendingLogs.lines[n:]
    -	c.pendingLogs.Unlock()
    -
    -	if len(lines) == 0 && !force {
    -		// Nothing to flush.
    -		return false
    -	}
    -
    -	rescueLogs := false
    -	defer func() {
    -		if rescueLogs {
    -			c.pendingLogs.Lock()
    -			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
    -			c.pendingLogs.Unlock()
    -		}
    -	}()
    -
    -	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
    -		LogLine: lines,
    -	})
    -	if err != nil {
    -		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
    -		rescueLogs = true
    -		return false
    -	}
    -
    -	req := &logpb.FlushRequest{
    -		Logs: buf,
    -	}
    -	res := &basepb.VoidProto{}
    -	c.pendingLogs.Lock()
    -	c.pendingLogs.flushes++
    -	c.pendingLogs.Unlock()
    -	if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
    -		log.Printf("internal.flushLog: Flush RPC: %v", err)
    -		rescueLogs = true
    -		return false
    -	}
    -	return true
    -}
    -
    -const (
    -	// Log flushing parameters.
    -	flushInterval      = 1 * time.Second
    -	forceFlushInterval = 60 * time.Second
    -)
    -
    -func (c *aeContext) logFlusher(stop <-chan int) {
    -	lastFlush := time.Now()
    -	tick := time.NewTicker(flushInterval)
    -	for {
    -		select {
    -		case <-stop:
    -			// Request finished.
    -			tick.Stop()
    -			return
    -		case <-tick.C:
    -			force := time.Now().Sub(lastFlush) > forceFlushInterval
    -			if c.flushLog(force) {
    -				lastFlush = time.Now()
    -			}
    -		}
    -	}
    -}
    -
    -func ContextForTesting(req *http.Request) context.Context {
    -	return toContext(&aeContext{req: req})
    -}
    -
    -func logToLogservice() bool {
    -	// TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json
    -	// where $LOG_DIR is /var/log in prod and some tmpdir in dev
    -	return os.Getenv("LOG_TO_LOGSERVICE") != "0"
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
    deleted file mode 100644
    index 87c33c798e..0000000000
    --- a/vendor/google.golang.org/appengine/internal/api_classic.go
    +++ /dev/null
    @@ -1,170 +0,0 @@
    -// Copyright 2015 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build appengine
    -// +build appengine
    -
    -package internal
    -
    -import (
    -	"context"
    -	"errors"
    -	"fmt"
    -	"net/http"
    -	"time"
    -
    -	"appengine"
    -	"appengine_internal"
    -	basepb "appengine_internal/base"
    -
    -	"github.com/golang/protobuf/proto"
    -)
    -
    -var contextKey = "holds an appengine.Context"
    -
    -// fromContext returns the App Engine context or nil if ctx is not
    -// derived from an App Engine context.
    -func fromContext(ctx context.Context) appengine.Context {
    -	c, _ := ctx.Value(&contextKey).(appengine.Context)
    -	return c
    -}
    -
    -// This is only for classic App Engine adapters.
    -func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) {
    -	c := fromContext(ctx)
    -	if c == nil {
    -		return nil, errNotAppEngineContext
    -	}
    -	return c, nil
    -}
    -
    -func withContext(parent context.Context, c appengine.Context) context.Context {
    -	ctx := context.WithValue(parent, &contextKey, c)
    -
    -	s := &basepb.StringProto{}
    -	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
    -	if ns := s.GetValue(); ns != "" {
    -		ctx = NamespacedContext(ctx, ns)
    -	}
    -
    -	return ctx
    -}
    -
    -func IncomingHeaders(ctx context.Context) http.Header {
    -	if c := fromContext(ctx); c != nil {
    -		if req, ok := c.Request().(*http.Request); ok {
    -			return req.Header
    -		}
    -	}
    -	return nil
    -}
    -
    -func ReqContext(req *http.Request) context.Context {
    -	return WithContext(context.Background(), req)
    -}
    -
    -func WithContext(parent context.Context, req *http.Request) context.Context {
    -	c := appengine.NewContext(req)
    -	return withContext(parent, c)
    -}
    -
    -type testingContext struct {
    -	appengine.Context
    -
    -	req *http.Request
    -}
    -
    -func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
    -func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
    -	if service == "__go__" && method == "GetNamespace" {
    -		return nil
    -	}
    -	return fmt.Errorf("testingContext: unsupported Call")
    -}
    -func (t *testingContext) Request() interface{} { return t.req }
    -
    -func ContextForTesting(req *http.Request) context.Context {
    -	return withContext(context.Background(), &testingContext{req: req})
    -}
    -
    -func Call(ctx context.Context, service, method string, in, out proto.Message) error {
    -	if ns := NamespaceFromContext(ctx); ns != "" {
    -		if fn, ok := NamespaceMods[service]; ok {
    -			fn(in, ns)
    -		}
    -	}
    -
    -	if f, ctx, ok := callOverrideFromContext(ctx); ok {
    -		return f(ctx, service, method, in, out)
    -	}
    -
    -	// Handle already-done contexts quickly.
    -	select {
    -	case <-ctx.Done():
    -		return ctx.Err()
    -	default:
    -	}
    -
    -	c := fromContext(ctx)
    -	if c == nil {
    -		// Give a good error message rather than a panic lower down.
    -		return errNotAppEngineContext
    -	}
    -
    -	// Apply transaction modifications if we're in a transaction.
    -	if t := transactionFromContext(ctx); t != nil {
    -		if t.finished {
    -			return errors.New("transaction context has expired")
    -		}
    -		applyTransaction(in, &t.transaction)
    -	}
    -
    -	var opts *appengine_internal.CallOptions
    -	if d, ok := ctx.Deadline(); ok {
    -		opts = &appengine_internal.CallOptions{
    -			Timeout: d.Sub(time.Now()),
    -		}
    -	}
    -
    -	err := c.Call(service, method, in, out, opts)
    -	switch v := err.(type) {
    -	case *appengine_internal.APIError:
    -		return &APIError{
    -			Service: v.Service,
    -			Detail:  v.Detail,
    -			Code:    v.Code,
    -		}
    -	case *appengine_internal.CallError:
    -		return &CallError{
    -			Detail:  v.Detail,
    -			Code:    v.Code,
    -			Timeout: v.Timeout,
    -		}
    -	}
    -	return err
    -}
    -
    -func Middleware(next http.Handler) http.Handler {
    -	panic("Middleware called; this should be impossible")
    -}
    -
    -func logf(c appengine.Context, level int64, format string, args ...interface{}) {
    -	var fn func(format string, args ...interface{})
    -	switch level {
    -	case 0:
    -		fn = c.Debugf
    -	case 1:
    -		fn = c.Infof
    -	case 2:
    -		fn = c.Warningf
    -	case 3:
    -		fn = c.Errorf
    -	case 4:
    -		fn = c.Criticalf
    -	default:
    -		// This shouldn't happen.
    -		fn = c.Criticalf
    -	}
    -	fn(format, args...)
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
    deleted file mode 100644
    index 5b95c13d92..0000000000
    --- a/vendor/google.golang.org/appengine/internal/api_common.go
    +++ /dev/null
    @@ -1,141 +0,0 @@
    -// Copyright 2015 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package internal
    -
    -import (
    -	"context"
    -	"errors"
    -	"os"
    -
    -	"github.com/golang/protobuf/proto"
    -)
    -
    -type ctxKey string
    -
    -func (c ctxKey) String() string {
    -	return "appengine context key: " + string(c)
    -}
    -
    -var errNotAppEngineContext = errors.New("not an App Engine context")
    -
    -type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error
    -
    -var callOverrideKey = "holds []CallOverrideFunc"
    -
    -func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context {
    -	// We avoid appending to any existing call override
    -	// so we don't risk overwriting a popped stack below.
    -	var cofs []CallOverrideFunc
    -	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
    -		cofs = append(cofs, uf...)
    -	}
    -	cofs = append(cofs, f)
    -	return context.WithValue(ctx, &callOverrideKey, cofs)
    -}
    -
    -func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) {
    -	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
    -	if len(cofs) == 0 {
    -		return nil, nil, false
    -	}
    -	// We found a list of overrides; grab the last, and reconstitute a
    -	// context that will hide it.
    -	f := cofs[len(cofs)-1]
    -	ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
    -	return f, ctx, true
    -}
    -
    -type logOverrideFunc func(level int64, format string, args ...interface{})
    -
    -var logOverrideKey = "holds a logOverrideFunc"
    -
    -func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context {
    -	return context.WithValue(ctx, &logOverrideKey, f)
    -}
    -
    -var appIDOverrideKey = "holds a string, being the full app ID"
    -
    -func WithAppIDOverride(ctx context.Context, appID string) context.Context {
    -	return context.WithValue(ctx, &appIDOverrideKey, appID)
    -}
    -
    -var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST")
    -
    -func withAPIHostOverride(ctx context.Context, apiHost string) context.Context {
    -	return context.WithValue(ctx, apiHostOverrideKey, apiHost)
    -}
    -
    -var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT")
    -
    -func withAPIPortOverride(ctx context.Context, apiPort string) context.Context {
    -	return context.WithValue(ctx, apiPortOverrideKey, apiPort)
    -}
    -
    -var namespaceKey = "holds the namespace string"
    -
    -func withNamespace(ctx context.Context, ns string) context.Context {
    -	return context.WithValue(ctx, &namespaceKey, ns)
    -}
    -
    -func NamespaceFromContext(ctx context.Context) string {
    -	// If there's no namespace, return the empty string.
    -	ns, _ := ctx.Value(&namespaceKey).(string)
    -	return ns
    -}
    -
    -// FullyQualifiedAppID returns the fully-qualified application ID.
    -// This may contain a partition prefix (e.g. "s~" for High Replication apps),
    -// or a domain prefix (e.g. "example.com:").
    -func FullyQualifiedAppID(ctx context.Context) string {
    -	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
    -		return id
    -	}
    -	return fullyQualifiedAppID(ctx)
    -}
    -
    -func Logf(ctx context.Context, level int64, format string, args ...interface{}) {
    -	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
    -		f(level, format, args...)
    -		return
    -	}
    -	c := fromContext(ctx)
    -	if c == nil {
    -		panic(errNotAppEngineContext)
    -	}
    -	logf(c, level, format, args...)
    -}
    -
    -// NamespacedContext wraps a Context to support namespaces.
    -func NamespacedContext(ctx context.Context, namespace string) context.Context {
    -	return withNamespace(ctx, namespace)
    -}
    -
    -// SetTestEnv sets the env variables for testing background ticket in Flex.
    -func SetTestEnv() func() {
    -	var environ = []struct {
    -		key, value string
    -	}{
    -		{"GAE_LONG_APP_ID", "my-app-id"},
    -		{"GAE_MINOR_VERSION", "067924799508853122"},
    -		{"GAE_MODULE_INSTANCE", "0"},
    -		{"GAE_MODULE_NAME", "default"},
    -		{"GAE_MODULE_VERSION", "20150612t184001"},
    -	}
    -
    -	for _, v := range environ {
    -		old := os.Getenv(v.key)
    -		os.Setenv(v.key, v.value)
    -		v.value = old
    -	}
    -	return func() { // Restore old environment after the test completes.
    -		for _, v := range environ {
    -			if v.value == "" {
    -				os.Unsetenv(v.key)
    -				continue
    -			}
    -			os.Setenv(v.key, v.value)
    -		}
    -	}
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
    deleted file mode 100644
    index 11df8c07b5..0000000000
    --- a/vendor/google.golang.org/appengine/internal/app_id.go
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package internal
    -
    -import (
    -	"strings"
    -)
    -
    -func parseFullAppID(appid string) (partition, domain, displayID string) {
    -	if i := strings.Index(appid, "~"); i != -1 {
    -		partition, appid = appid[:i], appid[i+1:]
    -	}
    -	if i := strings.Index(appid, ":"); i != -1 {
    -		domain, appid = appid[:i], appid[i+1:]
    -	}
    -	return partition, domain, appid
    -}
    -
    -// appID returns "appid" or "domain.com:appid".
    -func appID(fullAppID string) string {
    -	_, dom, dis := parseFullAppID(fullAppID)
    -	if dom != "" {
    -		return dom + ":" + dis
    -	}
    -	return dis
    -}
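    The parsing contract above ("partition~domain:displayID") is easiest to see with concrete inputs. A standalone sketch that copies the logic purely for illustration:

    ```go
    package main

    import (
        "fmt"
        "strings"
    )

    // parseFullAppID mirrors the deleted helper: an optional "partition~"
    // prefix, then an optional "domain:" prefix, then the display ID.
    func parseFullAppID(appid string) (partition, domain, displayID string) {
        if i := strings.Index(appid, "~"); i != -1 {
            partition, appid = appid[:i], appid[i+1:]
        }
        if i := strings.Index(appid, ":"); i != -1 {
            domain, appid = appid[:i], appid[i+1:]
        }
        return partition, domain, appid
    }

    func main() {
        fmt.Println(parseFullAppID("s~example.com:myapp")) // s example.com myapp
        fmt.Println(parseFullAppID("myapp"))               // prints "  myapp" (empty partition and domain)
    }
    ```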
    diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
    deleted file mode 100644
    index 9a2ff77ab5..0000000000
    --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
    +++ /dev/null
    @@ -1,611 +0,0 @@
    -// Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
    -
    -package app_identity
    -
    -import proto "github.com/golang/protobuf/proto"
    -import fmt "fmt"
    -import math "math"
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
    -
    -type AppIdentityServiceError_ErrorCode int32
    -
    -const (
    -	AppIdentityServiceError_SUCCESS           AppIdentityServiceError_ErrorCode = 0
    -	AppIdentityServiceError_UNKNOWN_SCOPE     AppIdentityServiceError_ErrorCode = 9
    -	AppIdentityServiceError_BLOB_TOO_LARGE    AppIdentityServiceError_ErrorCode = 1000
    -	AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
    -	AppIdentityServiceError_NOT_A_VALID_APP   AppIdentityServiceError_ErrorCode = 1002
    -	AppIdentityServiceError_UNKNOWN_ERROR     AppIdentityServiceError_ErrorCode = 1003
    -	AppIdentityServiceError_NOT_ALLOWED       AppIdentityServiceError_ErrorCode = 1005
    -	AppIdentityServiceError_NOT_IMPLEMENTED   AppIdentityServiceError_ErrorCode = 1006
    -)
    -
    -var AppIdentityServiceError_ErrorCode_name = map[int32]string{
    -	0:    "SUCCESS",
    -	9:    "UNKNOWN_SCOPE",
    -	1000: "BLOB_TOO_LARGE",
    -	1001: "DEADLINE_EXCEEDED",
    -	1002: "NOT_A_VALID_APP",
    -	1003: "UNKNOWN_ERROR",
    -	1005: "NOT_ALLOWED",
    -	1006: "NOT_IMPLEMENTED",
    -}
    -var AppIdentityServiceError_ErrorCode_value = map[string]int32{
    -	"SUCCESS":           0,
    -	"UNKNOWN_SCOPE":     9,
    -	"BLOB_TOO_LARGE":    1000,
    -	"DEADLINE_EXCEEDED": 1001,
    -	"NOT_A_VALID_APP":   1002,
    -	"UNKNOWN_ERROR":     1003,
    -	"NOT_ALLOWED":       1005,
    -	"NOT_IMPLEMENTED":   1006,
    -}
    -
    -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
    -	p := new(AppIdentityServiceError_ErrorCode)
    -	*p = x
    -	return p
    -}
    -func (x AppIdentityServiceError_ErrorCode) String() string {
    -	return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
    -}
    -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
    -	if err != nil {
    -		return err
    -	}
    -	*x = AppIdentityServiceError_ErrorCode(value)
    -	return nil
    -}
    -func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0}
    -}
    -
    -type AppIdentityServiceError struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *AppIdentityServiceError) Reset()         { *m = AppIdentityServiceError{} }
    -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
    -func (*AppIdentityServiceError) ProtoMessage()    {}
    -func (*AppIdentityServiceError) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0}
    -}
    -func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b)
    -}
    -func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic)
    -}
    -func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_AppIdentityServiceError.Merge(dst, src)
    -}
    -func (m *AppIdentityServiceError) XXX_Size() int {
    -	return xxx_messageInfo_AppIdentityServiceError.Size(m)
    -}
    -func (m *AppIdentityServiceError) XXX_DiscardUnknown() {
    -	xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo
    -
    -type SignForAppRequest struct {
    -	BytesToSign          []byte   `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *SignForAppRequest) Reset()         { *m = SignForAppRequest{} }
    -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
    -func (*SignForAppRequest) ProtoMessage()    {}
    -func (*SignForAppRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1}
    -}
    -func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b)
    -}
    -func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *SignForAppRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_SignForAppRequest.Merge(dst, src)
    -}
    -func (m *SignForAppRequest) XXX_Size() int {
    -	return xxx_messageInfo_SignForAppRequest.Size(m)
    -}
    -func (m *SignForAppRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_SignForAppRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo
    -
    -func (m *SignForAppRequest) GetBytesToSign() []byte {
    -	if m != nil {
    -		return m.BytesToSign
    -	}
    -	return nil
    -}
    -
    -type SignForAppResponse struct {
    -	KeyName              *string  `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
    -	SignatureBytes       []byte   `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *SignForAppResponse) Reset()         { *m = SignForAppResponse{} }
    -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
    -func (*SignForAppResponse) ProtoMessage()    {}
    -func (*SignForAppResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2}
    -}
    -func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b)
    -}
    -func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *SignForAppResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_SignForAppResponse.Merge(dst, src)
    -}
    -func (m *SignForAppResponse) XXX_Size() int {
    -	return xxx_messageInfo_SignForAppResponse.Size(m)
    -}
    -func (m *SignForAppResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_SignForAppResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo
    -
    -func (m *SignForAppResponse) GetKeyName() string {
    -	if m != nil && m.KeyName != nil {
    -		return *m.KeyName
    -	}
    -	return ""
    -}
    -
    -func (m *SignForAppResponse) GetSignatureBytes() []byte {
    -	if m != nil {
    -		return m.SignatureBytes
    -	}
    -	return nil
    -}
    -
    -type GetPublicCertificateForAppRequest struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetPublicCertificateForAppRequest) Reset()         { *m = GetPublicCertificateForAppRequest{} }
    -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetPublicCertificateForAppRequest) ProtoMessage()    {}
    -func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3}
    -}
    -func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b)
    -}
    -func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src)
    -}
    -func (m *GetPublicCertificateForAppRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m)
    -}
    -func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo
    -
    -type PublicCertificate struct {
    -	KeyName              *string  `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
    -	X509CertificatePem   *string  `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *PublicCertificate) Reset()         { *m = PublicCertificate{} }
    -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
    -func (*PublicCertificate) ProtoMessage()    {}
    -func (*PublicCertificate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4}
    -}
    -func (m *PublicCertificate) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PublicCertificate.Unmarshal(m, b)
    -}
    -func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic)
    -}
    -func (dst *PublicCertificate) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PublicCertificate.Merge(dst, src)
    -}
    -func (m *PublicCertificate) XXX_Size() int {
    -	return xxx_messageInfo_PublicCertificate.Size(m)
    -}
    -func (m *PublicCertificate) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PublicCertificate.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo
    -
    -func (m *PublicCertificate) GetKeyName() string {
    -	if m != nil && m.KeyName != nil {
    -		return *m.KeyName
    -	}
    -	return ""
    -}
    -
    -func (m *PublicCertificate) GetX509CertificatePem() string {
    -	if m != nil && m.X509CertificatePem != nil {
    -		return *m.X509CertificatePem
    -	}
    -	return ""
    -}
    -
    -type GetPublicCertificateForAppResponse struct {
    -	PublicCertificateList      []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"`
    -	MaxClientCacheTimeInSecond *int64               `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"`
    -	XXX_NoUnkeyedLiteral       struct{}             `json:"-"`
    -	XXX_unrecognized           []byte               `json:"-"`
    -	XXX_sizecache              int32                `json:"-"`
    -}
    -
    -func (m *GetPublicCertificateForAppResponse) Reset()         { *m = GetPublicCertificateForAppResponse{} }
    -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetPublicCertificateForAppResponse) ProtoMessage()    {}
    -func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5}
    -}
    -func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b)
    -}
    -func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src)
    -}
    -func (m *GetPublicCertificateForAppResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m)
    -}
    -func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo
    -
    -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
    -	if m != nil {
    -		return m.PublicCertificateList
    -	}
    -	return nil
    -}
    -
    -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
    -	if m != nil && m.MaxClientCacheTimeInSecond != nil {
    -		return *m.MaxClientCacheTimeInSecond
    -	}
    -	return 0
    -}
    -
    -type GetServiceAccountNameRequest struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetServiceAccountNameRequest) Reset()         { *m = GetServiceAccountNameRequest{} }
    -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetServiceAccountNameRequest) ProtoMessage()    {}
    -func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6}
    -}
    -func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b)
    -}
    -func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src)
    -}
    -func (m *GetServiceAccountNameRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetServiceAccountNameRequest.Size(m)
    -}
    -func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo
    -
    -type GetServiceAccountNameResponse struct {
    -	ServiceAccountName   *string  `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetServiceAccountNameResponse) Reset()         { *m = GetServiceAccountNameResponse{} }
    -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetServiceAccountNameResponse) ProtoMessage()    {}
    -func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7}
    -}
    -func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b)
    -}
    -func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src)
    -}
    -func (m *GetServiceAccountNameResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetServiceAccountNameResponse.Size(m)
    -}
    -func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo
    -
    -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
    -	if m != nil && m.ServiceAccountName != nil {
    -		return *m.ServiceAccountName
    -	}
    -	return ""
    -}
    -
    -type GetAccessTokenRequest struct {
    -	Scope                []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
    -	ServiceAccountId     *int64   `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"`
    -	ServiceAccountName   *string  `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetAccessTokenRequest) Reset()         { *m = GetAccessTokenRequest{} }
    -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetAccessTokenRequest) ProtoMessage()    {}
    -func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8}
    -}
    -func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b)
    -}
    -func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src)
    -}
    -func (m *GetAccessTokenRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetAccessTokenRequest.Size(m)
    -}
    -func (m *GetAccessTokenRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo
    -
    -func (m *GetAccessTokenRequest) GetScope() []string {
    -	if m != nil {
    -		return m.Scope
    -	}
    -	return nil
    -}
    -
    -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
    -	if m != nil && m.ServiceAccountId != nil {
    -		return *m.ServiceAccountId
    -	}
    -	return 0
    -}
    -
    -func (m *GetAccessTokenRequest) GetServiceAccountName() string {
    -	if m != nil && m.ServiceAccountName != nil {
    -		return *m.ServiceAccountName
    -	}
    -	return ""
    -}
    -
    -type GetAccessTokenResponse struct {
    -	AccessToken          *string  `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"`
    -	ExpirationTime       *int64   `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetAccessTokenResponse) Reset()         { *m = GetAccessTokenResponse{} }
    -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetAccessTokenResponse) ProtoMessage()    {}
    -func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9}
    -}
    -func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b)
    -}
    -func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src)
    -}
    -func (m *GetAccessTokenResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetAccessTokenResponse.Size(m)
    -}
    -func (m *GetAccessTokenResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo
    -
    -func (m *GetAccessTokenResponse) GetAccessToken() string {
    -	if m != nil && m.AccessToken != nil {
    -		return *m.AccessToken
    -	}
    -	return ""
    -}
    -
    -func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
    -	if m != nil && m.ExpirationTime != nil {
    -		return *m.ExpirationTime
    -	}
    -	return 0
    -}
    -
    -type GetDefaultGcsBucketNameRequest struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetDefaultGcsBucketNameRequest) Reset()         { *m = GetDefaultGcsBucketNameRequest{} }
    -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetDefaultGcsBucketNameRequest) ProtoMessage()    {}
    -func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10}
    -}
    -func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b)
    -}
    -func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src)
    -}
    -func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m)
    -}
    -func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo
    -
    -type GetDefaultGcsBucketNameResponse struct {
    -	DefaultGcsBucketName *string  `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetDefaultGcsBucketNameResponse) Reset()         { *m = GetDefaultGcsBucketNameResponse{} }
    -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetDefaultGcsBucketNameResponse) ProtoMessage()    {}
    -func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11}
    -}
    -func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b)
    -}
    -func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src)
    -}
    -func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m)
    -}
    -func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo
    -
    -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
    -	if m != nil && m.DefaultGcsBucketName != nil {
    -		return *m.DefaultGcsBucketName
    -	}
    -	return ""
    -}
    -
    -func init() {
    -	proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError")
    -	proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest")
    -	proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse")
    -	proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest")
    -	proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate")
    -	proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse")
    -	proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest")
    -	proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse")
    -	proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest")
    -	proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse")
    -	proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest")
    -	proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse")
    -}
    -
    -func init() {
    -	proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4)
    -}
    -
    -var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{
    -	// 676 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58,
    -	0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e,
    -	0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a,
    -	0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f,
    -	0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37,
    -	0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87,
    -	0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c,
    -	0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e,
    -	0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a,
    -	0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9,
    -	0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2,
    -	0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1,
    -	0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d,
    -	0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4,
    -	0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b,
    -	0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71,
    -	0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d,
    -	0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf,
    -	0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd,
    -	0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30,
    -	0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79,
    -	0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66,
    -	0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea,
    -	0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a,
    -	0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34,
    -	0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe,
    -	0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38,
    -	0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42,
    -	0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde,
    -	0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84,
    -	0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8,
    -	0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc,
    -	0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92,
    -	0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14,
    -	0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08,
    -	0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79,
    -	0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b,
    -	0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f,
    -	0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa,
    -	0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1,
    -	0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc,
    -	0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38,
    -	0xf3, 0x04, 0x00, 0x00,
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
    deleted file mode 100644
    index 19610ca5b7..0000000000
    --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
    +++ /dev/null
    @@ -1,64 +0,0 @@
    -syntax = "proto2";
    -option go_package = "app_identity";
    -
    -package appengine;
    -
    -message AppIdentityServiceError {
    -  enum ErrorCode {
    -    SUCCESS = 0;
    -    UNKNOWN_SCOPE = 9;
    -    BLOB_TOO_LARGE = 1000;
    -    DEADLINE_EXCEEDED = 1001;
    -    NOT_A_VALID_APP = 1002;
    -    UNKNOWN_ERROR = 1003;
    -    NOT_ALLOWED = 1005;
    -    NOT_IMPLEMENTED = 1006;
    -  }
    -}
    -
    -message SignForAppRequest {
    -  optional bytes bytes_to_sign = 1;
    -}
    -
    -message SignForAppResponse {
    -  optional string key_name = 1;
    -  optional bytes signature_bytes = 2;
    -}
    -
    -message GetPublicCertificateForAppRequest {
    -}
    -
    -message PublicCertificate {
    -  optional string key_name = 1;
    -  optional string x509_certificate_pem = 2;
    -}
    -
    -message GetPublicCertificateForAppResponse {
    -  repeated PublicCertificate public_certificate_list = 1;
    -  optional int64 max_client_cache_time_in_second = 2;
    -}
    -
    -message GetServiceAccountNameRequest {
    -}
    -
    -message GetServiceAccountNameResponse {
    -  optional string service_account_name = 1;
    -}
    -
    -message GetAccessTokenRequest {
    -  repeated string scope = 1;
    -  optional int64 service_account_id = 2;
    -  optional string service_account_name = 3;
    -}
    -
    -message GetAccessTokenResponse {
    -  optional string access_token = 1;
    -  optional int64 expiration_time = 2;
    -}
    -
    -message GetDefaultGcsBucketNameRequest {
    -}
    -
    -message GetDefaultGcsBucketNameResponse {
    -  optional string default_gcs_bucket_name = 1;
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
    deleted file mode 100644
    index db4777e68e..0000000000
    --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
    +++ /dev/null
    @@ -1,308 +0,0 @@
    -// Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: google.golang.org/appengine/internal/base/api_base.proto
    -
    -package base
    -
    -import proto "github.com/golang/protobuf/proto"
    -import fmt "fmt"
    -import math "math"
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
    -
    -type StringProto struct {
    -	Value                *string  `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *StringProto) Reset()         { *m = StringProto{} }
    -func (m *StringProto) String() string { return proto.CompactTextString(m) }
    -func (*StringProto) ProtoMessage()    {}
    -func (*StringProto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
    -}
    -func (m *StringProto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_StringProto.Unmarshal(m, b)
    -}
    -func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
    -}
    -func (dst *StringProto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_StringProto.Merge(dst, src)
    -}
    -func (m *StringProto) XXX_Size() int {
    -	return xxx_messageInfo_StringProto.Size(m)
    -}
    -func (m *StringProto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_StringProto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_StringProto proto.InternalMessageInfo
    -
    -func (m *StringProto) GetValue() string {
    -	if m != nil && m.Value != nil {
    -		return *m.Value
    -	}
    -	return ""
    -}
    -
    -type Integer32Proto struct {
    -	Value                *int32   `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Integer32Proto) Reset()         { *m = Integer32Proto{} }
    -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
    -func (*Integer32Proto) ProtoMessage()    {}
    -func (*Integer32Proto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
    -}
    -func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
    -}
    -func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
    -}
    -func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Integer32Proto.Merge(dst, src)
    -}
    -func (m *Integer32Proto) XXX_Size() int {
    -	return xxx_messageInfo_Integer32Proto.Size(m)
    -}
    -func (m *Integer32Proto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
    -
    -func (m *Integer32Proto) GetValue() int32 {
    -	if m != nil && m.Value != nil {
    -		return *m.Value
    -	}
    -	return 0
    -}
    -
    -type Integer64Proto struct {
    -	Value                *int64   `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Integer64Proto) Reset()         { *m = Integer64Proto{} }
    -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
    -func (*Integer64Proto) ProtoMessage()    {}
    -func (*Integer64Proto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
    -}
    -func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
    -}
    -func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
    -}
    -func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Integer64Proto.Merge(dst, src)
    -}
    -func (m *Integer64Proto) XXX_Size() int {
    -	return xxx_messageInfo_Integer64Proto.Size(m)
    -}
    -func (m *Integer64Proto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
    -
    -func (m *Integer64Proto) GetValue() int64 {
    -	if m != nil && m.Value != nil {
    -		return *m.Value
    -	}
    -	return 0
    -}
    -
    -type BoolProto struct {
    -	Value                *bool    `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *BoolProto) Reset()         { *m = BoolProto{} }
    -func (m *BoolProto) String() string { return proto.CompactTextString(m) }
    -func (*BoolProto) ProtoMessage()    {}
    -func (*BoolProto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
    -}
    -func (m *BoolProto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_BoolProto.Unmarshal(m, b)
    -}
    -func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
    -}
    -func (dst *BoolProto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_BoolProto.Merge(dst, src)
    -}
    -func (m *BoolProto) XXX_Size() int {
    -	return xxx_messageInfo_BoolProto.Size(m)
    -}
    -func (m *BoolProto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_BoolProto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_BoolProto proto.InternalMessageInfo
    -
    -func (m *BoolProto) GetValue() bool {
    -	if m != nil && m.Value != nil {
    -		return *m.Value
    -	}
    -	return false
    -}
    -
    -type DoubleProto struct {
    -	Value                *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *DoubleProto) Reset()         { *m = DoubleProto{} }
    -func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
    -func (*DoubleProto) ProtoMessage()    {}
    -func (*DoubleProto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
    -}
    -func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
    -}
    -func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
    -}
    -func (dst *DoubleProto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_DoubleProto.Merge(dst, src)
    -}
    -func (m *DoubleProto) XXX_Size() int {
    -	return xxx_messageInfo_DoubleProto.Size(m)
    -}
    -func (m *DoubleProto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_DoubleProto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
    -
    -func (m *DoubleProto) GetValue() float64 {
    -	if m != nil && m.Value != nil {
    -		return *m.Value
    -	}
    -	return 0
    -}
    -
    -type BytesProto struct {
    -	Value                []byte   `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *BytesProto) Reset()         { *m = BytesProto{} }
    -func (m *BytesProto) String() string { return proto.CompactTextString(m) }
    -func (*BytesProto) ProtoMessage()    {}
    -func (*BytesProto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
    -}
    -func (m *BytesProto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_BytesProto.Unmarshal(m, b)
    -}
    -func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
    -}
    -func (dst *BytesProto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_BytesProto.Merge(dst, src)
    -}
    -func (m *BytesProto) XXX_Size() int {
    -	return xxx_messageInfo_BytesProto.Size(m)
    -}
    -func (m *BytesProto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_BytesProto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_BytesProto proto.InternalMessageInfo
    -
    -func (m *BytesProto) GetValue() []byte {
    -	if m != nil {
    -		return m.Value
    -	}
    -	return nil
    -}
    -
    -type VoidProto struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *VoidProto) Reset()         { *m = VoidProto{} }
    -func (m *VoidProto) String() string { return proto.CompactTextString(m) }
    -func (*VoidProto) ProtoMessage()    {}
    -func (*VoidProto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
    -}
    -func (m *VoidProto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_VoidProto.Unmarshal(m, b)
    -}
    -func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
    -}
    -func (dst *VoidProto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_VoidProto.Merge(dst, src)
    -}
    -func (m *VoidProto) XXX_Size() int {
    -	return xxx_messageInfo_VoidProto.Size(m)
    -}
    -func (m *VoidProto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_VoidProto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_VoidProto proto.InternalMessageInfo
    -
    -func init() {
    -	proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
    -	proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
    -	proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
    -	proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
    -	proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
    -	proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
    -	proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
    -}
    -
    -func init() {
    -	proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
    -}
    -
    -var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
    -	// 199 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
    -	0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
    -	0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
    -	0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
    -	0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
    -	0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
    -	0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
    -	0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
    -	0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
    -	0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
    -	0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
    -	0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
    -	0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
    deleted file mode 100644
    index 56cd7a3cad..0000000000
    --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto
    +++ /dev/null
    @@ -1,33 +0,0 @@
    -// Built-in base types for API calls. Primarily useful as return types.
    -
    -syntax = "proto2";
    -option go_package = "base";
    -
    -package appengine.base;
    -
    -message StringProto {
    -  required string value = 1;
    -}
    -
    -message Integer32Proto {
    -  required int32 value = 1;
    -}
    -
    -message Integer64Proto {
    -  required int64 value = 1;
    -}
    -
    -message BoolProto {
    -  required bool value = 1;
    -}
    -
    -message DoubleProto {
    -  required double value = 1;
    -}
    -
    -message BytesProto {
    -  required bytes value = 1 [ctype=CORD];
    -}
    -
    -message VoidProto {
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
    deleted file mode 100644
    index 2fb7482896..0000000000
    --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
    +++ /dev/null
    @@ -1,4367 +0,0 @@
    -// Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
    -
    -package datastore
    -
    -import proto "github.com/golang/protobuf/proto"
    -import fmt "fmt"
    -import math "math"
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
    -
    -type Property_Meaning int32
    -
    -const (
    -	Property_NO_MEANING       Property_Meaning = 0
    -	Property_BLOB             Property_Meaning = 14
    -	Property_TEXT             Property_Meaning = 15
    -	Property_BYTESTRING       Property_Meaning = 16
    -	Property_ATOM_CATEGORY    Property_Meaning = 1
    -	Property_ATOM_LINK        Property_Meaning = 2
    -	Property_ATOM_TITLE       Property_Meaning = 3
    -	Property_ATOM_CONTENT     Property_Meaning = 4
    -	Property_ATOM_SUMMARY     Property_Meaning = 5
    -	Property_ATOM_AUTHOR      Property_Meaning = 6
    -	Property_GD_WHEN          Property_Meaning = 7
    -	Property_GD_EMAIL         Property_Meaning = 8
    -	Property_GEORSS_POINT     Property_Meaning = 9
    -	Property_GD_IM            Property_Meaning = 10
    -	Property_GD_PHONENUMBER   Property_Meaning = 11
    -	Property_GD_POSTALADDRESS Property_Meaning = 12
    -	Property_GD_RATING        Property_Meaning = 13
    -	Property_BLOBKEY          Property_Meaning = 17
    -	Property_ENTITY_PROTO     Property_Meaning = 19
    -	Property_INDEX_VALUE      Property_Meaning = 18
    -)
    -
    -var Property_Meaning_name = map[int32]string{
    -	0:  "NO_MEANING",
    -	14: "BLOB",
    -	15: "TEXT",
    -	16: "BYTESTRING",
    -	1:  "ATOM_CATEGORY",
    -	2:  "ATOM_LINK",
    -	3:  "ATOM_TITLE",
    -	4:  "ATOM_CONTENT",
    -	5:  "ATOM_SUMMARY",
    -	6:  "ATOM_AUTHOR",
    -	7:  "GD_WHEN",
    -	8:  "GD_EMAIL",
    -	9:  "GEORSS_POINT",
    -	10: "GD_IM",
    -	11: "GD_PHONENUMBER",
    -	12: "GD_POSTALADDRESS",
    -	13: "GD_RATING",
    -	17: "BLOBKEY",
    -	19: "ENTITY_PROTO",
    -	18: "INDEX_VALUE",
    -}
    -var Property_Meaning_value = map[string]int32{
    -	"NO_MEANING":       0,
    -	"BLOB":             14,
    -	"TEXT":             15,
    -	"BYTESTRING":       16,
    -	"ATOM_CATEGORY":    1,
    -	"ATOM_LINK":        2,
    -	"ATOM_TITLE":       3,
    -	"ATOM_CONTENT":     4,
    -	"ATOM_SUMMARY":     5,
    -	"ATOM_AUTHOR":      6,
    -	"GD_WHEN":          7,
    -	"GD_EMAIL":         8,
    -	"GEORSS_POINT":     9,
    -	"GD_IM":            10,
    -	"GD_PHONENUMBER":   11,
    -	"GD_POSTALADDRESS": 12,
    -	"GD_RATING":        13,
    -	"BLOBKEY":          17,
    -	"ENTITY_PROTO":     19,
    -	"INDEX_VALUE":      18,
    -}
    -
    -func (x Property_Meaning) Enum() *Property_Meaning {
    -	p := new(Property_Meaning)
    -	*p = x
    -	return p
    -}
    -func (x Property_Meaning) String() string {
    -	return proto.EnumName(Property_Meaning_name, int32(x))
    -}
    -func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Property_Meaning(value)
    -	return nil
    -}
    -func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
    -}
    -
    -type Property_FtsTokenizationOption int32
    -
    -const (
    -	Property_HTML Property_FtsTokenizationOption = 1
    -	Property_ATOM Property_FtsTokenizationOption = 2
    -)
    -
    -var Property_FtsTokenizationOption_name = map[int32]string{
    -	1: "HTML",
    -	2: "ATOM",
    -}
    -var Property_FtsTokenizationOption_value = map[string]int32{
    -	"HTML": 1,
    -	"ATOM": 2,
    -}
    -
    -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
    -	p := new(Property_FtsTokenizationOption)
    -	*p = x
    -	return p
    -}
    -func (x Property_FtsTokenizationOption) String() string {
    -	return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
    -}
    -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Property_FtsTokenizationOption(value)
    -	return nil
    -}
    -func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
    -}
    -
    -type EntityProto_Kind int32
    -
    -const (
    -	EntityProto_GD_CONTACT EntityProto_Kind = 1
    -	EntityProto_GD_EVENT   EntityProto_Kind = 2
    -	EntityProto_GD_MESSAGE EntityProto_Kind = 3
    -)
    -
    -var EntityProto_Kind_name = map[int32]string{
    -	1: "GD_CONTACT",
    -	2: "GD_EVENT",
    -	3: "GD_MESSAGE",
    -}
    -var EntityProto_Kind_value = map[string]int32{
    -	"GD_CONTACT": 1,
    -	"GD_EVENT":   2,
    -	"GD_MESSAGE": 3,
    -}
    -
    -func (x EntityProto_Kind) Enum() *EntityProto_Kind {
    -	p := new(EntityProto_Kind)
    -	*p = x
    -	return p
    -}
    -func (x EntityProto_Kind) String() string {
    -	return proto.EnumName(EntityProto_Kind_name, int32(x))
    -}
    -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
    -	if err != nil {
    -		return err
    -	}
    -	*x = EntityProto_Kind(value)
    -	return nil
    -}
    -func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
    -}
    -
    -type Index_Property_Direction int32
    -
    -const (
    -	Index_Property_ASCENDING  Index_Property_Direction = 1
    -	Index_Property_DESCENDING Index_Property_Direction = 2
    -)
    -
    -var Index_Property_Direction_name = map[int32]string{
    -	1: "ASCENDING",
    -	2: "DESCENDING",
    -}
    -var Index_Property_Direction_value = map[string]int32{
    -	"ASCENDING":  1,
    -	"DESCENDING": 2,
    -}
    -
    -func (x Index_Property_Direction) Enum() *Index_Property_Direction {
    -	p := new(Index_Property_Direction)
    -	*p = x
    -	return p
    -}
    -func (x Index_Property_Direction) String() string {
    -	return proto.EnumName(Index_Property_Direction_name, int32(x))
    -}
    -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Index_Property_Direction(value)
    -	return nil
    -}
    -func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
    -}
    -
    -type CompositeIndex_State int32
    -
    -const (
    -	CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
    -	CompositeIndex_READ_WRITE CompositeIndex_State = 2
    -	CompositeIndex_DELETED    CompositeIndex_State = 3
    -	CompositeIndex_ERROR      CompositeIndex_State = 4
    -)
    -
    -var CompositeIndex_State_name = map[int32]string{
    -	1: "WRITE_ONLY",
    -	2: "READ_WRITE",
    -	3: "DELETED",
    -	4: "ERROR",
    -}
    -var CompositeIndex_State_value = map[string]int32{
    -	"WRITE_ONLY": 1,
    -	"READ_WRITE": 2,
    -	"DELETED":    3,
    -	"ERROR":      4,
    -}
    -
    -func (x CompositeIndex_State) Enum() *CompositeIndex_State {
    -	p := new(CompositeIndex_State)
    -	*p = x
    -	return p
    -}
    -func (x CompositeIndex_State) String() string {
    -	return proto.EnumName(CompositeIndex_State_name, int32(x))
    -}
    -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
    -	if err != nil {
    -		return err
    -	}
    -	*x = CompositeIndex_State(value)
    -	return nil
    -}
    -func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
    -}
    -
    -type Snapshot_Status int32
    -
    -const (
    -	Snapshot_INACTIVE Snapshot_Status = 0
    -	Snapshot_ACTIVE   Snapshot_Status = 1
    -)
    -
    -var Snapshot_Status_name = map[int32]string{
    -	0: "INACTIVE",
    -	1: "ACTIVE",
    -}
    -var Snapshot_Status_value = map[string]int32{
    -	"INACTIVE": 0,
    -	"ACTIVE":   1,
    -}
    -
    -func (x Snapshot_Status) Enum() *Snapshot_Status {
    -	p := new(Snapshot_Status)
    -	*p = x
    -	return p
    -}
    -func (x Snapshot_Status) String() string {
    -	return proto.EnumName(Snapshot_Status_name, int32(x))
    -}
    -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Snapshot_Status(value)
    -	return nil
    -}
    -func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
    -}
    -
    -type Query_Hint int32
    -
    -const (
    -	Query_ORDER_FIRST    Query_Hint = 1
    -	Query_ANCESTOR_FIRST Query_Hint = 2
    -	Query_FILTER_FIRST   Query_Hint = 3
    -)
    -
    -var Query_Hint_name = map[int32]string{
    -	1: "ORDER_FIRST",
    -	2: "ANCESTOR_FIRST",
    -	3: "FILTER_FIRST",
    -}
    -var Query_Hint_value = map[string]int32{
    -	"ORDER_FIRST":    1,
    -	"ANCESTOR_FIRST": 2,
    -	"FILTER_FIRST":   3,
    -}
    -
    -func (x Query_Hint) Enum() *Query_Hint {
    -	p := new(Query_Hint)
    -	*p = x
    -	return p
    -}
    -func (x Query_Hint) String() string {
    -	return proto.EnumName(Query_Hint_name, int32(x))
    -}
    -func (x *Query_Hint) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Query_Hint(value)
    -	return nil
    -}
    -func (Query_Hint) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
    -}
    -
    -type Query_Filter_Operator int32
    -
    -const (
    -	Query_Filter_LESS_THAN             Query_Filter_Operator = 1
    -	Query_Filter_LESS_THAN_OR_EQUAL    Query_Filter_Operator = 2
    -	Query_Filter_GREATER_THAN          Query_Filter_Operator = 3
    -	Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
    -	Query_Filter_EQUAL                 Query_Filter_Operator = 5
    -	Query_Filter_IN                    Query_Filter_Operator = 6
    -	Query_Filter_EXISTS                Query_Filter_Operator = 7
    -)
    -
    -var Query_Filter_Operator_name = map[int32]string{
    -	1: "LESS_THAN",
    -	2: "LESS_THAN_OR_EQUAL",
    -	3: "GREATER_THAN",
    -	4: "GREATER_THAN_OR_EQUAL",
    -	5: "EQUAL",
    -	6: "IN",
    -	7: "EXISTS",
    -}
    -var Query_Filter_Operator_value = map[string]int32{
    -	"LESS_THAN":             1,
    -	"LESS_THAN_OR_EQUAL":    2,
    -	"GREATER_THAN":          3,
    -	"GREATER_THAN_OR_EQUAL": 4,
    -	"EQUAL":                 5,
    -	"IN":                    6,
    -	"EXISTS":                7,
    -}
    -
    -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
    -	p := new(Query_Filter_Operator)
    -	*p = x
    -	return p
    -}
    -func (x Query_Filter_Operator) String() string {
    -	return proto.EnumName(Query_Filter_Operator_name, int32(x))
    -}
    -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Query_Filter_Operator(value)
    -	return nil
    -}
    -func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
    -}
    -
    -type Query_Order_Direction int32
    -
    -const (
    -	Query_Order_ASCENDING  Query_Order_Direction = 1
    -	Query_Order_DESCENDING Query_Order_Direction = 2
    -)
    -
    -var Query_Order_Direction_name = map[int32]string{
    -	1: "ASCENDING",
    -	2: "DESCENDING",
    -}
    -var Query_Order_Direction_value = map[string]int32{
    -	"ASCENDING":  1,
    -	"DESCENDING": 2,
    -}
    -
    -func (x Query_Order_Direction) Enum() *Query_Order_Direction {
    -	p := new(Query_Order_Direction)
    -	*p = x
    -	return p
    -}
    -func (x Query_Order_Direction) String() string {
    -	return proto.EnumName(Query_Order_Direction_name, int32(x))
    -}
    -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Query_Order_Direction(value)
    -	return nil
    -}
    -func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
    -}
    -
    -type Error_ErrorCode int32
    -
    -const (
    -	Error_BAD_REQUEST                  Error_ErrorCode = 1
    -	Error_CONCURRENT_TRANSACTION       Error_ErrorCode = 2
    -	Error_INTERNAL_ERROR               Error_ErrorCode = 3
    -	Error_NEED_INDEX                   Error_ErrorCode = 4
    -	Error_TIMEOUT                      Error_ErrorCode = 5
    -	Error_PERMISSION_DENIED            Error_ErrorCode = 6
    -	Error_BIGTABLE_ERROR               Error_ErrorCode = 7
    -	Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
    -	Error_CAPABILITY_DISABLED          Error_ErrorCode = 9
    -	Error_TRY_ALTERNATE_BACKEND        Error_ErrorCode = 10
    -	Error_SAFE_TIME_TOO_OLD            Error_ErrorCode = 11
    -)
    -
    -var Error_ErrorCode_name = map[int32]string{
    -	1:  "BAD_REQUEST",
    -	2:  "CONCURRENT_TRANSACTION",
    -	3:  "INTERNAL_ERROR",
    -	4:  "NEED_INDEX",
    -	5:  "TIMEOUT",
    -	6:  "PERMISSION_DENIED",
    -	7:  "BIGTABLE_ERROR",
    -	8:  "COMMITTED_BUT_STILL_APPLYING",
    -	9:  "CAPABILITY_DISABLED",
    -	10: "TRY_ALTERNATE_BACKEND",
    -	11: "SAFE_TIME_TOO_OLD",
    -}
    -var Error_ErrorCode_value = map[string]int32{
    -	"BAD_REQUEST":                  1,
    -	"CONCURRENT_TRANSACTION":       2,
    -	"INTERNAL_ERROR":               3,
    -	"NEED_INDEX":                   4,
    -	"TIMEOUT":                      5,
    -	"PERMISSION_DENIED":            6,
    -	"BIGTABLE_ERROR":               7,
    -	"COMMITTED_BUT_STILL_APPLYING": 8,
    -	"CAPABILITY_DISABLED":          9,
    -	"TRY_ALTERNATE_BACKEND":        10,
    -	"SAFE_TIME_TOO_OLD":            11,
    -}
    -
    -func (x Error_ErrorCode) Enum() *Error_ErrorCode {
    -	p := new(Error_ErrorCode)
    -	*p = x
    -	return p
    -}
    -func (x Error_ErrorCode) String() string {
    -	return proto.EnumName(Error_ErrorCode_name, int32(x))
    -}
    -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
    -	if err != nil {
    -		return err
    -	}
    -	*x = Error_ErrorCode(value)
    -	return nil
    -}
    -func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
    -}
    -
    -type PutRequest_AutoIdPolicy int32
    -
    -const (
    -	PutRequest_CURRENT    PutRequest_AutoIdPolicy = 0
    -	PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
    -)
    -
    -var PutRequest_AutoIdPolicy_name = map[int32]string{
    -	0: "CURRENT",
    -	1: "SEQUENTIAL",
    -}
    -var PutRequest_AutoIdPolicy_value = map[string]int32{
    -	"CURRENT":    0,
    -	"SEQUENTIAL": 1,
    -}
    -
    -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
    -	p := new(PutRequest_AutoIdPolicy)
    -	*p = x
    -	return p
    -}
    -func (x PutRequest_AutoIdPolicy) String() string {
    -	return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
    -}
    -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
    -	if err != nil {
    -		return err
    -	}
    -	*x = PutRequest_AutoIdPolicy(value)
    -	return nil
    -}
    -func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
    -}
    -
    -type BeginTransactionRequest_TransactionMode int32
    -
    -const (
    -	BeginTransactionRequest_UNKNOWN    BeginTransactionRequest_TransactionMode = 0
    -	BeginTransactionRequest_READ_ONLY  BeginTransactionRequest_TransactionMode = 1
    -	BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
    -)
    -
    -var BeginTransactionRequest_TransactionMode_name = map[int32]string{
    -	0: "UNKNOWN",
    -	1: "READ_ONLY",
    -	2: "READ_WRITE",
    -}
    -var BeginTransactionRequest_TransactionMode_value = map[string]int32{
    -	"UNKNOWN":    0,
    -	"READ_ONLY":  1,
    -	"READ_WRITE": 2,
    -}
    -
    -func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
    -	p := new(BeginTransactionRequest_TransactionMode)
    -	*p = x
    -	return p
    -}
    -func (x BeginTransactionRequest_TransactionMode) String() string {
    -	return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
    -}
    -func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
    -	if err != nil {
    -		return err
    -	}
    -	*x = BeginTransactionRequest_TransactionMode(value)
    -	return nil
    -}
    -func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
    -}
    -
    -type Action struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Action) Reset()         { *m = Action{} }
    -func (m *Action) String() string { return proto.CompactTextString(m) }
    -func (*Action) ProtoMessage()    {}
    -func (*Action) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
    -}
    -func (m *Action) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Action.Unmarshal(m, b)
    -}
    -func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Action.Marshal(b, m, deterministic)
    -}
    -func (dst *Action) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Action.Merge(dst, src)
    -}
    -func (m *Action) XXX_Size() int {
    -	return xxx_messageInfo_Action.Size(m)
    -}
    -func (m *Action) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Action.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Action proto.InternalMessageInfo
    -
    -type PropertyValue struct {
    -	Int64Value           *int64                        `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
    -	BooleanValue         *bool                         `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
    -	StringValue          *string                       `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
    -	DoubleValue          *float64                      `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
    -	Pointvalue           *PropertyValue_PointValue     `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
    -	Uservalue            *PropertyValue_UserValue      `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
    -	Referencevalue       *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
    -	XXX_unrecognized     []byte                        `json:"-"`
    -	XXX_sizecache        int32                         `json:"-"`
    -}
    -
    -func (m *PropertyValue) Reset()         { *m = PropertyValue{} }
    -func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
    -func (*PropertyValue) ProtoMessage()    {}
    -func (*PropertyValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
    -}
    -func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
    -}
    -func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
    -}
    -func (dst *PropertyValue) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PropertyValue.Merge(dst, src)
    -}
    -func (m *PropertyValue) XXX_Size() int {
    -	return xxx_messageInfo_PropertyValue.Size(m)
    -}
    -func (m *PropertyValue) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PropertyValue.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
    -
    -func (m *PropertyValue) GetInt64Value() int64 {
    -	if m != nil && m.Int64Value != nil {
    -		return *m.Int64Value
    -	}
    -	return 0
    -}
    -
    -func (m *PropertyValue) GetBooleanValue() bool {
    -	if m != nil && m.BooleanValue != nil {
    -		return *m.BooleanValue
    -	}
    -	return false
    -}
    -
    -func (m *PropertyValue) GetStringValue() string {
    -	if m != nil && m.StringValue != nil {
    -		return *m.StringValue
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue) GetDoubleValue() float64 {
    -	if m != nil && m.DoubleValue != nil {
    -		return *m.DoubleValue
    -	}
    -	return 0
    -}
    -
    -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
    -	if m != nil {
    -		return m.Pointvalue
    -	}
    -	return nil
    -}
    -
    -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
    -	if m != nil {
    -		return m.Uservalue
    -	}
    -	return nil
    -}
    -
    -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
    -	if m != nil {
    -		return m.Referencevalue
    -	}
    -	return nil
    -}
    -
    -type PropertyValue_PointValue struct {
    -	X                    *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
    -	Y                    *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *PropertyValue_PointValue) Reset()         { *m = PropertyValue_PointValue{} }
    -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
    -func (*PropertyValue_PointValue) ProtoMessage()    {}
    -func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
    -}
    -func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
    -}
    -func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
    -}
    -func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
    -}
    -func (m *PropertyValue_PointValue) XXX_Size() int {
    -	return xxx_messageInfo_PropertyValue_PointValue.Size(m)
    -}
    -func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
    -
    -func (m *PropertyValue_PointValue) GetX() float64 {
    -	if m != nil && m.X != nil {
    -		return *m.X
    -	}
    -	return 0
    -}
    -
    -func (m *PropertyValue_PointValue) GetY() float64 {
    -	if m != nil && m.Y != nil {
    -		return *m.Y
    -	}
    -	return 0
    -}
    -
    -type PropertyValue_UserValue struct {
    -	Email                *string  `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
    -	AuthDomain           *string  `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
    -	Nickname             *string  `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
    -	FederatedIdentity    *string  `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
    -	FederatedProvider    *string  `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *PropertyValue_UserValue) Reset()         { *m = PropertyValue_UserValue{} }
    -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
    -func (*PropertyValue_UserValue) ProtoMessage()    {}
    -func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
    -}
    -func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
    -}
    -func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
    -}
    -func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
    -}
    -func (m *PropertyValue_UserValue) XXX_Size() int {
    -	return xxx_messageInfo_PropertyValue_UserValue.Size(m)
    -}
    -func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
    -
    -func (m *PropertyValue_UserValue) GetEmail() string {
    -	if m != nil && m.Email != nil {
    -		return *m.Email
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue_UserValue) GetAuthDomain() string {
    -	if m != nil && m.AuthDomain != nil {
    -		return *m.AuthDomain
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue_UserValue) GetNickname() string {
    -	if m != nil && m.Nickname != nil {
    -		return *m.Nickname
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
    -	if m != nil && m.FederatedIdentity != nil {
    -		return *m.FederatedIdentity
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue_UserValue) GetFederatedProvider() string {
    -	if m != nil && m.FederatedProvider != nil {
    -		return *m.FederatedProvider
    -	}
    -	return ""
    -}
    -
    -type PropertyValue_ReferenceValue struct {
    -	App                  *string                                     `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
    -	NameSpace            *string                                     `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
    -	Pathelement          []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                                    `json:"-"`
    -	XXX_unrecognized     []byte                                      `json:"-"`
    -	XXX_sizecache        int32                                       `json:"-"`
    -}
    -
    -func (m *PropertyValue_ReferenceValue) Reset()         { *m = PropertyValue_ReferenceValue{} }
    -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
    -func (*PropertyValue_ReferenceValue) ProtoMessage()    {}
    -func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
    -}
    -func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
    -}
    -func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
    -}
    -func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
    -}
    -func (m *PropertyValue_ReferenceValue) XXX_Size() int {
    -	return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
    -}
    -func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
    -
    -func (m *PropertyValue_ReferenceValue) GetApp() string {
    -	if m != nil && m.App != nil {
    -		return *m.App
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
    -	if m != nil && m.NameSpace != nil {
    -		return *m.NameSpace
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
    -	if m != nil {
    -		return m.Pathelement
    -	}
    -	return nil
    -}
    -
    -type PropertyValue_ReferenceValue_PathElement struct {
    -	Type                 *string  `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
    -	Id                   *int64   `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
    -	Name                 *string  `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
    -	*m = PropertyValue_ReferenceValue_PathElement{}
    -}
    -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
    -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage()    {}
    -func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
    -}
    -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
    -}
    -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
    -}
    -func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
    -}
    -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
    -	return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
    -}
    -func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
    -
    -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
    -	if m != nil && m.Type != nil {
    -		return *m.Type
    -	}
    -	return ""
    -}
    -
    -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
    -	if m != nil && m.Id != nil {
    -		return *m.Id
    -	}
    -	return 0
    -}
    -
    -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
    -	if m != nil && m.Name != nil {
    -		return *m.Name
    -	}
    -	return ""
    -}
    -
    -type Property struct {
    -	Meaning               *Property_Meaning               `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
    -	MeaningUri            *string                         `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
    -	Name                  *string                         `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
    -	Value                 *PropertyValue                  `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
    -	Multiple              *bool                           `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
    -	Searchable            *bool                           `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
    -	FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
    -	Locale                *string                         `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
    -	XXX_NoUnkeyedLiteral  struct{}                        `json:"-"`
    -	XXX_unrecognized      []byte                          `json:"-"`
    -	XXX_sizecache         int32                           `json:"-"`
    -}
    -
    -func (m *Property) Reset()         { *m = Property{} }
    -func (m *Property) String() string { return proto.CompactTextString(m) }
    -func (*Property) ProtoMessage()    {}
    -func (*Property) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
    -}
    -func (m *Property) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Property.Unmarshal(m, b)
    -}
    -func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Property.Marshal(b, m, deterministic)
    -}
    -func (dst *Property) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Property.Merge(dst, src)
    -}
    -func (m *Property) XXX_Size() int {
    -	return xxx_messageInfo_Property.Size(m)
    -}
    -func (m *Property) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Property.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Property proto.InternalMessageInfo
    -
    -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
    -const Default_Property_Searchable bool = false
    -const Default_Property_Locale string = "en"
    -
    -func (m *Property) GetMeaning() Property_Meaning {
    -	if m != nil && m.Meaning != nil {
    -		return *m.Meaning
    -	}
    -	return Default_Property_Meaning
    -}
    -
    -func (m *Property) GetMeaningUri() string {
    -	if m != nil && m.MeaningUri != nil {
    -		return *m.MeaningUri
    -	}
    -	return ""
    -}
    -
    -func (m *Property) GetName() string {
    -	if m != nil && m.Name != nil {
    -		return *m.Name
    -	}
    -	return ""
    -}
    -
    -func (m *Property) GetValue() *PropertyValue {
    -	if m != nil {
    -		return m.Value
    -	}
    -	return nil
    -}
    -
    -func (m *Property) GetMultiple() bool {
    -	if m != nil && m.Multiple != nil {
    -		return *m.Multiple
    -	}
    -	return false
    -}
    -
    -func (m *Property) GetSearchable() bool {
    -	if m != nil && m.Searchable != nil {
    -		return *m.Searchable
    -	}
    -	return Default_Property_Searchable
    -}
    -
    -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
    -	if m != nil && m.FtsTokenizationOption != nil {
    -		return *m.FtsTokenizationOption
    -	}
    -	return Property_HTML
    -}
    -
    -func (m *Property) GetLocale() string {
    -	if m != nil && m.Locale != nil {
    -		return *m.Locale
    -	}
    -	return Default_Property_Locale
    -}
    -
    -type Path struct {
    -	Element              []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
    -	XXX_unrecognized     []byte          `json:"-"`
    -	XXX_sizecache        int32           `json:"-"`
    -}
    -
    -func (m *Path) Reset()         { *m = Path{} }
    -func (m *Path) String() string { return proto.CompactTextString(m) }
    -func (*Path) ProtoMessage()    {}
    -func (*Path) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
    -}
    -func (m *Path) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Path.Unmarshal(m, b)
    -}
    -func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Path.Marshal(b, m, deterministic)
    -}
    -func (dst *Path) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Path.Merge(dst, src)
    -}
    -func (m *Path) XXX_Size() int {
    -	return xxx_messageInfo_Path.Size(m)
    -}
    -func (m *Path) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Path.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Path proto.InternalMessageInfo
    -
    -func (m *Path) GetElement() []*Path_Element {
    -	if m != nil {
    -		return m.Element
    -	}
    -	return nil
    -}
    -
    -type Path_Element struct {
    -	Type                 *string  `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
    -	Id                   *int64   `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
    -	Name                 *string  `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Path_Element) Reset()         { *m = Path_Element{} }
    -func (m *Path_Element) String() string { return proto.CompactTextString(m) }
    -func (*Path_Element) ProtoMessage()    {}
    -func (*Path_Element) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
    -}
    -func (m *Path_Element) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Path_Element.Unmarshal(m, b)
    -}
    -func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
    -}
    -func (dst *Path_Element) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Path_Element.Merge(dst, src)
    -}
    -func (m *Path_Element) XXX_Size() int {
    -	return xxx_messageInfo_Path_Element.Size(m)
    -}
    -func (m *Path_Element) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Path_Element.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Path_Element proto.InternalMessageInfo
    -
    -func (m *Path_Element) GetType() string {
    -	if m != nil && m.Type != nil {
    -		return *m.Type
    -	}
    -	return ""
    -}
    -
    -func (m *Path_Element) GetId() int64 {
    -	if m != nil && m.Id != nil {
    -		return *m.Id
    -	}
    -	return 0
    -}
    -
    -func (m *Path_Element) GetName() string {
    -	if m != nil && m.Name != nil {
    -		return *m.Name
    -	}
    -	return ""
    -}
    -
    -type Reference struct {
    -	App                  *string  `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
    -	NameSpace            *string  `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
    -	Path                 *Path    `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Reference) Reset()         { *m = Reference{} }
    -func (m *Reference) String() string { return proto.CompactTextString(m) }
    -func (*Reference) ProtoMessage()    {}
    -func (*Reference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
    -}
    -func (m *Reference) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Reference.Unmarshal(m, b)
    -}
    -func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
    -}
    -func (dst *Reference) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Reference.Merge(dst, src)
    -}
    -func (m *Reference) XXX_Size() int {
    -	return xxx_messageInfo_Reference.Size(m)
    -}
    -func (m *Reference) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Reference.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Reference proto.InternalMessageInfo
    -
    -func (m *Reference) GetApp() string {
    -	if m != nil && m.App != nil {
    -		return *m.App
    -	}
    -	return ""
    -}
    -
    -func (m *Reference) GetNameSpace() string {
    -	if m != nil && m.NameSpace != nil {
    -		return *m.NameSpace
    -	}
    -	return ""
    -}
    -
    -func (m *Reference) GetPath() *Path {
    -	if m != nil {
    -		return m.Path
    -	}
    -	return nil
    -}
    -
    -type User struct {
    -	Email                *string  `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
    -	AuthDomain           *string  `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
    -	Nickname             *string  `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
    -	FederatedIdentity    *string  `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
    -	FederatedProvider    *string  `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *User) Reset()         { *m = User{} }
    -func (m *User) String() string { return proto.CompactTextString(m) }
    -func (*User) ProtoMessage()    {}
    -func (*User) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
    -}
    -func (m *User) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_User.Unmarshal(m, b)
    -}
    -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_User.Marshal(b, m, deterministic)
    -}
    -func (dst *User) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_User.Merge(dst, src)
    -}
    -func (m *User) XXX_Size() int {
    -	return xxx_messageInfo_User.Size(m)
    -}
    -func (m *User) XXX_DiscardUnknown() {
    -	xxx_messageInfo_User.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_User proto.InternalMessageInfo
    -
    -func (m *User) GetEmail() string {
    -	if m != nil && m.Email != nil {
    -		return *m.Email
    -	}
    -	return ""
    -}
    -
    -func (m *User) GetAuthDomain() string {
    -	if m != nil && m.AuthDomain != nil {
    -		return *m.AuthDomain
    -	}
    -	return ""
    -}
    -
    -func (m *User) GetNickname() string {
    -	if m != nil && m.Nickname != nil {
    -		return *m.Nickname
    -	}
    -	return ""
    -}
    -
    -func (m *User) GetFederatedIdentity() string {
    -	if m != nil && m.FederatedIdentity != nil {
    -		return *m.FederatedIdentity
    -	}
    -	return ""
    -}
    -
    -func (m *User) GetFederatedProvider() string {
    -	if m != nil && m.FederatedProvider != nil {
    -		return *m.FederatedProvider
    -	}
    -	return ""
    -}
    -
    -type EntityProto struct {
    -	Key                  *Reference        `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
    -	EntityGroup          *Path             `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
    -	Owner                *User             `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
    -	Kind                 *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
    -	KindUri              *string           `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
    -	Property             []*Property       `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
    -	RawProperty          []*Property       `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
    -	Rank                 *int32            `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *EntityProto) Reset()         { *m = EntityProto{} }
    -func (m *EntityProto) String() string { return proto.CompactTextString(m) }
    -func (*EntityProto) ProtoMessage()    {}
    -func (*EntityProto) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
    -}
    -func (m *EntityProto) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_EntityProto.Unmarshal(m, b)
    -}
    -func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
    -}
    -func (dst *EntityProto) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_EntityProto.Merge(dst, src)
    -}
    -func (m *EntityProto) XXX_Size() int {
    -	return xxx_messageInfo_EntityProto.Size(m)
    -}
    -func (m *EntityProto) XXX_DiscardUnknown() {
    -	xxx_messageInfo_EntityProto.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_EntityProto proto.InternalMessageInfo
    -
    -func (m *EntityProto) GetKey() *Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *EntityProto) GetEntityGroup() *Path {
    -	if m != nil {
    -		return m.EntityGroup
    -	}
    -	return nil
    -}
    -
    -func (m *EntityProto) GetOwner() *User {
    -	if m != nil {
    -		return m.Owner
    -	}
    -	return nil
    -}
    -
    -func (m *EntityProto) GetKind() EntityProto_Kind {
    -	if m != nil && m.Kind != nil {
    -		return *m.Kind
    -	}
    -	return EntityProto_GD_CONTACT
    -}
    -
    -func (m *EntityProto) GetKindUri() string {
    -	if m != nil && m.KindUri != nil {
    -		return *m.KindUri
    -	}
    -	return ""
    -}
    -
    -func (m *EntityProto) GetProperty() []*Property {
    -	if m != nil {
    -		return m.Property
    -	}
    -	return nil
    -}
    -
    -func (m *EntityProto) GetRawProperty() []*Property {
    -	if m != nil {
    -		return m.RawProperty
    -	}
    -	return nil
    -}
    -
    -func (m *EntityProto) GetRank() int32 {
    -	if m != nil && m.Rank != nil {
    -		return *m.Rank
    -	}
    -	return 0
    -}
    -
    -type CompositeProperty struct {
    -	IndexId              *int64   `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
    -	Value                []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *CompositeProperty) Reset()         { *m = CompositeProperty{} }
    -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
    -func (*CompositeProperty) ProtoMessage()    {}
    -func (*CompositeProperty) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
    -}
    -func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
    -}
    -func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
    -}
    -func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompositeProperty.Merge(dst, src)
    -}
    -func (m *CompositeProperty) XXX_Size() int {
    -	return xxx_messageInfo_CompositeProperty.Size(m)
    -}
    -func (m *CompositeProperty) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
    -
    -func (m *CompositeProperty) GetIndexId() int64 {
    -	if m != nil && m.IndexId != nil {
    -		return *m.IndexId
    -	}
    -	return 0
    -}
    -
    -func (m *CompositeProperty) GetValue() []string {
    -	if m != nil {
    -		return m.Value
    -	}
    -	return nil
    -}
    -
    -type Index struct {
    -	EntityType           *string           `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
    -	Ancestor             *bool             `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
    -	Property             []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *Index) Reset()         { *m = Index{} }
    -func (m *Index) String() string { return proto.CompactTextString(m) }
    -func (*Index) ProtoMessage()    {}
    -func (*Index) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
    -}
    -func (m *Index) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Index.Unmarshal(m, b)
    -}
    -func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Index.Marshal(b, m, deterministic)
    -}
    -func (dst *Index) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Index.Merge(dst, src)
    -}
    -func (m *Index) XXX_Size() int {
    -	return xxx_messageInfo_Index.Size(m)
    -}
    -func (m *Index) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Index.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Index proto.InternalMessageInfo
    -
    -func (m *Index) GetEntityType() string {
    -	if m != nil && m.EntityType != nil {
    -		return *m.EntityType
    -	}
    -	return ""
    -}
    -
    -func (m *Index) GetAncestor() bool {
    -	if m != nil && m.Ancestor != nil {
    -		return *m.Ancestor
    -	}
    -	return false
    -}
    -
    -func (m *Index) GetProperty() []*Index_Property {
    -	if m != nil {
    -		return m.Property
    -	}
    -	return nil
    -}
    -
    -type Index_Property struct {
    -	Name                 *string                   `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
    -	Direction            *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
    -	XXX_unrecognized     []byte                    `json:"-"`
    -	XXX_sizecache        int32                     `json:"-"`
    -}
    -
    -func (m *Index_Property) Reset()         { *m = Index_Property{} }
    -func (m *Index_Property) String() string { return proto.CompactTextString(m) }
    -func (*Index_Property) ProtoMessage()    {}
    -func (*Index_Property) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
    -}
    -func (m *Index_Property) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Index_Property.Unmarshal(m, b)
    -}
    -func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
    -}
    -func (dst *Index_Property) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Index_Property.Merge(dst, src)
    -}
    -func (m *Index_Property) XXX_Size() int {
    -	return xxx_messageInfo_Index_Property.Size(m)
    -}
    -func (m *Index_Property) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Index_Property.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Index_Property proto.InternalMessageInfo
    -
    -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
    -
    -func (m *Index_Property) GetName() string {
    -	if m != nil && m.Name != nil {
    -		return *m.Name
    -	}
    -	return ""
    -}
    -
    -func (m *Index_Property) GetDirection() Index_Property_Direction {
    -	if m != nil && m.Direction != nil {
    -		return *m.Direction
    -	}
    -	return Default_Index_Property_Direction
    -}
    -
    -type CompositeIndex struct {
    -	AppId                *string               `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
    -	Id                   *int64                `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
    -	Definition           *Index                `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
    -	State                *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
    -	OnlyUseIfRequired    *bool                 `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
    -	XXX_unrecognized     []byte                `json:"-"`
    -	XXX_sizecache        int32                 `json:"-"`
    -}
    -
    -func (m *CompositeIndex) Reset()         { *m = CompositeIndex{} }
    -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
    -func (*CompositeIndex) ProtoMessage()    {}
    -func (*CompositeIndex) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
    -}
    -func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
    -}
    -func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
    -}
    -func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompositeIndex.Merge(dst, src)
    -}
    -func (m *CompositeIndex) XXX_Size() int {
    -	return xxx_messageInfo_CompositeIndex.Size(m)
    -}
    -func (m *CompositeIndex) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
    -
    -const Default_CompositeIndex_OnlyUseIfRequired bool = false
    -
    -func (m *CompositeIndex) GetAppId() string {
    -	if m != nil && m.AppId != nil {
    -		return *m.AppId
    -	}
    -	return ""
    -}
    -
    -func (m *CompositeIndex) GetId() int64 {
    -	if m != nil && m.Id != nil {
    -		return *m.Id
    -	}
    -	return 0
    -}
    -
    -func (m *CompositeIndex) GetDefinition() *Index {
    -	if m != nil {
    -		return m.Definition
    -	}
    -	return nil
    -}
    -
    -func (m *CompositeIndex) GetState() CompositeIndex_State {
    -	if m != nil && m.State != nil {
    -		return *m.State
    -	}
    -	return CompositeIndex_WRITE_ONLY
    -}
    -
    -func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
    -	if m != nil && m.OnlyUseIfRequired != nil {
    -		return *m.OnlyUseIfRequired
    -	}
    -	return Default_CompositeIndex_OnlyUseIfRequired
    -}
    -
    -type IndexPostfix struct {
    -	IndexValue           []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
    -	Key                  *Reference                 `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
    -	Before               *bool                      `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
    -	XXX_unrecognized     []byte                     `json:"-"`
    -	XXX_sizecache        int32                      `json:"-"`
    -}
    -
    -func (m *IndexPostfix) Reset()         { *m = IndexPostfix{} }
    -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
    -func (*IndexPostfix) ProtoMessage()    {}
    -func (*IndexPostfix) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
    -}
    -func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
    -}
    -func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
    -}
    -func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_IndexPostfix.Merge(dst, src)
    -}
    -func (m *IndexPostfix) XXX_Size() int {
    -	return xxx_messageInfo_IndexPostfix.Size(m)
    -}
    -func (m *IndexPostfix) XXX_DiscardUnknown() {
    -	xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
    -
    -const Default_IndexPostfix_Before bool = true
    -
    -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
    -	if m != nil {
    -		return m.IndexValue
    -	}
    -	return nil
    -}
    -
    -func (m *IndexPostfix) GetKey() *Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *IndexPostfix) GetBefore() bool {
    -	if m != nil && m.Before != nil {
    -		return *m.Before
    -	}
    -	return Default_IndexPostfix_Before
    -}
    -
    -type IndexPostfix_IndexValue struct {
    -	PropertyName         *string        `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
    -	Value                *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
    -	XXX_unrecognized     []byte         `json:"-"`
    -	XXX_sizecache        int32          `json:"-"`
    -}
    -
    -func (m *IndexPostfix_IndexValue) Reset()         { *m = IndexPostfix_IndexValue{} }
    -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
    -func (*IndexPostfix_IndexValue) ProtoMessage()    {}
    -func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
    -}
    -func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
    -}
    -func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
    -}
    -func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
    -}
    -func (m *IndexPostfix_IndexValue) XXX_Size() int {
    -	return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
    -}
    -func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
    -	xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
    -
    -func (m *IndexPostfix_IndexValue) GetPropertyName() string {
    -	if m != nil && m.PropertyName != nil {
    -		return *m.PropertyName
    -	}
    -	return ""
    -}
    -
    -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
    -	if m != nil {
    -		return m.Value
    -	}
    -	return nil
    -}
    -
    -type IndexPosition struct {
    -	Key                  *string  `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
    -	Before               *bool    `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *IndexPosition) Reset()         { *m = IndexPosition{} }
    -func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
    -func (*IndexPosition) ProtoMessage()    {}
    -func (*IndexPosition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
    -}
    -func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
    -}
    -func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
    -}
    -func (dst *IndexPosition) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_IndexPosition.Merge(dst, src)
    -}
    -func (m *IndexPosition) XXX_Size() int {
    -	return xxx_messageInfo_IndexPosition.Size(m)
    -}
    -func (m *IndexPosition) XXX_DiscardUnknown() {
    -	xxx_messageInfo_IndexPosition.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
    -
    -const Default_IndexPosition_Before bool = true
    -
    -func (m *IndexPosition) GetKey() string {
    -	if m != nil && m.Key != nil {
    -		return *m.Key
    -	}
    -	return ""
    -}
    -
    -func (m *IndexPosition) GetBefore() bool {
    -	if m != nil && m.Before != nil {
    -		return *m.Before
    -	}
    -	return Default_IndexPosition_Before
    -}
    -
    -type Snapshot struct {
    -	Ts                   *int64   `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Snapshot) Reset()         { *m = Snapshot{} }
    -func (m *Snapshot) String() string { return proto.CompactTextString(m) }
    -func (*Snapshot) ProtoMessage()    {}
    -func (*Snapshot) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
    -}
    -func (m *Snapshot) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Snapshot.Unmarshal(m, b)
    -}
    -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
    -}
    -func (dst *Snapshot) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Snapshot.Merge(dst, src)
    -}
    -func (m *Snapshot) XXX_Size() int {
    -	return xxx_messageInfo_Snapshot.Size(m)
    -}
    -func (m *Snapshot) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Snapshot.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Snapshot proto.InternalMessageInfo
    -
    -func (m *Snapshot) GetTs() int64 {
    -	if m != nil && m.Ts != nil {
    -		return *m.Ts
    -	}
    -	return 0
    -}
    -
    -type InternalHeader struct {
    -	Qos                  *string  `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *InternalHeader) Reset()         { *m = InternalHeader{} }
    -func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
    -func (*InternalHeader) ProtoMessage()    {}
    -func (*InternalHeader) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
    -}
    -func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
    -}
    -func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
    -}
    -func (dst *InternalHeader) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_InternalHeader.Merge(dst, src)
    -}
    -func (m *InternalHeader) XXX_Size() int {
    -	return xxx_messageInfo_InternalHeader.Size(m)
    -}
    -func (m *InternalHeader) XXX_DiscardUnknown() {
    -	xxx_messageInfo_InternalHeader.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
    -
    -func (m *InternalHeader) GetQos() string {
    -	if m != nil && m.Qos != nil {
    -		return *m.Qos
    -	}
    -	return ""
    -}
    -
    -type Transaction struct {
    -	Header               *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
    -	Handle               *uint64         `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
    -	App                  *string         `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
    -	MarkChanges          *bool           `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
    -	XXX_unrecognized     []byte          `json:"-"`
    -	XXX_sizecache        int32           `json:"-"`
    -}
    -
    -func (m *Transaction) Reset()         { *m = Transaction{} }
    -func (m *Transaction) String() string { return proto.CompactTextString(m) }
    -func (*Transaction) ProtoMessage()    {}
    -func (*Transaction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
    -}
    -func (m *Transaction) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Transaction.Unmarshal(m, b)
    -}
    -func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
    -}
    -func (dst *Transaction) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Transaction.Merge(dst, src)
    -}
    -func (m *Transaction) XXX_Size() int {
    -	return xxx_messageInfo_Transaction.Size(m)
    -}
    -func (m *Transaction) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Transaction.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Transaction proto.InternalMessageInfo
    -
    -const Default_Transaction_MarkChanges bool = false
    -
    -func (m *Transaction) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *Transaction) GetHandle() uint64 {
    -	if m != nil && m.Handle != nil {
    -		return *m.Handle
    -	}
    -	return 0
    -}
    -
    -func (m *Transaction) GetApp() string {
    -	if m != nil && m.App != nil {
    -		return *m.App
    -	}
    -	return ""
    -}
    -
    -func (m *Transaction) GetMarkChanges() bool {
    -	if m != nil && m.MarkChanges != nil {
    -		return *m.MarkChanges
    -	}
    -	return Default_Transaction_MarkChanges
    -}
    -
    -type Query struct {
    -	Header               *InternalHeader   `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
    -	App                  *string           `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
    -	NameSpace            *string           `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
    -	Kind                 *string           `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
    -	Ancestor             *Reference        `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
    -	Filter               []*Query_Filter   `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
    -	SearchQuery          *string           `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
    -	Order                []*Query_Order    `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
    -	Hint                 *Query_Hint       `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
    -	Count                *int32            `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
    -	Offset               *int32            `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
    -	Limit                *int32            `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
    -	CompiledCursor       *CompiledCursor   `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
    -	EndCompiledCursor    *CompiledCursor   `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
    -	CompositeIndex       []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
    -	RequirePerfectPlan   *bool             `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
    -	KeysOnly             *bool             `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
    -	Transaction          *Transaction      `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
    -	Compile              *bool             `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
    -	FailoverMs           *int64            `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
    -	Strong               *bool             `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
    -	PropertyName         []string          `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
    -	GroupByPropertyName  []string          `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
    -	Distinct             *bool             `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
    -	MinSafeTimeSeconds   *int64            `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
    -	SafeReplicaName      []string          `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
    -	PersistOffset        *bool             `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *Query) Reset()         { *m = Query{} }
    -func (m *Query) String() string { return proto.CompactTextString(m) }
    -func (*Query) ProtoMessage()    {}
    -func (*Query) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
    -}
    -func (m *Query) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Query.Unmarshal(m, b)
    -}
    -func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Query.Marshal(b, m, deterministic)
    -}
    -func (dst *Query) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Query.Merge(dst, src)
    -}
    -func (m *Query) XXX_Size() int {
    -	return xxx_messageInfo_Query.Size(m)
    -}
    -func (m *Query) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Query.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Query proto.InternalMessageInfo
    -
    -const Default_Query_Offset int32 = 0
    -const Default_Query_RequirePerfectPlan bool = false
    -const Default_Query_KeysOnly bool = false
    -const Default_Query_Compile bool = false
    -const Default_Query_PersistOffset bool = false
    -
    -func (m *Query) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetApp() string {
    -	if m != nil && m.App != nil {
    -		return *m.App
    -	}
    -	return ""
    -}
    -
    -func (m *Query) GetNameSpace() string {
    -	if m != nil && m.NameSpace != nil {
    -		return *m.NameSpace
    -	}
    -	return ""
    -}
    -
    -func (m *Query) GetKind() string {
    -	if m != nil && m.Kind != nil {
    -		return *m.Kind
    -	}
    -	return ""
    -}
    -
    -func (m *Query) GetAncestor() *Reference {
    -	if m != nil {
    -		return m.Ancestor
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetFilter() []*Query_Filter {
    -	if m != nil {
    -		return m.Filter
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetSearchQuery() string {
    -	if m != nil && m.SearchQuery != nil {
    -		return *m.SearchQuery
    -	}
    -	return ""
    -}
    -
    -func (m *Query) GetOrder() []*Query_Order {
    -	if m != nil {
    -		return m.Order
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetHint() Query_Hint {
    -	if m != nil && m.Hint != nil {
    -		return *m.Hint
    -	}
    -	return Query_ORDER_FIRST
    -}
    -
    -func (m *Query) GetCount() int32 {
    -	if m != nil && m.Count != nil {
    -		return *m.Count
    -	}
    -	return 0
    -}
    -
    -func (m *Query) GetOffset() int32 {
    -	if m != nil && m.Offset != nil {
    -		return *m.Offset
    -	}
    -	return Default_Query_Offset
    -}
    -
    -func (m *Query) GetLimit() int32 {
    -	if m != nil && m.Limit != nil {
    -		return *m.Limit
    -	}
    -	return 0
    -}
    -
    -func (m *Query) GetCompiledCursor() *CompiledCursor {
    -	if m != nil {
    -		return m.CompiledCursor
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetEndCompiledCursor() *CompiledCursor {
    -	if m != nil {
    -		return m.EndCompiledCursor
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetCompositeIndex() []*CompositeIndex {
    -	if m != nil {
    -		return m.CompositeIndex
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetRequirePerfectPlan() bool {
    -	if m != nil && m.RequirePerfectPlan != nil {
    -		return *m.RequirePerfectPlan
    -	}
    -	return Default_Query_RequirePerfectPlan
    -}
    -
    -func (m *Query) GetKeysOnly() bool {
    -	if m != nil && m.KeysOnly != nil {
    -		return *m.KeysOnly
    -	}
    -	return Default_Query_KeysOnly
    -}
    -
    -func (m *Query) GetTransaction() *Transaction {
    -	if m != nil {
    -		return m.Transaction
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetCompile() bool {
    -	if m != nil && m.Compile != nil {
    -		return *m.Compile
    -	}
    -	return Default_Query_Compile
    -}
    -
    -func (m *Query) GetFailoverMs() int64 {
    -	if m != nil && m.FailoverMs != nil {
    -		return *m.FailoverMs
    -	}
    -	return 0
    -}
    -
    -func (m *Query) GetStrong() bool {
    -	if m != nil && m.Strong != nil {
    -		return *m.Strong
    -	}
    -	return false
    -}
    -
    -func (m *Query) GetPropertyName() []string {
    -	if m != nil {
    -		return m.PropertyName
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetGroupByPropertyName() []string {
    -	if m != nil {
    -		return m.GroupByPropertyName
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetDistinct() bool {
    -	if m != nil && m.Distinct != nil {
    -		return *m.Distinct
    -	}
    -	return false
    -}
    -
    -func (m *Query) GetMinSafeTimeSeconds() int64 {
    -	if m != nil && m.MinSafeTimeSeconds != nil {
    -		return *m.MinSafeTimeSeconds
    -	}
    -	return 0
    -}
    -
    -func (m *Query) GetSafeReplicaName() []string {
    -	if m != nil {
    -		return m.SafeReplicaName
    -	}
    -	return nil
    -}
    -
    -func (m *Query) GetPersistOffset() bool {
    -	if m != nil && m.PersistOffset != nil {
    -		return *m.PersistOffset
    -	}
    -	return Default_Query_PersistOffset
    -}
    -
    -type Query_Filter struct {
    -	Op                   *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
    -	Property             []*Property            `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
    -	XXX_unrecognized     []byte                 `json:"-"`
    -	XXX_sizecache        int32                  `json:"-"`
    -}
    -
    -func (m *Query_Filter) Reset()         { *m = Query_Filter{} }
    -func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
    -func (*Query_Filter) ProtoMessage()    {}
    -func (*Query_Filter) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
    -}
    -func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
    -}
    -func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
    -}
    -func (dst *Query_Filter) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Query_Filter.Merge(dst, src)
    -}
    -func (m *Query_Filter) XXX_Size() int {
    -	return xxx_messageInfo_Query_Filter.Size(m)
    -}
    -func (m *Query_Filter) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Query_Filter.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
    -
    -func (m *Query_Filter) GetOp() Query_Filter_Operator {
    -	if m != nil && m.Op != nil {
    -		return *m.Op
    -	}
    -	return Query_Filter_LESS_THAN
    -}
    -
    -func (m *Query_Filter) GetProperty() []*Property {
    -	if m != nil {
    -		return m.Property
    -	}
    -	return nil
    -}
    -
    -type Query_Order struct {
    -	Property             *string                `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
    -	Direction            *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
    -	XXX_unrecognized     []byte                 `json:"-"`
    -	XXX_sizecache        int32                  `json:"-"`
    -}
    -
    -func (m *Query_Order) Reset()         { *m = Query_Order{} }
    -func (m *Query_Order) String() string { return proto.CompactTextString(m) }
    -func (*Query_Order) ProtoMessage()    {}
    -func (*Query_Order) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
    -}
    -func (m *Query_Order) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Query_Order.Unmarshal(m, b)
    -}
    -func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
    -}
    -func (dst *Query_Order) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Query_Order.Merge(dst, src)
    -}
    -func (m *Query_Order) XXX_Size() int {
    -	return xxx_messageInfo_Query_Order.Size(m)
    -}
    -func (m *Query_Order) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Query_Order.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Query_Order proto.InternalMessageInfo
    -
    -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
    -
    -func (m *Query_Order) GetProperty() string {
    -	if m != nil && m.Property != nil {
    -		return *m.Property
    -	}
    -	return ""
    -}
    -
    -func (m *Query_Order) GetDirection() Query_Order_Direction {
    -	if m != nil && m.Direction != nil {
    -		return *m.Direction
    -	}
    -	return Default_Query_Order_Direction
    -}
    -
    -type CompiledQuery struct {
    -	Primaryscan          *CompiledQuery_PrimaryScan     `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
    -	Mergejoinscan        []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
    -	IndexDef             *Index                         `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
    -	Offset               *int32                         `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
    -	Limit                *int32                         `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
    -	KeysOnly             *bool                          `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
    -	PropertyName         []string                       `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
    -	DistinctInfixSize    *int32                         `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
    -	Entityfilter         *CompiledQuery_EntityFilter    `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
    -	XXX_unrecognized     []byte                         `json:"-"`
    -	XXX_sizecache        int32                          `json:"-"`
    -}
    -
    -func (m *CompiledQuery) Reset()         { *m = CompiledQuery{} }
    -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
    -func (*CompiledQuery) ProtoMessage()    {}
    -func (*CompiledQuery) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
    -}
    -func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
    -}
    -func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
    -}
    -func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompiledQuery.Merge(dst, src)
    -}
    -func (m *CompiledQuery) XXX_Size() int {
    -	return xxx_messageInfo_CompiledQuery.Size(m)
    -}
    -func (m *CompiledQuery) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
    -
    -const Default_CompiledQuery_Offset int32 = 0
    -
    -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
    -	if m != nil {
    -		return m.Primaryscan
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
    -	if m != nil {
    -		return m.Mergejoinscan
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledQuery) GetIndexDef() *Index {
    -	if m != nil {
    -		return m.IndexDef
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledQuery) GetOffset() int32 {
    -	if m != nil && m.Offset != nil {
    -		return *m.Offset
    -	}
    -	return Default_CompiledQuery_Offset
    -}
    -
    -func (m *CompiledQuery) GetLimit() int32 {
    -	if m != nil && m.Limit != nil {
    -		return *m.Limit
    -	}
    -	return 0
    -}
    -
    -func (m *CompiledQuery) GetKeysOnly() bool {
    -	if m != nil && m.KeysOnly != nil {
    -		return *m.KeysOnly
    -	}
    -	return false
    -}
    -
    -func (m *CompiledQuery) GetPropertyName() []string {
    -	if m != nil {
    -		return m.PropertyName
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledQuery) GetDistinctInfixSize() int32 {
    -	if m != nil && m.DistinctInfixSize != nil {
    -		return *m.DistinctInfixSize
    -	}
    -	return 0
    -}
    -
    -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
    -	if m != nil {
    -		return m.Entityfilter
    -	}
    -	return nil
    -}
    -
    -type CompiledQuery_PrimaryScan struct {
    -	IndexName                  *string  `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
    -	StartKey                   *string  `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
    -	StartInclusive             *bool    `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
    -	EndKey                     *string  `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
    -	EndInclusive               *bool    `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
    -	StartPostfixValue          []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
    -	EndPostfixValue            []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
    -	EndUnappliedLogTimestampUs *int64   `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
    -	XXX_NoUnkeyedLiteral       struct{} `json:"-"`
    -	XXX_unrecognized           []byte   `json:"-"`
    -	XXX_sizecache              int32    `json:"-"`
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) Reset()         { *m = CompiledQuery_PrimaryScan{} }
    -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
    -func (*CompiledQuery_PrimaryScan) ProtoMessage()    {}
    -func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
    -}
    -func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
    -}
    -func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
    -}
    -func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
    -}
    -func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
    -	return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
    -}
    -func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
    -
    -func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
    -	if m != nil && m.IndexName != nil {
    -		return *m.IndexName
    -	}
    -	return ""
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
    -	if m != nil && m.StartKey != nil {
    -		return *m.StartKey
    -	}
    -	return ""
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
    -	if m != nil && m.StartInclusive != nil {
    -		return *m.StartInclusive
    -	}
    -	return false
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
    -	if m != nil && m.EndKey != nil {
    -		return *m.EndKey
    -	}
    -	return ""
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
    -	if m != nil && m.EndInclusive != nil {
    -		return *m.EndInclusive
    -	}
    -	return false
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
    -	if m != nil {
    -		return m.StartPostfixValue
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
    -	if m != nil {
    -		return m.EndPostfixValue
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
    -	if m != nil && m.EndUnappliedLogTimestampUs != nil {
    -		return *m.EndUnappliedLogTimestampUs
    -	}
    -	return 0
    -}
    -
    -type CompiledQuery_MergeJoinScan struct {
    -	IndexName            *string  `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
    -	PrefixValue          []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
    -	ValuePrefix          *bool    `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *CompiledQuery_MergeJoinScan) Reset()         { *m = CompiledQuery_MergeJoinScan{} }
    -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
    -func (*CompiledQuery_MergeJoinScan) ProtoMessage()    {}
    -func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
    -}
    -func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
    -}
    -func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
    -}
    -func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
    -}
    -func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
    -	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
    -}
    -func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
    -
    -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
    -
    -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
    -	if m != nil && m.IndexName != nil {
    -		return *m.IndexName
    -	}
    -	return ""
    -}
    -
    -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
    -	if m != nil {
    -		return m.PrefixValue
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
    -	if m != nil && m.ValuePrefix != nil {
    -		return *m.ValuePrefix
    -	}
    -	return Default_CompiledQuery_MergeJoinScan_ValuePrefix
    -}
    -
    -type CompiledQuery_EntityFilter struct {
    -	Distinct             *bool      `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
    -	Kind                 *string    `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
    -	Ancestor             *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
    -	XXX_unrecognized     []byte     `json:"-"`
    -	XXX_sizecache        int32      `json:"-"`
    -}
    -
    -func (m *CompiledQuery_EntityFilter) Reset()         { *m = CompiledQuery_EntityFilter{} }
    -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
    -func (*CompiledQuery_EntityFilter) ProtoMessage()    {}
    -func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
    -}
    -func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
    -}
    -func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
    -}
    -func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
    -}
    -func (m *CompiledQuery_EntityFilter) XXX_Size() int {
    -	return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
    -}
    -func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
    -
    -const Default_CompiledQuery_EntityFilter_Distinct bool = false
    -
    -func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
    -	if m != nil && m.Distinct != nil {
    -		return *m.Distinct
    -	}
    -	return Default_CompiledQuery_EntityFilter_Distinct
    -}
    -
    -func (m *CompiledQuery_EntityFilter) GetKind() string {
    -	if m != nil && m.Kind != nil {
    -		return *m.Kind
    -	}
    -	return ""
    -}
    -
    -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
    -	if m != nil {
    -		return m.Ancestor
    -	}
    -	return nil
    -}
    -
    -type CompiledCursor struct {
    -	Position             *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
    -	XXX_unrecognized     []byte                   `json:"-"`
    -	XXX_sizecache        int32                    `json:"-"`
    -}
    -
    -func (m *CompiledCursor) Reset()         { *m = CompiledCursor{} }
    -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
    -func (*CompiledCursor) ProtoMessage()    {}
    -func (*CompiledCursor) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
    -}
    -func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
    -}
    -func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
    -}
    -func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompiledCursor.Merge(dst, src)
    -}
    -func (m *CompiledCursor) XXX_Size() int {
    -	return xxx_messageInfo_CompiledCursor.Size(m)
    -}
    -func (m *CompiledCursor) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
    -
    -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
    -	if m != nil {
    -		return m.Position
    -	}
    -	return nil
    -}
    -
    -type CompiledCursor_Position struct {
    -	StartKey             *string                               `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
    -	Indexvalue           []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
    -	Key                  *Reference                            `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
    -	StartInclusive       *bool                                 `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                              `json:"-"`
    -	XXX_unrecognized     []byte                                `json:"-"`
    -	XXX_sizecache        int32                                 `json:"-"`
    -}
    -
    -func (m *CompiledCursor_Position) Reset()         { *m = CompiledCursor_Position{} }
    -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
    -func (*CompiledCursor_Position) ProtoMessage()    {}
    -func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
    -}
    -func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
    -}
    -func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
    -}
    -func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
    -}
    -func (m *CompiledCursor_Position) XXX_Size() int {
    -	return xxx_messageInfo_CompiledCursor_Position.Size(m)
    -}
    -func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
    -
    -const Default_CompiledCursor_Position_StartInclusive bool = true
    -
    -func (m *CompiledCursor_Position) GetStartKey() string {
    -	if m != nil && m.StartKey != nil {
    -		return *m.StartKey
    -	}
    -	return ""
    -}
    -
    -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
    -	if m != nil {
    -		return m.Indexvalue
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledCursor_Position) GetKey() *Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *CompiledCursor_Position) GetStartInclusive() bool {
    -	if m != nil && m.StartInclusive != nil {
    -		return *m.StartInclusive
    -	}
    -	return Default_CompiledCursor_Position_StartInclusive
    -}
    -
    -type CompiledCursor_Position_IndexValue struct {
    -	Property             *string        `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
    -	Value                *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
    -	XXX_unrecognized     []byte         `json:"-"`
    -	XXX_sizecache        int32          `json:"-"`
    -}
    -
    -func (m *CompiledCursor_Position_IndexValue) Reset()         { *m = CompiledCursor_Position_IndexValue{} }
    -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
    -func (*CompiledCursor_Position_IndexValue) ProtoMessage()    {}
    -func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
    -}
    -func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
    -}
    -func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
    -}
    -func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
    -}
    -func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
    -	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
    -}
    -func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
    -
    -func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
    -	if m != nil && m.Property != nil {
    -		return *m.Property
    -	}
    -	return ""
    -}
    -
    -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
    -	if m != nil {
    -		return m.Value
    -	}
    -	return nil
    -}
    -
    -type Cursor struct {
    -	Cursor               *uint64  `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
    -	App                  *string  `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Cursor) Reset()         { *m = Cursor{} }
    -func (m *Cursor) String() string { return proto.CompactTextString(m) }
    -func (*Cursor) ProtoMessage()    {}
    -func (*Cursor) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
    -}
    -func (m *Cursor) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Cursor.Unmarshal(m, b)
    -}
    -func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
    -}
    -func (dst *Cursor) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Cursor.Merge(dst, src)
    -}
    -func (m *Cursor) XXX_Size() int {
    -	return xxx_messageInfo_Cursor.Size(m)
    -}
    -func (m *Cursor) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Cursor.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Cursor proto.InternalMessageInfo
    -
    -func (m *Cursor) GetCursor() uint64 {
    -	if m != nil && m.Cursor != nil {
    -		return *m.Cursor
    -	}
    -	return 0
    -}
    -
    -func (m *Cursor) GetApp() string {
    -	if m != nil && m.App != nil {
    -		return *m.App
    -	}
    -	return ""
    -}
    -
    -type Error struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Error) Reset()         { *m = Error{} }
    -func (m *Error) String() string { return proto.CompactTextString(m) }
    -func (*Error) ProtoMessage()    {}
    -func (*Error) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
    -}
    -func (m *Error) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Error.Unmarshal(m, b)
    -}
    -func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Error.Marshal(b, m, deterministic)
    -}
    -func (dst *Error) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Error.Merge(dst, src)
    -}
    -func (m *Error) XXX_Size() int {
    -	return xxx_messageInfo_Error.Size(m)
    -}
    -func (m *Error) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Error.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Error proto.InternalMessageInfo
    -
    -type Cost struct {
    -	IndexWrites             *int32           `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
    -	IndexWriteBytes         *int32           `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
    -	EntityWrites            *int32           `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
    -	EntityWriteBytes        *int32           `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
    -	Commitcost              *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
    -	ApproximateStorageDelta *int32           `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
    -	IdSequenceUpdates       *int32           `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
    -	XXX_NoUnkeyedLiteral    struct{}         `json:"-"`
    -	XXX_unrecognized        []byte           `json:"-"`
    -	XXX_sizecache           int32            `json:"-"`
    -}
    -
    -func (m *Cost) Reset()         { *m = Cost{} }
    -func (m *Cost) String() string { return proto.CompactTextString(m) }
    -func (*Cost) ProtoMessage()    {}
    -func (*Cost) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
    -}
    -func (m *Cost) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Cost.Unmarshal(m, b)
    -}
    -func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
    -}
    -func (dst *Cost) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Cost.Merge(dst, src)
    -}
    -func (m *Cost) XXX_Size() int {
    -	return xxx_messageInfo_Cost.Size(m)
    -}
    -func (m *Cost) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Cost.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Cost proto.InternalMessageInfo
    -
    -func (m *Cost) GetIndexWrites() int32 {
    -	if m != nil && m.IndexWrites != nil {
    -		return *m.IndexWrites
    -	}
    -	return 0
    -}
    -
    -func (m *Cost) GetIndexWriteBytes() int32 {
    -	if m != nil && m.IndexWriteBytes != nil {
    -		return *m.IndexWriteBytes
    -	}
    -	return 0
    -}
    -
    -func (m *Cost) GetEntityWrites() int32 {
    -	if m != nil && m.EntityWrites != nil {
    -		return *m.EntityWrites
    -	}
    -	return 0
    -}
    -
    -func (m *Cost) GetEntityWriteBytes() int32 {
    -	if m != nil && m.EntityWriteBytes != nil {
    -		return *m.EntityWriteBytes
    -	}
    -	return 0
    -}
    -
    -func (m *Cost) GetCommitcost() *Cost_CommitCost {
    -	if m != nil {
    -		return m.Commitcost
    -	}
    -	return nil
    -}
    -
    -func (m *Cost) GetApproximateStorageDelta() int32 {
    -	if m != nil && m.ApproximateStorageDelta != nil {
    -		return *m.ApproximateStorageDelta
    -	}
    -	return 0
    -}
    -
    -func (m *Cost) GetIdSequenceUpdates() int32 {
    -	if m != nil && m.IdSequenceUpdates != nil {
    -		return *m.IdSequenceUpdates
    -	}
    -	return 0
    -}
    -
    -type Cost_CommitCost struct {
    -	RequestedEntityPuts    *int32   `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
    -	RequestedEntityDeletes *int32   `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
    -	XXX_NoUnkeyedLiteral   struct{} `json:"-"`
    -	XXX_unrecognized       []byte   `json:"-"`
    -	XXX_sizecache          int32    `json:"-"`
    -}
    -
    -func (m *Cost_CommitCost) Reset()         { *m = Cost_CommitCost{} }
    -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
    -func (*Cost_CommitCost) ProtoMessage()    {}
    -func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
    -}
    -func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
    -}
    -func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
    -}
    -func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
    -}
    -func (m *Cost_CommitCost) XXX_Size() int {
    -	return xxx_messageInfo_Cost_CommitCost.Size(m)
    -}
    -func (m *Cost_CommitCost) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
    -
    -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
    -	if m != nil && m.RequestedEntityPuts != nil {
    -		return *m.RequestedEntityPuts
    -	}
    -	return 0
    -}
    -
    -func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
    -	if m != nil && m.RequestedEntityDeletes != nil {
    -		return *m.RequestedEntityDeletes
    -	}
    -	return 0
    -}
    -
    -type GetRequest struct {
    -	Header               *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
    -	Key                  []*Reference    `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
    -	Transaction          *Transaction    `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
    -	FailoverMs           *int64          `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
    -	Strong               *bool           `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
    -	AllowDeferred        *bool           `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
    -	XXX_unrecognized     []byte          `json:"-"`
    -	XXX_sizecache        int32           `json:"-"`
    -}
    -
    -func (m *GetRequest) Reset()         { *m = GetRequest{} }
    -func (m *GetRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetRequest) ProtoMessage()    {}
    -func (*GetRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
    -}
    -func (m *GetRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetRequest.Unmarshal(m, b)
    -}
    -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetRequest.Merge(dst, src)
    -}
    -func (m *GetRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetRequest.Size(m)
    -}
    -func (m *GetRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetRequest proto.InternalMessageInfo
    -
    -const Default_GetRequest_AllowDeferred bool = false
    -
    -func (m *GetRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *GetRequest) GetKey() []*Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *GetRequest) GetTransaction() *Transaction {
    -	if m != nil {
    -		return m.Transaction
    -	}
    -	return nil
    -}
    -
    -func (m *GetRequest) GetFailoverMs() int64 {
    -	if m != nil && m.FailoverMs != nil {
    -		return *m.FailoverMs
    -	}
    -	return 0
    -}
    -
    -func (m *GetRequest) GetStrong() bool {
    -	if m != nil && m.Strong != nil {
    -		return *m.Strong
    -	}
    -	return false
    -}
    -
    -func (m *GetRequest) GetAllowDeferred() bool {
    -	if m != nil && m.AllowDeferred != nil {
    -		return *m.AllowDeferred
    -	}
    -	return Default_GetRequest_AllowDeferred
    -}
    -
    -type GetResponse struct {
    -	Entity               []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
    -	Deferred             []*Reference          `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
    -	InOrder              *bool                 `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
    -	XXX_unrecognized     []byte                `json:"-"`
    -	XXX_sizecache        int32                 `json:"-"`
    -}
    -
    -func (m *GetResponse) Reset()         { *m = GetResponse{} }
    -func (m *GetResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetResponse) ProtoMessage()    {}
    -func (*GetResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
    -}
    -func (m *GetResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetResponse.Unmarshal(m, b)
    -}
    -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetResponse.Merge(dst, src)
    -}
    -func (m *GetResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetResponse.Size(m)
    -}
    -func (m *GetResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetResponse proto.InternalMessageInfo
    -
    -const Default_GetResponse_InOrder bool = true
    -
    -func (m *GetResponse) GetEntity() []*GetResponse_Entity {
    -	if m != nil {
    -		return m.Entity
    -	}
    -	return nil
    -}
    -
    -func (m *GetResponse) GetDeferred() []*Reference {
    -	if m != nil {
    -		return m.Deferred
    -	}
    -	return nil
    -}
    -
    -func (m *GetResponse) GetInOrder() bool {
    -	if m != nil && m.InOrder != nil {
    -		return *m.InOrder
    -	}
    -	return Default_GetResponse_InOrder
    -}
    -
    -type GetResponse_Entity struct {
    -	Entity               *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
    -	Key                  *Reference   `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
    -	Version              *int64       `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
    -	XXX_unrecognized     []byte       `json:"-"`
    -	XXX_sizecache        int32        `json:"-"`
    -}
    -
    -func (m *GetResponse_Entity) Reset()         { *m = GetResponse_Entity{} }
    -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
    -func (*GetResponse_Entity) ProtoMessage()    {}
    -func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
    -}
    -func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
    -}
    -func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
    -}
    -func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
    -}
    -func (m *GetResponse_Entity) XXX_Size() int {
    -	return xxx_messageInfo_GetResponse_Entity.Size(m)
    -}
    -func (m *GetResponse_Entity) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
    -
    -func (m *GetResponse_Entity) GetEntity() *EntityProto {
    -	if m != nil {
    -		return m.Entity
    -	}
    -	return nil
    -}
    -
    -func (m *GetResponse_Entity) GetKey() *Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *GetResponse_Entity) GetVersion() int64 {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return 0
    -}
    -
    -type PutRequest struct {
    -	Header               *InternalHeader          `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
    -	Entity               []*EntityProto           `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
    -	Transaction          *Transaction             `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
    -	CompositeIndex       []*CompositeIndex        `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
    -	Trusted              *bool                    `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
    -	Force                *bool                    `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
    -	MarkChanges          *bool                    `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
    -	Snapshot             []*Snapshot              `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
    -	AutoIdPolicy         *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
    -	XXX_unrecognized     []byte                   `json:"-"`
    -	XXX_sizecache        int32                    `json:"-"`
    -}
    -
    -func (m *PutRequest) Reset()         { *m = PutRequest{} }
    -func (m *PutRequest) String() string { return proto.CompactTextString(m) }
    -func (*PutRequest) ProtoMessage()    {}
    -func (*PutRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
    -}
    -func (m *PutRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PutRequest.Unmarshal(m, b)
    -}
    -func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *PutRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PutRequest.Merge(dst, src)
    -}
    -func (m *PutRequest) XXX_Size() int {
    -	return xxx_messageInfo_PutRequest.Size(m)
    -}
    -func (m *PutRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PutRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PutRequest proto.InternalMessageInfo
    -
    -const Default_PutRequest_Trusted bool = false
    -const Default_PutRequest_Force bool = false
    -const Default_PutRequest_MarkChanges bool = false
    -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
    -
    -func (m *PutRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *PutRequest) GetEntity() []*EntityProto {
    -	if m != nil {
    -		return m.Entity
    -	}
    -	return nil
    -}
    -
    -func (m *PutRequest) GetTransaction() *Transaction {
    -	if m != nil {
    -		return m.Transaction
    -	}
    -	return nil
    -}
    -
    -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
    -	if m != nil {
    -		return m.CompositeIndex
    -	}
    -	return nil
    -}
    -
    -func (m *PutRequest) GetTrusted() bool {
    -	if m != nil && m.Trusted != nil {
    -		return *m.Trusted
    -	}
    -	return Default_PutRequest_Trusted
    -}
    -
    -func (m *PutRequest) GetForce() bool {
    -	if m != nil && m.Force != nil {
    -		return *m.Force
    -	}
    -	return Default_PutRequest_Force
    -}
    -
    -func (m *PutRequest) GetMarkChanges() bool {
    -	if m != nil && m.MarkChanges != nil {
    -		return *m.MarkChanges
    -	}
    -	return Default_PutRequest_MarkChanges
    -}
    -
    -func (m *PutRequest) GetSnapshot() []*Snapshot {
    -	if m != nil {
    -		return m.Snapshot
    -	}
    -	return nil
    -}
    -
    -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
    -	if m != nil && m.AutoIdPolicy != nil {
    -		return *m.AutoIdPolicy
    -	}
    -	return Default_PutRequest_AutoIdPolicy
    -}
    -
    -type PutResponse struct {
    -	Key                  []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
    -	Cost                 *Cost        `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
    -	Version              []int64      `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
    -	XXX_unrecognized     []byte       `json:"-"`
    -	XXX_sizecache        int32        `json:"-"`
    -}
    -
    -func (m *PutResponse) Reset()         { *m = PutResponse{} }
    -func (m *PutResponse) String() string { return proto.CompactTextString(m) }
    -func (*PutResponse) ProtoMessage()    {}
    -func (*PutResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
    -}
    -func (m *PutResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_PutResponse.Unmarshal(m, b)
    -}
    -func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *PutResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PutResponse.Merge(dst, src)
    -}
    -func (m *PutResponse) XXX_Size() int {
    -	return xxx_messageInfo_PutResponse.Size(m)
    -}
    -func (m *PutResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PutResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PutResponse proto.InternalMessageInfo
    -
    -func (m *PutResponse) GetKey() []*Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *PutResponse) GetCost() *Cost {
    -	if m != nil {
    -		return m.Cost
    -	}
    -	return nil
    -}
    -
    -func (m *PutResponse) GetVersion() []int64 {
    -	if m != nil {
    -		return m.Version
    -	}
    -	return nil
    -}
    -
    -type TouchRequest struct {
    -	Header               *InternalHeader   `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
    -	Key                  []*Reference      `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
    -	CompositeIndex       []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
    -	Force                *bool             `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
    -	Snapshot             []*Snapshot       `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *TouchRequest) Reset()         { *m = TouchRequest{} }
    -func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
    -func (*TouchRequest) ProtoMessage()    {}
    -func (*TouchRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
    -}
    -func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
    -}
    -func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *TouchRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_TouchRequest.Merge(dst, src)
    -}
    -func (m *TouchRequest) XXX_Size() int {
    -	return xxx_messageInfo_TouchRequest.Size(m)
    -}
    -func (m *TouchRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_TouchRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
    -
    -const Default_TouchRequest_Force bool = false
    -
    -func (m *TouchRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *TouchRequest) GetKey() []*Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
    -	if m != nil {
    -		return m.CompositeIndex
    -	}
    -	return nil
    -}
    -
    -func (m *TouchRequest) GetForce() bool {
    -	if m != nil && m.Force != nil {
    -		return *m.Force
    -	}
    -	return Default_TouchRequest_Force
    -}
    -
    -func (m *TouchRequest) GetSnapshot() []*Snapshot {
    -	if m != nil {
    -		return m.Snapshot
    -	}
    -	return nil
    -}
    -
    -type TouchResponse struct {
    -	Cost                 *Cost    `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *TouchResponse) Reset()         { *m = TouchResponse{} }
    -func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
    -func (*TouchResponse) ProtoMessage()    {}
    -func (*TouchResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
    -}
    -func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
    -}
    -func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *TouchResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_TouchResponse.Merge(dst, src)
    -}
    -func (m *TouchResponse) XXX_Size() int {
    -	return xxx_messageInfo_TouchResponse.Size(m)
    -}
    -func (m *TouchResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_TouchResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
    -
    -func (m *TouchResponse) GetCost() *Cost {
    -	if m != nil {
    -		return m.Cost
    -	}
    -	return nil
    -}
    -
    -type DeleteRequest struct {
    -	Header               *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
    -	Key                  []*Reference    `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
    -	Transaction          *Transaction    `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
    -	Trusted              *bool           `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
    -	Force                *bool           `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
    -	MarkChanges          *bool           `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
    -	Snapshot             []*Snapshot     `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
    -	XXX_unrecognized     []byte          `json:"-"`
    -	XXX_sizecache        int32           `json:"-"`
    -}
    -
    -func (m *DeleteRequest) Reset()         { *m = DeleteRequest{} }
    -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
    -func (*DeleteRequest) ProtoMessage()    {}
    -func (*DeleteRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
    -}
    -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
    -}
    -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_DeleteRequest.Merge(dst, src)
    -}
    -func (m *DeleteRequest) XXX_Size() int {
    -	return xxx_messageInfo_DeleteRequest.Size(m)
    -}
    -func (m *DeleteRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
    -
    -const Default_DeleteRequest_Trusted bool = false
    -const Default_DeleteRequest_Force bool = false
    -const Default_DeleteRequest_MarkChanges bool = false
    -
    -func (m *DeleteRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *DeleteRequest) GetKey() []*Reference {
    -	if m != nil {
    -		return m.Key
    -	}
    -	return nil
    -}
    -
    -func (m *DeleteRequest) GetTransaction() *Transaction {
    -	if m != nil {
    -		return m.Transaction
    -	}
    -	return nil
    -}
    -
    -func (m *DeleteRequest) GetTrusted() bool {
    -	if m != nil && m.Trusted != nil {
    -		return *m.Trusted
    -	}
    -	return Default_DeleteRequest_Trusted
    -}
    -
    -func (m *DeleteRequest) GetForce() bool {
    -	if m != nil && m.Force != nil {
    -		return *m.Force
    -	}
    -	return Default_DeleteRequest_Force
    -}
    -
    -func (m *DeleteRequest) GetMarkChanges() bool {
    -	if m != nil && m.MarkChanges != nil {
    -		return *m.MarkChanges
    -	}
    -	return Default_DeleteRequest_MarkChanges
    -}
    -
    -func (m *DeleteRequest) GetSnapshot() []*Snapshot {
    -	if m != nil {
    -		return m.Snapshot
    -	}
    -	return nil
    -}
    -
    -type DeleteResponse struct {
    -	Cost                 *Cost    `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
    -	Version              []int64  `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *DeleteResponse) Reset()         { *m = DeleteResponse{} }
    -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
    -func (*DeleteResponse) ProtoMessage()    {}
    -func (*DeleteResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
    -}
    -func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
    -}
    -func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_DeleteResponse.Merge(dst, src)
    -}
    -func (m *DeleteResponse) XXX_Size() int {
    -	return xxx_messageInfo_DeleteResponse.Size(m)
    -}
    -func (m *DeleteResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
    -
    -func (m *DeleteResponse) GetCost() *Cost {
    -	if m != nil {
    -		return m.Cost
    -	}
    -	return nil
    -}
    -
    -func (m *DeleteResponse) GetVersion() []int64 {
    -	if m != nil {
    -		return m.Version
    -	}
    -	return nil
    -}
    -
    -type NextRequest struct {
    -	Header               *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
    -	Cursor               *Cursor         `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
    -	Count                *int32          `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
    -	Offset               *int32          `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
    -	Compile              *bool           `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
    -	XXX_unrecognized     []byte          `json:"-"`
    -	XXX_sizecache        int32           `json:"-"`
    -}
    -
    -func (m *NextRequest) Reset()         { *m = NextRequest{} }
    -func (m *NextRequest) String() string { return proto.CompactTextString(m) }
    -func (*NextRequest) ProtoMessage()    {}
    -func (*NextRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
    -}
    -func (m *NextRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_NextRequest.Unmarshal(m, b)
    -}
    -func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *NextRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NextRequest.Merge(dst, src)
    -}
    -func (m *NextRequest) XXX_Size() int {
    -	return xxx_messageInfo_NextRequest.Size(m)
    -}
    -func (m *NextRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NextRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NextRequest proto.InternalMessageInfo
    -
    -const Default_NextRequest_Offset int32 = 0
    -const Default_NextRequest_Compile bool = false
    -
    -func (m *NextRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *NextRequest) GetCursor() *Cursor {
    -	if m != nil {
    -		return m.Cursor
    -	}
    -	return nil
    -}
    -
    -func (m *NextRequest) GetCount() int32 {
    -	if m != nil && m.Count != nil {
    -		return *m.Count
    -	}
    -	return 0
    -}
    -
    -func (m *NextRequest) GetOffset() int32 {
    -	if m != nil && m.Offset != nil {
    -		return *m.Offset
    -	}
    -	return Default_NextRequest_Offset
    -}
    -
    -func (m *NextRequest) GetCompile() bool {
    -	if m != nil && m.Compile != nil {
    -		return *m.Compile
    -	}
    -	return Default_NextRequest_Compile
    -}
    -
    -type QueryResult struct {
    -	Cursor               *Cursor           `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
    -	Result               []*EntityProto    `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
    -	SkippedResults       *int32            `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
    -	MoreResults          *bool             `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
    -	KeysOnly             *bool             `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
    -	IndexOnly            *bool             `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
    -	SmallOps             *bool             `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
    -	CompiledQuery        *CompiledQuery    `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
    -	CompiledCursor       *CompiledCursor   `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
    -	Index                []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
    -	Version              []int64           `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *QueryResult) Reset()         { *m = QueryResult{} }
    -func (m *QueryResult) String() string { return proto.CompactTextString(m) }
    -func (*QueryResult) ProtoMessage()    {}
    -func (*QueryResult) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
    -}
    -func (m *QueryResult) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_QueryResult.Unmarshal(m, b)
    -}
    -func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
    -}
    -func (dst *QueryResult) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_QueryResult.Merge(dst, src)
    -}
    -func (m *QueryResult) XXX_Size() int {
    -	return xxx_messageInfo_QueryResult.Size(m)
    -}
    -func (m *QueryResult) XXX_DiscardUnknown() {
    -	xxx_messageInfo_QueryResult.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_QueryResult proto.InternalMessageInfo
    -
    -func (m *QueryResult) GetCursor() *Cursor {
    -	if m != nil {
    -		return m.Cursor
    -	}
    -	return nil
    -}
    -
    -func (m *QueryResult) GetResult() []*EntityProto {
    -	if m != nil {
    -		return m.Result
    -	}
    -	return nil
    -}
    -
    -func (m *QueryResult) GetSkippedResults() int32 {
    -	if m != nil && m.SkippedResults != nil {
    -		return *m.SkippedResults
    -	}
    -	return 0
    -}
    -
    -func (m *QueryResult) GetMoreResults() bool {
    -	if m != nil && m.MoreResults != nil {
    -		return *m.MoreResults
    -	}
    -	return false
    -}
    -
    -func (m *QueryResult) GetKeysOnly() bool {
    -	if m != nil && m.KeysOnly != nil {
    -		return *m.KeysOnly
    -	}
    -	return false
    -}
    -
    -func (m *QueryResult) GetIndexOnly() bool {
    -	if m != nil && m.IndexOnly != nil {
    -		return *m.IndexOnly
    -	}
    -	return false
    -}
    -
    -func (m *QueryResult) GetSmallOps() bool {
    -	if m != nil && m.SmallOps != nil {
    -		return *m.SmallOps
    -	}
    -	return false
    -}
    -
    -func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
    -	if m != nil {
    -		return m.CompiledQuery
    -	}
    -	return nil
    -}
    -
    -func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
    -	if m != nil {
    -		return m.CompiledCursor
    -	}
    -	return nil
    -}
    -
    -func (m *QueryResult) GetIndex() []*CompositeIndex {
    -	if m != nil {
    -		return m.Index
    -	}
    -	return nil
    -}
    -
    -func (m *QueryResult) GetVersion() []int64 {
    -	if m != nil {
    -		return m.Version
    -	}
    -	return nil
    -}
    -
    -type AllocateIdsRequest struct {
    -	Header               *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
    -	ModelKey             *Reference      `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
    -	Size                 *int64          `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
    -	Max                  *int64          `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
    -	Reserve              []*Reference    `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
    -	XXX_unrecognized     []byte          `json:"-"`
    -	XXX_sizecache        int32           `json:"-"`
    -}
    -
    -func (m *AllocateIdsRequest) Reset()         { *m = AllocateIdsRequest{} }
    -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
    -func (*AllocateIdsRequest) ProtoMessage()    {}
    -func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
    -}
    -func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
    -}
    -func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
    -}
    -func (m *AllocateIdsRequest) XXX_Size() int {
    -	return xxx_messageInfo_AllocateIdsRequest.Size(m)
    -}
    -func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
    -
    -func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *AllocateIdsRequest) GetModelKey() *Reference {
    -	if m != nil {
    -		return m.ModelKey
    -	}
    -	return nil
    -}
    -
    -func (m *AllocateIdsRequest) GetSize() int64 {
    -	if m != nil && m.Size != nil {
    -		return *m.Size
    -	}
    -	return 0
    -}
    -
    -func (m *AllocateIdsRequest) GetMax() int64 {
    -	if m != nil && m.Max != nil {
    -		return *m.Max
    -	}
    -	return 0
    -}
    -
    -func (m *AllocateIdsRequest) GetReserve() []*Reference {
    -	if m != nil {
    -		return m.Reserve
    -	}
    -	return nil
    -}
    -
    -type AllocateIdsResponse struct {
    -	Start                *int64   `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
    -	End                  *int64   `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
    -	Cost                 *Cost    `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *AllocateIdsResponse) Reset()         { *m = AllocateIdsResponse{} }
    -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
    -func (*AllocateIdsResponse) ProtoMessage()    {}
    -func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
    -}
    -func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
    -}
    -func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
    -}
    -func (m *AllocateIdsResponse) XXX_Size() int {
    -	return xxx_messageInfo_AllocateIdsResponse.Size(m)
    -}
    -func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
    -
    -func (m *AllocateIdsResponse) GetStart() int64 {
    -	if m != nil && m.Start != nil {
    -		return *m.Start
    -	}
    -	return 0
    -}
    -
    -func (m *AllocateIdsResponse) GetEnd() int64 {
    -	if m != nil && m.End != nil {
    -		return *m.End
    -	}
    -	return 0
    -}
    -
    -func (m *AllocateIdsResponse) GetCost() *Cost {
    -	if m != nil {
    -		return m.Cost
    -	}
    -	return nil
    -}
    -
    -type CompositeIndices struct {
    -	Index                []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *CompositeIndices) Reset()         { *m = CompositeIndices{} }
    -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
    -func (*CompositeIndices) ProtoMessage()    {}
    -func (*CompositeIndices) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
    -}
    -func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
    -}
    -func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
    -}
    -func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CompositeIndices.Merge(dst, src)
    -}
    -func (m *CompositeIndices) XXX_Size() int {
    -	return xxx_messageInfo_CompositeIndices.Size(m)
    -}
    -func (m *CompositeIndices) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
    -
    -func (m *CompositeIndices) GetIndex() []*CompositeIndex {
    -	if m != nil {
    -		return m.Index
    -	}
    -	return nil
    -}
    -
    -type AddActionsRequest struct {
    -	Header               *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
    -	Transaction          *Transaction    `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
    -	Action               []*Action       `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
    -	XXX_unrecognized     []byte          `json:"-"`
    -	XXX_sizecache        int32           `json:"-"`
    -}
    -
    -func (m *AddActionsRequest) Reset()         { *m = AddActionsRequest{} }
    -func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
    -func (*AddActionsRequest) ProtoMessage()    {}
    -func (*AddActionsRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
    -}
    -func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
    -}
    -func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_AddActionsRequest.Merge(dst, src)
    -}
    -func (m *AddActionsRequest) XXX_Size() int {
    -	return xxx_messageInfo_AddActionsRequest.Size(m)
    -}
    -func (m *AddActionsRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
    -
    -func (m *AddActionsRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *AddActionsRequest) GetTransaction() *Transaction {
    -	if m != nil {
    -		return m.Transaction
    -	}
    -	return nil
    -}
    -
    -func (m *AddActionsRequest) GetAction() []*Action {
    -	if m != nil {
    -		return m.Action
    -	}
    -	return nil
    -}
    -
    -type AddActionsResponse struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *AddActionsResponse) Reset()         { *m = AddActionsResponse{} }
    -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
    -func (*AddActionsResponse) ProtoMessage()    {}
    -func (*AddActionsResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
    -}
    -func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
    -}
    -func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_AddActionsResponse.Merge(dst, src)
    -}
    -func (m *AddActionsResponse) XXX_Size() int {
    -	return xxx_messageInfo_AddActionsResponse.Size(m)
    -}
    -func (m *AddActionsResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
    -
    -type BeginTransactionRequest struct {
    -	Header               *InternalHeader                          `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
    -	App                  *string                                  `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
    -	AllowMultipleEg      *bool                                    `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
    -	DatabaseId           *string                                  `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
    -	Mode                 *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
    -	PreviousTransaction  *Transaction                             `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                                 `json:"-"`
    -	XXX_unrecognized     []byte                                   `json:"-"`
    -	XXX_sizecache        int32                                    `json:"-"`
    -}
    -
    -func (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }
    -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
    -func (*BeginTransactionRequest) ProtoMessage()    {}
    -func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
    -}
    -func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
    -}
    -func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
    -}
    -func (m *BeginTransactionRequest) XXX_Size() int {
    -	return xxx_messageInfo_BeginTransactionRequest.Size(m)
    -}
    -func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
    -
    -const Default_BeginTransactionRequest_AllowMultipleEg bool = false
    -const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
    -
    -func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *BeginTransactionRequest) GetApp() string {
    -	if m != nil && m.App != nil {
    -		return *m.App
    -	}
    -	return ""
    -}
    -
    -func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
    -	if m != nil && m.AllowMultipleEg != nil {
    -		return *m.AllowMultipleEg
    -	}
    -	return Default_BeginTransactionRequest_AllowMultipleEg
    -}
    -
    -func (m *BeginTransactionRequest) GetDatabaseId() string {
    -	if m != nil && m.DatabaseId != nil {
    -		return *m.DatabaseId
    -	}
    -	return ""
    -}
    -
    -func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
    -	if m != nil && m.Mode != nil {
    -		return *m.Mode
    -	}
    -	return Default_BeginTransactionRequest_Mode
    -}
    -
    -func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
    -	if m != nil {
    -		return m.PreviousTransaction
    -	}
    -	return nil
    -}
    -
    -type CommitResponse struct {
    -	Cost                 *Cost                     `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
    -	Version              []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
    -	XXX_unrecognized     []byte                    `json:"-"`
    -	XXX_sizecache        int32                     `json:"-"`
    -}
    -
    -func (m *CommitResponse) Reset()         { *m = CommitResponse{} }
    -func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
    -func (*CommitResponse) ProtoMessage()    {}
    -func (*CommitResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
    -}
    -func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
    -}
    -func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *CommitResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CommitResponse.Merge(dst, src)
    -}
    -func (m *CommitResponse) XXX_Size() int {
    -	return xxx_messageInfo_CommitResponse.Size(m)
    -}
    -func (m *CommitResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CommitResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
    -
    -func (m *CommitResponse) GetCost() *Cost {
    -	if m != nil {
    -		return m.Cost
    -	}
    -	return nil
    -}
    -
    -func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
    -	if m != nil {
    -		return m.Version
    -	}
    -	return nil
    -}
    -
    -type CommitResponse_Version struct {
    -	RootEntityKey        *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
    -	Version              *int64     `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
    -	XXX_unrecognized     []byte     `json:"-"`
    -	XXX_sizecache        int32      `json:"-"`
    -}
    -
    -func (m *CommitResponse_Version) Reset()         { *m = CommitResponse_Version{} }
    -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
    -func (*CommitResponse_Version) ProtoMessage()    {}
    -func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
    -}
    -func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
    -}
    -func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
    -}
    -func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
    -}
    -func (m *CommitResponse_Version) XXX_Size() int {
    -	return xxx_messageInfo_CommitResponse_Version.Size(m)
    -}
    -func (m *CommitResponse_Version) XXX_DiscardUnknown() {
    -	xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
    -
    -func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
    -	if m != nil {
    -		return m.RootEntityKey
    -	}
    -	return nil
    -}
    -
    -func (m *CommitResponse_Version) GetVersion() int64 {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return 0
    -}
    -
    -func init() {
    -	proto.RegisterType((*Action)(nil), "appengine.Action")
    -	proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
    -	proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
    -	proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
    -	proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
    -	proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
    -	proto.RegisterType((*Property)(nil), "appengine.Property")
    -	proto.RegisterType((*Path)(nil), "appengine.Path")
    -	proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
    -	proto.RegisterType((*Reference)(nil), "appengine.Reference")
    -	proto.RegisterType((*User)(nil), "appengine.User")
    -	proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
    -	proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
    -	proto.RegisterType((*Index)(nil), "appengine.Index")
    -	proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
    -	proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
    -	proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
    -	proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
    -	proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
    -	proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
    -	proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
    -	proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
    -	proto.RegisterType((*Query)(nil), "appengine.Query")
    -	proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
    -	proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
    -	proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
    -	proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
    -	proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
    -	proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
    -	proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
    -	proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
    -	proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
    -	proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
    -	proto.RegisterType((*Error)(nil), "appengine.Error")
    -	proto.RegisterType((*Cost)(nil), "appengine.Cost")
    -	proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
    -	proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
    -	proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
    -	proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
    -	proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
    -	proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
    -	proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
    -	proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
    -	proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
    -	proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
    -	proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
    -	proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
    -	proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
    -	proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
    -	proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
    -	proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
    -	proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
    -	proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
    -	proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
    -	proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
    -}
    -
    -func init() {
    -	proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
    -}
    -
    -var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
    -	// 4156 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
    -	0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
    -	0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
    -	0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
    -	0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
    -	0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
    -	0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
    -	0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
    -	0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
    -	0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
    -	0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
    -	0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
    -	0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
    -	0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
    -	0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
    -	0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
    -	0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
    -	0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
    -	0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
    -	0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
    -	0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
    -	0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
    -	0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
    -	0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
    -	0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
    -	0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
    -	0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
    -	0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
    -	0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
    -	0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
    -	0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
    -	0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
    -	0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
    -	0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
    -	0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
    -	0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
    -	0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
    -	0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
    -	0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
    -	0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
    -	0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
    -	0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
    -	0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
    -	0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
    -	0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
    -	0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
    -	0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
    -	0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
    -	0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
    -	0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
    -	0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
    -	0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
    -	0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
    -	0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
    -	0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
    -	0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
    -	0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
    -	0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
    -	0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
    -	0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
    -	0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
    -	0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
    -	0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
    -	0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
    -	0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
    -	0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
    -	0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
    -	0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
    -	0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
    -	0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
    -	0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
    -	0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
    -	0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
    -	0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
    -	0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
    -	0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
    -	0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
    -	0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
    -	0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
    -	0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
    -	0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
    -	0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
    -	0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
    -	0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
    -	0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
    -	0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
    -	0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
    -	0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
    -	0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
    -	0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
    -	0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
    -	0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
    -	0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
    -	0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
    -	0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
    -	0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
    -	0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
    -	0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
    -	0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
    -	0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
    -	0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
    -	0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
    -	0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
    -	0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
    -	0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
    -	0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
    -	0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
    -	0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
    -	0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
    -	0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
    -	0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
    -	0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
    -	0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
    -	0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
    -	0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
    -	0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
    -	0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
    -	0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
    -	0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
    -	0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
    -	0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
    -	0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
    -	0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
    -	0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
    -	0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
    -	0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
    -	0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
    -	0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
    -	0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
    -	0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
    -	0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
    -	0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
    -	0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
    -	0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
    -	0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
    -	0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
    -	0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
    -	0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
    -	0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
    -	0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
    -	0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
    -	0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
    -	0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
    -	0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
    -	0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
    -	0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
    -	0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
    -	0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
    -	0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
    -	0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
    -	0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
    -	0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
    -	0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
    -	0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
    -	0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
    -	0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
    -	0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
    -	0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
    -	0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
    -	0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
    -	0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
    -	0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
    -	0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
    -	0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
    -	0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
    -	0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
    -	0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
    -	0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
    -	0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
    -	0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
    -	0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
    -	0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
    -	0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
    -	0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
    -	0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
    -	0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
    -	0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
    -	0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
    -	0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
    -	0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
    -	0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
    -	0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
    -	0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
    -	0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
    -	0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
    -	0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
    -	0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
    -	0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
    -	0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
    -	0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
    -	0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
    -	0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
    -	0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
    -	0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
    -	0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
    -	0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
    -	0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
    -	0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
    -	0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
    -	0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
    -	0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
    -	0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
    -	0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
    -	0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
    -	0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
    -	0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
    -	0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
    -	0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
    -	0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
    -	0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
    -	0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
    -	0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
    -	0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
    -	0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
    -	0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
    -	0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
    -	0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
    -	0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
    -	0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
    -	0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
    -	0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
    -	0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
    -	0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
    -	0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
    -	0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
    -	0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
    -	0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
    -	0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
    -	0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
    -	0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
    -	0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
    -	0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
    -	0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
    -	0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
    -	0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
    -	0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
    -	0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
    -	0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
    -	0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
    -	0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
    -	0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
    -	0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
    -	0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
    -	0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
    -	0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
    -	0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
    -	0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
    -	0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
    -	0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
    -	0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
    -	0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
    -	0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
    -	0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
    -	0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
    -	0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
    -	0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
    -	0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
    -	0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
    -	0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
    -	0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
    deleted file mode 100644
    index 497b4d9a9a..0000000000
    --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
    +++ /dev/null
    @@ -1,551 +0,0 @@
    -syntax = "proto2";
    -option go_package = "datastore";
    -
    -package appengine;
    -
    -message Action{}
    -
    -message PropertyValue {
    -  optional int64 int64Value = 1;
    -  optional bool booleanValue = 2;
    -  optional string stringValue = 3;
    -  optional double doubleValue = 4;
    -
    -  optional group PointValue = 5 {
    -    required double x = 6;
    -    required double y = 7;
    -  }
    -
    -  optional group UserValue = 8 {
    -    required string email = 9;
    -    required string auth_domain = 10;
    -    optional string nickname = 11;
    -    optional string federated_identity = 21;
    -    optional string federated_provider = 22;
    -  }
    -
    -  optional group ReferenceValue = 12 {
    -    required string app = 13;
    -    optional string name_space = 20;
    -    repeated group PathElement = 14 {
    -      required string type = 15;
    -      optional int64 id = 16;
    -      optional string name = 17;
    -    }
    -  }
    -}
    -
    -message Property {
    -  enum Meaning {
    -    NO_MEANING = 0;
    -    BLOB = 14;
    -    TEXT = 15;
    -    BYTESTRING = 16;
    -
    -    ATOM_CATEGORY = 1;
    -    ATOM_LINK = 2;
    -    ATOM_TITLE = 3;
    -    ATOM_CONTENT = 4;
    -    ATOM_SUMMARY = 5;
    -    ATOM_AUTHOR = 6;
    -
    -    GD_WHEN = 7;
    -    GD_EMAIL = 8;
    -    GEORSS_POINT = 9;
    -    GD_IM = 10;
    -
    -    GD_PHONENUMBER = 11;
    -    GD_POSTALADDRESS = 12;
    -
    -    GD_RATING = 13;
    -
    -    BLOBKEY = 17;
    -    ENTITY_PROTO = 19;
    -
    -    INDEX_VALUE = 18;
    -  };
    -
    -  optional Meaning meaning = 1 [default = NO_MEANING];
    -  optional string meaning_uri = 2;
    -
    -  required string name = 3;
    -
    -  required PropertyValue value = 5;
    -
    -  required bool multiple = 4;
    -
    -  optional bool searchable = 6 [default=false];
    -
    -  enum FtsTokenizationOption {
    -    HTML = 1;
    -    ATOM = 2;
    -  }
    -
    -  optional FtsTokenizationOption fts_tokenization_option = 8;
    -
    -  optional string locale = 9 [default = "en"];
    -}
    -
    -message Path {
    -  repeated group Element = 1 {
    -    required string type = 2;
    -    optional int64 id = 3;
    -    optional string name = 4;
    -  }
    -}
    -
    -message Reference {
    -  required string app = 13;
    -  optional string name_space = 20;
    -  required Path path = 14;
    -}
    -
    -message User {
    -  required string email = 1;
    -  required string auth_domain = 2;
    -  optional string nickname = 3;
    -  optional string federated_identity = 6;
    -  optional string federated_provider = 7;
    -}
    -
    -message EntityProto {
    -  required Reference key = 13;
    -  required Path entity_group = 16;
    -  optional User owner = 17;
    -
    -  enum Kind {
    -    GD_CONTACT = 1;
    -    GD_EVENT = 2;
    -    GD_MESSAGE = 3;
    -  }
    -  optional Kind kind = 4;
    -  optional string kind_uri = 5;
    -
    -  repeated Property property = 14;
    -  repeated Property raw_property = 15;
    -
    -  optional int32 rank = 18;
    -}
    -
    -message CompositeProperty {
    -  required int64 index_id = 1;
    -  repeated string value = 2;
    -}
    -
    -message Index {
    -  required string entity_type = 1;
    -  required bool ancestor = 5;
    -  repeated group Property = 2 {
    -    required string name = 3;
    -    enum Direction {
    -      ASCENDING = 1;
    -      DESCENDING = 2;
    -    }
    -    optional Direction direction = 4 [default = ASCENDING];
    -  }
    -}
    -
    -message CompositeIndex {
    -  required string app_id = 1;
    -  required int64 id = 2;
    -  required Index definition = 3;
    -
    -  enum State {
    -    WRITE_ONLY = 1;
    -    READ_WRITE = 2;
    -    DELETED = 3;
    -    ERROR = 4;
    -  }
    -  required State state = 4;
    -
    -  optional bool only_use_if_required = 6 [default = false];
    -}
    -
    -message IndexPostfix {
    -  message IndexValue {
    -    required string property_name = 1;
    -    required PropertyValue value = 2;
    -  }
    -
    -  repeated IndexValue index_value = 1;
    -
    -  optional Reference key = 2;
    -
    -  optional bool before = 3 [default=true];
    -}
    -
    -message IndexPosition {
    -  optional string key = 1;
    -
    -  optional bool before = 2 [default=true];
    -}
    -
    -message Snapshot {
    -  enum Status {
    -    INACTIVE = 0;
    -    ACTIVE = 1;
    -  }
    -
    -  required int64 ts = 1;
    -}
    -
    -message InternalHeader {
    -  optional string qos = 1;
    -}
    -
    -message Transaction {
    -  optional InternalHeader header = 4;
    -  required fixed64 handle = 1;
    -  required string app = 2;
    -  optional bool mark_changes = 3 [default = false];
    -}
    -
    -message Query {
    -  optional InternalHeader header = 39;
    -
    -  required string app = 1;
    -  optional string name_space = 29;
    -
    -  optional string kind = 3;
    -  optional Reference ancestor = 17;
    -
    -  repeated group Filter = 4 {
    -    enum Operator {
    -      LESS_THAN = 1;
    -      LESS_THAN_OR_EQUAL = 2;
    -      GREATER_THAN = 3;
    -      GREATER_THAN_OR_EQUAL = 4;
    -      EQUAL = 5;
    -      IN = 6;
    -      EXISTS = 7;
    -    }
    -
    -    required Operator op = 6;
    -    repeated Property property = 14;
    -  }
    -
    -  optional string search_query = 8;
    -
    -  repeated group Order = 9 {
    -    enum Direction {
    -      ASCENDING = 1;
    -      DESCENDING = 2;
    -    }
    -
    -    required string property = 10;
    -    optional Direction direction = 11 [default = ASCENDING];
    -  }
    -
    -  enum Hint {
    -    ORDER_FIRST = 1;
    -    ANCESTOR_FIRST = 2;
    -    FILTER_FIRST = 3;
    -  }
    -  optional Hint hint = 18;
    -
    -  optional int32 count = 23;
    -
    -  optional int32 offset = 12 [default = 0];
    -
    -  optional int32 limit = 16;
    -
    -  optional CompiledCursor compiled_cursor = 30;
    -  optional CompiledCursor end_compiled_cursor = 31;
    -
    -  repeated CompositeIndex composite_index = 19;
    -
    -  optional bool require_perfect_plan = 20 [default = false];
    -
    -  optional bool keys_only = 21 [default = false];
    -
    -  optional Transaction transaction = 22;
    -
    -  optional bool compile = 25 [default = false];
    -
    -  optional int64 failover_ms = 26;
    -
    -  optional bool strong = 32;
    -
    -  repeated string property_name = 33;
    -
    -  repeated string group_by_property_name = 34;
    -
    -  optional bool distinct = 24;
    -
    -  optional int64 min_safe_time_seconds = 35;
    -
    -  repeated string safe_replica_name = 36;
    -
    -  optional bool persist_offset = 37 [default=false];
    -}
    -
    -message CompiledQuery {
    -  required group PrimaryScan = 1 {
    -    optional string index_name = 2;
    -
    -    optional string start_key = 3;
    -    optional bool start_inclusive = 4;
    -    optional string end_key = 5;
    -    optional bool end_inclusive = 6;
    -
    -    repeated string start_postfix_value = 22;
    -    repeated string end_postfix_value = 23;
    -
    -    optional int64 end_unapplied_log_timestamp_us = 19;
    -  }
    -
    -  repeated group MergeJoinScan = 7 {
    -    required string index_name = 8;
    -
    -    repeated string prefix_value = 9;
    -
    -    optional bool value_prefix = 20 [default=false];
    -  }
    -
    -  optional Index index_def = 21;
    -
    -  optional int32 offset = 10 [default = 0];
    -
    -  optional int32 limit = 11;
    -
    -  required bool keys_only = 12;
    -
    -  repeated string property_name = 24;
    -
    -  optional int32 distinct_infix_size = 25;
    -
    -  optional group EntityFilter = 13 {
    -    optional bool distinct = 14 [default=false];
    -
    -    optional string kind = 17;
    -    optional Reference ancestor = 18;
    -  }
    -}
    -
    -message CompiledCursor {
    -  optional group Position = 2 {
    -    optional string start_key = 27;
    -
    -    repeated group IndexValue = 29 {
    -      optional string property = 30;
    -      required PropertyValue value = 31;
    -    }
    -
    -    optional Reference key = 32;
    -
    -    optional bool start_inclusive = 28 [default=true];
    -  }
    -}
    -
    -message Cursor {
    -  required fixed64 cursor = 1;
    -
    -  optional string app = 2;
    -}
    -
    -message Error {
    -  enum ErrorCode {
    -    BAD_REQUEST = 1;
    -    CONCURRENT_TRANSACTION = 2;
    -    INTERNAL_ERROR = 3;
    -    NEED_INDEX = 4;
    -    TIMEOUT = 5;
    -    PERMISSION_DENIED = 6;
    -    BIGTABLE_ERROR = 7;
    -    COMMITTED_BUT_STILL_APPLYING = 8;
    -    CAPABILITY_DISABLED = 9;
    -    TRY_ALTERNATE_BACKEND = 10;
    -    SAFE_TIME_TOO_OLD = 11;
    -  }
    -}
    -
    -message Cost {
    -  optional int32 index_writes = 1;
    -  optional int32 index_write_bytes = 2;
    -  optional int32 entity_writes = 3;
    -  optional int32 entity_write_bytes = 4;
    -  optional group CommitCost = 5 {
    -    optional int32 requested_entity_puts = 6;
    -    optional int32 requested_entity_deletes = 7;
    -  };
    -  optional int32 approximate_storage_delta = 8;
    -  optional int32 id_sequence_updates = 9;
    -}
    -
    -message GetRequest {
    -  optional InternalHeader header = 6;
    -
    -  repeated Reference key = 1;
    -  optional Transaction transaction = 2;
    -
    -  optional int64 failover_ms = 3;
    -
    -  optional bool strong = 4;
    -
    -  optional bool allow_deferred = 5 [default=false];
    -}
    -
    -message GetResponse {
    -  repeated group Entity = 1 {
    -    optional EntityProto entity = 2;
    -    optional Reference key = 4;
    -
    -    optional int64 version = 3;
    -  }
    -
    -  repeated Reference deferred = 5;
    -
    -  optional bool in_order = 6 [default=true];
    -}
    -
    -message PutRequest {
    -  optional InternalHeader header = 11;
    -
    -  repeated EntityProto entity = 1;
    -  optional Transaction transaction = 2;
    -  repeated CompositeIndex composite_index = 3;
    -
    -  optional bool trusted = 4 [default = false];
    -
    -  optional bool force = 7 [default = false];
    -
    -  optional bool mark_changes = 8 [default = false];
    -  repeated Snapshot snapshot = 9;
    -
    -  enum AutoIdPolicy {
    -    CURRENT = 0;
    -    SEQUENTIAL = 1;
    -  }
    -  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
    -}
    -
    -message PutResponse {
    -  repeated Reference key = 1;
    -  optional Cost cost = 2;
    -  repeated int64 version = 3;
    -}
    -
    -message TouchRequest {
    -  optional InternalHeader header = 10;
    -
    -  repeated Reference key = 1;
    -  repeated CompositeIndex composite_index = 2;
    -  optional bool force = 3 [default = false];
    -  repeated Snapshot snapshot = 9;
    -}
    -
    -message TouchResponse {
    -  optional Cost cost = 1;
    -}
    -
    -message DeleteRequest {
    -  optional InternalHeader header = 10;
    -
    -  repeated Reference key = 6;
    -  optional Transaction transaction = 5;
    -
    -  optional bool trusted = 4 [default = false];
    -
    -  optional bool force = 7 [default = false];
    -
    -  optional bool mark_changes = 8 [default = false];
    -  repeated Snapshot snapshot = 9;
    -}
    -
    -message DeleteResponse {
    -  optional Cost cost = 1;
    -  repeated int64 version = 3;
    -}
    -
    -message NextRequest {
    -  optional InternalHeader header = 5;
    -
    -  required Cursor cursor = 1;
    -  optional int32 count = 2;
    -
    -  optional int32 offset = 4 [default = 0];
    -
    -  optional bool compile = 3 [default = false];
    -}
    -
    -message QueryResult {
    -  optional Cursor cursor = 1;
    -
    -  repeated EntityProto result = 2;
    -
    -  optional int32 skipped_results = 7;
    -
    -  required bool more_results = 3;
    -
    -  optional bool keys_only = 4;
    -
    -  optional bool index_only = 9;
    -
    -  optional bool small_ops = 10;
    -
    -  optional CompiledQuery compiled_query = 5;
    -
    -  optional CompiledCursor compiled_cursor = 6;
    -
    -  repeated CompositeIndex index = 8;
    -
    -  repeated int64 version = 11;
    -}
    -
    -message AllocateIdsRequest {
    -  optional InternalHeader header = 4;
    -
    -  optional Reference model_key = 1;
    -
    -  optional int64 size = 2;
    -
    -  optional int64 max = 3;
    -
    -  repeated Reference reserve = 5;
    -}
    -
    -message AllocateIdsResponse {
    -  required int64 start = 1;
    -  required int64 end = 2;
    -  optional Cost cost = 3;
    -}
    -
    -message CompositeIndices {
    -  repeated CompositeIndex index = 1;
    -}
    -
    -message AddActionsRequest {
    -  optional InternalHeader header = 3;
    -
    -  required Transaction transaction = 1;
    -  repeated Action action = 2;
    -}
    -
    -message AddActionsResponse {
    -}
    -
    -message BeginTransactionRequest {
    -  optional InternalHeader header = 3;
    -
    -  required string app = 1;
    -  optional bool allow_multiple_eg = 2 [default = false];
    -  optional string database_id = 4;
    -
    -  enum TransactionMode {
    -    UNKNOWN = 0;
    -    READ_ONLY = 1;
    -    READ_WRITE = 2;
    -  }
    -  optional TransactionMode mode = 5 [default = UNKNOWN];
    -
    -  optional Transaction previous_transaction = 7;
    -}
    -
    -message CommitResponse {
    -  optional Cost cost = 1;
    -
    -  repeated group Version = 3 {
    -    required Reference root_entity_key = 4;
    -    required int64 version = 5;
    -  }
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
    deleted file mode 100644
    index 0f95aa91d5..0000000000
    --- a/vendor/google.golang.org/appengine/internal/identity.go
    +++ /dev/null
    @@ -1,54 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package internal
    -
    -import (
    -	"context"
    -	"os"
    -)
    -
    -var (
    -	// This is set to true in identity_classic.go, which is behind the appengine build tag.
    -	// The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
    -	// the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
    -	// first-gen runtime. See IsStandard below for the second-gen check.
    -	appengineStandard bool
    -
    -	// This is set to true in identity_flex.go, which is behind the appenginevm build tag.
    -	appengineFlex bool
    -)
    -
    -// AppID is the implementation of the wrapper function of the same name in
    -// ../identity.go. See that file for commentary.
    -func AppID(c context.Context) string {
    -	return appID(FullyQualifiedAppID(c))
    -}
    -
    -// IsStandard is the implementation of the wrapper function of the same name in
    -// ../appengine.go. See that file for commentary.
    -func IsStandard() bool {
    -	// appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
    -	// second-gen (>= Go 1.11).
    -	return appengineStandard || IsSecondGen()
    -}
    -
    -// IsSecondGen is the implementation of the wrapper function of the same name in
    -// ../appengine.go. See that file for commentary.
    -func IsSecondGen() bool {
    -	// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
    -	return os.Getenv("GAE_ENV") == "standard"
    -}
    -
    -// IsFlex is the implementation of the wrapper function of the same name in
    -// ../appengine.go. See that file for commentary.
    -func IsFlex() bool {
    -	return appengineFlex
    -}
    -
    -// IsAppEngine is the implementation of the wrapper function of the same name in
    -// ../appengine.go. See that file for commentary.
    -func IsAppEngine() bool {
    -	return IsStandard() || IsFlex()
    -}
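For context on the file removed above: the deleted identity.go separates first-generation runtimes (flagged via the appengine build tag), second-generation runtimes (detected through the GAE_ENV environment variable), and Flex. A minimal standalone sketch of just the environment-based second-gen check, using only the standard library; the function name here is illustrative, not part of any retained API:

package main

import (
	"fmt"
	"os"
)

// secondGen mirrors the check in the removed IsSecondGen: second-generation
// App Engine standard runtimes export GAE_ENV=standard.
func secondGen() bool {
	return os.Getenv("GAE_ENV") == "standard"
}

func main() {
	// Prints true on a second-gen standard runtime, false elsewhere.
	fmt.Println("second-gen App Engine standard:", secondGen())
}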
    diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
    deleted file mode 100644
    index 5ad3548bf7..0000000000
    --- a/vendor/google.golang.org/appengine/internal/identity_classic.go
    +++ /dev/null
    @@ -1,62 +0,0 @@
    -// Copyright 2015 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build appengine
    -// +build appengine
    -
    -package internal
    -
    -import (
    -	"context"
    -
    -	"appengine"
    -)
    -
    -func init() {
    -	appengineStandard = true
    -}
    -
    -func DefaultVersionHostname(ctx context.Context) string {
    -	c := fromContext(ctx)
    -	if c == nil {
    -		panic(errNotAppEngineContext)
    -	}
    -	return appengine.DefaultVersionHostname(c)
    -}
    -
    -func Datacenter(_ context.Context) string { return appengine.Datacenter() }
    -func ServerSoftware() string              { return appengine.ServerSoftware() }
    -func InstanceID() string                  { return appengine.InstanceID() }
    -func IsDevAppServer() bool                { return appengine.IsDevAppServer() }
    -
    -func RequestID(ctx context.Context) string {
    -	c := fromContext(ctx)
    -	if c == nil {
    -		panic(errNotAppEngineContext)
    -	}
    -	return appengine.RequestID(c)
    -}
    -
    -func ModuleName(ctx context.Context) string {
    -	c := fromContext(ctx)
    -	if c == nil {
    -		panic(errNotAppEngineContext)
    -	}
    -	return appengine.ModuleName(c)
    -}
    -func VersionID(ctx context.Context) string {
    -	c := fromContext(ctx)
    -	if c == nil {
    -		panic(errNotAppEngineContext)
    -	}
    -	return appengine.VersionID(c)
    -}
    -
    -func fullyQualifiedAppID(ctx context.Context) string {
    -	c := fromContext(ctx)
    -	if c == nil {
    -		panic(errNotAppEngineContext)
    -	}
    -	return c.FullyQualifiedAppID()
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
    deleted file mode 100644
    index 4201b6b585..0000000000
    --- a/vendor/google.golang.org/appengine/internal/identity_flex.go
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -// Copyright 2018 Google LLC. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build appenginevm
    -// +build appenginevm
    -
    -package internal
    -
    -func init() {
    -	appengineFlex = true
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
    deleted file mode 100644
    index 18ddda3a42..0000000000
    --- a/vendor/google.golang.org/appengine/internal/identity_vm.go
    +++ /dev/null
    @@ -1,134 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build !appengine
    -// +build !appengine
    -
    -package internal
    -
    -import (
    -	"context"
    -	"log"
    -	"net/http"
    -	"os"
    -	"strings"
    -)
    -
    -// These functions are implementations of the wrapper functions
    -// in ../appengine/identity.go. See that file for commentary.
    -
    -const (
    -	hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
    -	hRequestLogId           = "X-AppEngine-Request-Log-Id"
    -	hDatacenter             = "X-AppEngine-Datacenter"
    -)
    -
    -func ctxHeaders(ctx context.Context) http.Header {
    -	c := fromContext(ctx)
    -	if c == nil {
    -		return nil
    -	}
    -	return c.Request().Header
    -}
    -
    -func DefaultVersionHostname(ctx context.Context) string {
    -	return ctxHeaders(ctx).Get(hDefaultVersionHostname)
    -}
    -
    -func RequestID(ctx context.Context) string {
    -	return ctxHeaders(ctx).Get(hRequestLogId)
    -}
    -
    -func Datacenter(ctx context.Context) string {
    -	if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
    -		return dc
    -	}
    -	// If the header isn't set, read zone from the metadata service.
    -	// It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
    -	zone, err := getMetadata("instance/zone")
    -	if err != nil {
    -		log.Printf("Datacenter: %v", err)
    -		return ""
    -	}
    -	parts := strings.Split(string(zone), "/")
    -	if len(parts) == 0 {
    -		return ""
    -	}
    -	return parts[len(parts)-1]
    -}
    -
    -func ServerSoftware() string {
    -	// TODO(dsymonds): Remove fallback when we've verified this.
    -	if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
    -		return s
    -	}
    -	if s := os.Getenv("GAE_ENV"); s != "" {
    -		return s
    -	}
    -	return "Google App Engine/1.x.x"
    -}
    -
    -// TODO(dsymonds): Remove the metadata fetches.
    -
    -func ModuleName(_ context.Context) string {
    -	if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
    -		return s
    -	}
    -	if s := os.Getenv("GAE_SERVICE"); s != "" {
    -		return s
    -	}
    -	return string(mustGetMetadata("instance/attributes/gae_backend_name"))
    -}
    -
    -func VersionID(_ context.Context) string {
    -	if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
    -		return s1 + "." + s2
    -	}
    -	if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
    -		return s1 + "." + s2
    -	}
    -	return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
    -}
    -
    -func InstanceID() string {
    -	if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
    -		return s
    -	}
    -	if s := os.Getenv("GAE_INSTANCE"); s != "" {
    -		return s
    -	}
    -	return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
    -}
    -
    -func partitionlessAppID() string {
    -	// gae_project has everything except the partition prefix.
    -	if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
    -		return appID
    -	}
    -	if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
    -		return project
    -	}
    -	return string(mustGetMetadata("instance/attributes/gae_project"))
    -}
    -
    -func fullyQualifiedAppID(_ context.Context) string {
    -	if s := os.Getenv("GAE_APPLICATION"); s != "" {
    -		return s
    -	}
    -	appID := partitionlessAppID()
    -
    -	part := os.Getenv("GAE_PARTITION")
    -	if part == "" {
    -		part = string(mustGetMetadata("instance/attributes/gae_partition"))
    -	}
    -
    -	if part != "" {
    -		appID = part + "~" + appID
    -	}
    -	return appID
    -}
    -
    -func IsDevAppServer() bool {
    -	return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev"
    -}
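The deleted identity_vm.go above resolves each identity field by checking newer environment variables first, then older ones, and finally the metadata server. A small sketch of that env-var fallback pattern follows, with the metadata fallback omitted and the helper name invented for illustration:

package main

import (
	"fmt"
	"os"
)

// firstNonEmptyEnv returns the value of the first environment variable in
// keys that is set and non-empty, mirroring the fallback chains used by the
// removed ModuleName/VersionID/InstanceID helpers (e.g. GAE_MODULE_NAME
// before GAE_SERVICE).
func firstNonEmptyEnv(keys ...string) string {
	for _, k := range keys {
		if v := os.Getenv(k); v != "" {
			return v
		}
	}
	return ""
}

func main() {
	service := firstNonEmptyEnv("GAE_MODULE_NAME", "GAE_SERVICE")
	fmt.Println("service:", service)
}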
    diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
    deleted file mode 100644
    index 051ea3980a..0000000000
    --- a/vendor/google.golang.org/appengine/internal/internal.go
    +++ /dev/null
    @@ -1,110 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -// Package internal provides support for package appengine.
    -//
    -// Programs should not use this package directly. Its API is not stable.
    -// Use packages appengine and appengine/* instead.
    -package internal
    -
    -import (
    -	"fmt"
    -
    -	"github.com/golang/protobuf/proto"
    -
    -	remotepb "google.golang.org/appengine/internal/remote_api"
    -)
    -
    -// errorCodeMaps is a map of service name to the error code map for the service.
    -var errorCodeMaps = make(map[string]map[int32]string)
    -
    -// RegisterErrorCodeMap is called from API implementations to register their
    -// error code map. This should only be called from init functions.
    -func RegisterErrorCodeMap(service string, m map[int32]string) {
    -	errorCodeMaps[service] = m
    -}
    -
    -type timeoutCodeKey struct {
    -	service string
    -	code    int32
    -}
    -
    -// timeoutCodes is the set of service+code pairs that represent timeouts.
    -var timeoutCodes = make(map[timeoutCodeKey]bool)
    -
    -func RegisterTimeoutErrorCode(service string, code int32) {
    -	timeoutCodes[timeoutCodeKey{service, code}] = true
    -}
    -
    -// APIError is the type returned by appengine.Context's Call method
    -// when an API call fails in an API-specific way. This may be, for instance,
    -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
    -type APIError struct {
    -	Service string
    -	Detail  string
    -	Code    int32 // API-specific error code
    -}
    -
    -func (e *APIError) Error() string {
    -	if e.Code == 0 {
    -		if e.Detail == "" {
    -			return "APIError "
    -		}
    -		return e.Detail
    -	}
    -	s := fmt.Sprintf("API error %d", e.Code)
    -	if m, ok := errorCodeMaps[e.Service]; ok {
    -		s += " (" + e.Service + ": " + m[e.Code] + ")"
    -	} else {
    -		// Shouldn't happen, but provide a bit more detail if it does.
    -		s = e.Service + " " + s
    -	}
    -	if e.Detail != "" {
    -		s += ": " + e.Detail
    -	}
    -	return s
    -}
    -
    -func (e *APIError) IsTimeout() bool {
    -	return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
    -}
    -
    -// CallError is the type returned by appengine.Context's Call method when an
    -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
    -type CallError struct {
    -	Detail string
    -	Code   int32
    -	// TODO: Remove this if we get a distinguishable error code.
    -	Timeout bool
    -}
    -
    -func (e *CallError) Error() string {
    -	var msg string
    -	switch remotepb.RpcError_ErrorCode(e.Code) {
    -	case remotepb.RpcError_UNKNOWN:
    -		return e.Detail
    -	case remotepb.RpcError_OVER_QUOTA:
    -		msg = "Over quota"
    -	case remotepb.RpcError_CAPABILITY_DISABLED:
    -		msg = "Capability disabled"
    -	case remotepb.RpcError_CANCELLED:
    -		msg = "Canceled"
    -	default:
    -		msg = fmt.Sprintf("Call error %d", e.Code)
    -	}
    -	s := msg + ": " + e.Detail
    -	if e.Timeout {
    -		s += " (timeout)"
    -	}
    -	return s
    -}
    -
    -func (e *CallError) IsTimeout() bool {
    -	return e.Timeout
    -}
    -
    -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
    -// The function should be prepared to be called on the same message more than once; it should only modify the
    -// RPC request the first time.
    -var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
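Both error types deleted above expose an IsTimeout method, so code holding an error from this package could classify timeouts through a small interface assertion instead of checking concrete types. A hedged sketch; the timeouter interface and fakeErr type are invented here purely for illustration:

package main

import (
	"errors"
	"fmt"
)

// timeouter is an illustrative local interface matching the IsTimeout method
// that both of the removed APIError and CallError types implement.
type timeouter interface {
	IsTimeout() bool
}

// isTimeout reports whether err (or anything it wraps) advertises itself as a
// timeout via an IsTimeout method.
func isTimeout(err error) bool {
	var t timeouter
	return errors.As(err, &t) && t.IsTimeout()
}

// fakeErr is a stand-in used only to exercise the helper in this sketch.
type fakeErr struct{ timeout bool }

func (e *fakeErr) Error() string   { return "call error" }
func (e *fakeErr) IsTimeout() bool { return e.timeout }

func main() {
	fmt.Println(isTimeout(&fakeErr{timeout: true})) // true
	fmt.Println(isTimeout(errors.New("unrelated"))) // false
}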
    diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
    deleted file mode 100644
    index 8545ac4ad6..0000000000
    --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
    +++ /dev/null
    @@ -1,1313 +0,0 @@
    -// Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: google.golang.org/appengine/internal/log/log_service.proto
    -
    -package log
    -
    -import proto "github.com/golang/protobuf/proto"
    -import fmt "fmt"
    -import math "math"
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
    -
    -type LogServiceError_ErrorCode int32
    -
    -const (
    -	LogServiceError_OK              LogServiceError_ErrorCode = 0
    -	LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
    -	LogServiceError_STORAGE_ERROR   LogServiceError_ErrorCode = 2
    -)
    -
    -var LogServiceError_ErrorCode_name = map[int32]string{
    -	0: "OK",
    -	1: "INVALID_REQUEST",
    -	2: "STORAGE_ERROR",
    -}
    -var LogServiceError_ErrorCode_value = map[string]int32{
    -	"OK":              0,
    -	"INVALID_REQUEST": 1,
    -	"STORAGE_ERROR":   2,
    -}
    -
    -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
    -	p := new(LogServiceError_ErrorCode)
    -	*p = x
    -	return p
    -}
    -func (x LogServiceError_ErrorCode) String() string {
    -	return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
    -}
    -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
    -	if err != nil {
    -		return err
    -	}
    -	*x = LogServiceError_ErrorCode(value)
    -	return nil
    -}
    -func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
    -}
    -
    -type LogServiceError struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *LogServiceError) Reset()         { *m = LogServiceError{} }
    -func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
    -func (*LogServiceError) ProtoMessage()    {}
    -func (*LogServiceError) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
    -}
    -func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
    -}
    -func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
    -}
    -func (dst *LogServiceError) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogServiceError.Merge(dst, src)
    -}
    -func (m *LogServiceError) XXX_Size() int {
    -	return xxx_messageInfo_LogServiceError.Size(m)
    -}
    -func (m *LogServiceError) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogServiceError.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
    -
    -type UserAppLogLine struct {
    -	TimestampUsec        *int64   `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
    -	Level                *int64   `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
    -	Message              *string  `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *UserAppLogLine) Reset()         { *m = UserAppLogLine{} }
    -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
    -func (*UserAppLogLine) ProtoMessage()    {}
    -func (*UserAppLogLine) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
    -}
    -func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
    -}
    -func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
    -}
    -func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_UserAppLogLine.Merge(dst, src)
    -}
    -func (m *UserAppLogLine) XXX_Size() int {
    -	return xxx_messageInfo_UserAppLogLine.Size(m)
    -}
    -func (m *UserAppLogLine) XXX_DiscardUnknown() {
    -	xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
    -
    -func (m *UserAppLogLine) GetTimestampUsec() int64 {
    -	if m != nil && m.TimestampUsec != nil {
    -		return *m.TimestampUsec
    -	}
    -	return 0
    -}
    -
    -func (m *UserAppLogLine) GetLevel() int64 {
    -	if m != nil && m.Level != nil {
    -		return *m.Level
    -	}
    -	return 0
    -}
    -
    -func (m *UserAppLogLine) GetMessage() string {
    -	if m != nil && m.Message != nil {
    -		return *m.Message
    -	}
    -	return ""
    -}
    -
    -type UserAppLogGroup struct {
    -	LogLine              []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *UserAppLogGroup) Reset()         { *m = UserAppLogGroup{} }
    -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
    -func (*UserAppLogGroup) ProtoMessage()    {}
    -func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
    -}
    -func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
    -}
    -func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
    -}
    -func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
    -}
    -func (m *UserAppLogGroup) XXX_Size() int {
    -	return xxx_messageInfo_UserAppLogGroup.Size(m)
    -}
    -func (m *UserAppLogGroup) XXX_DiscardUnknown() {
    -	xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
    -
    -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
    -	if m != nil {
    -		return m.LogLine
    -	}
    -	return nil
    -}
    -
    -type FlushRequest struct {
    -	Logs                 []byte   `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *FlushRequest) Reset()         { *m = FlushRequest{} }
    -func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
    -func (*FlushRequest) ProtoMessage()    {}
    -func (*FlushRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
    -}
    -func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
    -}
    -func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *FlushRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_FlushRequest.Merge(dst, src)
    -}
    -func (m *FlushRequest) XXX_Size() int {
    -	return xxx_messageInfo_FlushRequest.Size(m)
    -}
    -func (m *FlushRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_FlushRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
    -
    -func (m *FlushRequest) GetLogs() []byte {
    -	if m != nil {
    -		return m.Logs
    -	}
    -	return nil
    -}
    -
    -type SetStatusRequest struct {
    -	Status               *string  `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *SetStatusRequest) Reset()         { *m = SetStatusRequest{} }
    -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
    -func (*SetStatusRequest) ProtoMessage()    {}
    -func (*SetStatusRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
    -}
    -func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
    -}
    -func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_SetStatusRequest.Merge(dst, src)
    -}
    -func (m *SetStatusRequest) XXX_Size() int {
    -	return xxx_messageInfo_SetStatusRequest.Size(m)
    -}
    -func (m *SetStatusRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
    -
    -func (m *SetStatusRequest) GetStatus() string {
    -	if m != nil && m.Status != nil {
    -		return *m.Status
    -	}
    -	return ""
    -}
    -
    -type LogOffset struct {
    -	RequestId            []byte   `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *LogOffset) Reset()         { *m = LogOffset{} }
    -func (m *LogOffset) String() string { return proto.CompactTextString(m) }
    -func (*LogOffset) ProtoMessage()    {}
    -func (*LogOffset) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
    -}
    -func (m *LogOffset) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogOffset.Unmarshal(m, b)
    -}
    -func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
    -}
    -func (dst *LogOffset) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogOffset.Merge(dst, src)
    -}
    -func (m *LogOffset) XXX_Size() int {
    -	return xxx_messageInfo_LogOffset.Size(m)
    -}
    -func (m *LogOffset) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogOffset.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogOffset proto.InternalMessageInfo
    -
    -func (m *LogOffset) GetRequestId() []byte {
    -	if m != nil {
    -		return m.RequestId
    -	}
    -	return nil
    -}
    -
    -type LogLine struct {
    -	Time                 *int64   `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
    -	Level                *int32   `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
    -	LogMessage           *string  `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *LogLine) Reset()         { *m = LogLine{} }
    -func (m *LogLine) String() string { return proto.CompactTextString(m) }
    -func (*LogLine) ProtoMessage()    {}
    -func (*LogLine) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
    -}
    -func (m *LogLine) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogLine.Unmarshal(m, b)
    -}
    -func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
    -}
    -func (dst *LogLine) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogLine.Merge(dst, src)
    -}
    -func (m *LogLine) XXX_Size() int {
    -	return xxx_messageInfo_LogLine.Size(m)
    -}
    -func (m *LogLine) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogLine.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogLine proto.InternalMessageInfo
    -
    -func (m *LogLine) GetTime() int64 {
    -	if m != nil && m.Time != nil {
    -		return *m.Time
    -	}
    -	return 0
    -}
    -
    -func (m *LogLine) GetLevel() int32 {
    -	if m != nil && m.Level != nil {
    -		return *m.Level
    -	}
    -	return 0
    -}
    -
    -func (m *LogLine) GetLogMessage() string {
    -	if m != nil && m.LogMessage != nil {
    -		return *m.LogMessage
    -	}
    -	return ""
    -}
    -
    -type RequestLog struct {
    -	AppId                   *string    `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
    -	ModuleId                *string    `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
    -	VersionId               *string    `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
    -	RequestId               []byte     `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
    -	Offset                  *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
    -	Ip                      *string    `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
    -	Nickname                *string    `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
    -	StartTime               *int64     `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
    -	EndTime                 *int64     `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
    -	Latency                 *int64     `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
    -	Mcycles                 *int64     `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
    -	Method                  *string    `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
    -	Resource                *string    `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
    -	HttpVersion             *string    `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
    -	Status                  *int32     `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
    -	ResponseSize            *int64     `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
    -	Referrer                *string    `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
    -	UserAgent               *string    `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
    -	UrlMapEntry             *string    `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
    -	Combined                *string    `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
    -	ApiMcycles              *int64     `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
    -	Host                    *string    `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
    -	Cost                    *float64   `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
    -	TaskQueueName           *string    `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
    -	TaskName                *string    `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
    -	WasLoadingRequest       *bool      `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
    -	PendingTime             *int64     `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
    -	ReplicaIndex            *int32     `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
    -	Finished                *bool      `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
    -	CloneKey                []byte     `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
    -	Line                    []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
    -	LinesIncomplete         *bool      `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
    -	AppEngineRelease        []byte     `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
    -	ExitReason              *int32     `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
    -	WasThrottledForTime     *bool      `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
    -	WasThrottledForRequests *bool      `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
    -	ThrottledTime           *int64     `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
    -	ServerName              []byte     `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
    -	XXX_NoUnkeyedLiteral    struct{}   `json:"-"`
    -	XXX_unrecognized        []byte     `json:"-"`
    -	XXX_sizecache           int32      `json:"-"`
    -}
    -
    -func (m *RequestLog) Reset()         { *m = RequestLog{} }
    -func (m *RequestLog) String() string { return proto.CompactTextString(m) }
    -func (*RequestLog) ProtoMessage()    {}
    -func (*RequestLog) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
    -}
    -func (m *RequestLog) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_RequestLog.Unmarshal(m, b)
    -}
    -func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
    -}
    -func (dst *RequestLog) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_RequestLog.Merge(dst, src)
    -}
    -func (m *RequestLog) XXX_Size() int {
    -	return xxx_messageInfo_RequestLog.Size(m)
    -}
    -func (m *RequestLog) XXX_DiscardUnknown() {
    -	xxx_messageInfo_RequestLog.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_RequestLog proto.InternalMessageInfo
    -
    -const Default_RequestLog_ModuleId string = "default"
    -const Default_RequestLog_ReplicaIndex int32 = -1
    -const Default_RequestLog_Finished bool = true
    -
    -func (m *RequestLog) GetAppId() string {
    -	if m != nil && m.AppId != nil {
    -		return *m.AppId
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetModuleId() string {
    -	if m != nil && m.ModuleId != nil {
    -		return *m.ModuleId
    -	}
    -	return Default_RequestLog_ModuleId
    -}
    -
    -func (m *RequestLog) GetVersionId() string {
    -	if m != nil && m.VersionId != nil {
    -		return *m.VersionId
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetRequestId() []byte {
    -	if m != nil {
    -		return m.RequestId
    -	}
    -	return nil
    -}
    -
    -func (m *RequestLog) GetOffset() *LogOffset {
    -	if m != nil {
    -		return m.Offset
    -	}
    -	return nil
    -}
    -
    -func (m *RequestLog) GetIp() string {
    -	if m != nil && m.Ip != nil {
    -		return *m.Ip
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetNickname() string {
    -	if m != nil && m.Nickname != nil {
    -		return *m.Nickname
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetStartTime() int64 {
    -	if m != nil && m.StartTime != nil {
    -		return *m.StartTime
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetEndTime() int64 {
    -	if m != nil && m.EndTime != nil {
    -		return *m.EndTime
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetLatency() int64 {
    -	if m != nil && m.Latency != nil {
    -		return *m.Latency
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetMcycles() int64 {
    -	if m != nil && m.Mcycles != nil {
    -		return *m.Mcycles
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetMethod() string {
    -	if m != nil && m.Method != nil {
    -		return *m.Method
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetResource() string {
    -	if m != nil && m.Resource != nil {
    -		return *m.Resource
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetHttpVersion() string {
    -	if m != nil && m.HttpVersion != nil {
    -		return *m.HttpVersion
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetStatus() int32 {
    -	if m != nil && m.Status != nil {
    -		return *m.Status
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetResponseSize() int64 {
    -	if m != nil && m.ResponseSize != nil {
    -		return *m.ResponseSize
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetReferrer() string {
    -	if m != nil && m.Referrer != nil {
    -		return *m.Referrer
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetUserAgent() string {
    -	if m != nil && m.UserAgent != nil {
    -		return *m.UserAgent
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetUrlMapEntry() string {
    -	if m != nil && m.UrlMapEntry != nil {
    -		return *m.UrlMapEntry
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetCombined() string {
    -	if m != nil && m.Combined != nil {
    -		return *m.Combined
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetApiMcycles() int64 {
    -	if m != nil && m.ApiMcycles != nil {
    -		return *m.ApiMcycles
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetHost() string {
    -	if m != nil && m.Host != nil {
    -		return *m.Host
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetCost() float64 {
    -	if m != nil && m.Cost != nil {
    -		return *m.Cost
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetTaskQueueName() string {
    -	if m != nil && m.TaskQueueName != nil {
    -		return *m.TaskQueueName
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetTaskName() string {
    -	if m != nil && m.TaskName != nil {
    -		return *m.TaskName
    -	}
    -	return ""
    -}
    -
    -func (m *RequestLog) GetWasLoadingRequest() bool {
    -	if m != nil && m.WasLoadingRequest != nil {
    -		return *m.WasLoadingRequest
    -	}
    -	return false
    -}
    -
    -func (m *RequestLog) GetPendingTime() int64 {
    -	if m != nil && m.PendingTime != nil {
    -		return *m.PendingTime
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetReplicaIndex() int32 {
    -	if m != nil && m.ReplicaIndex != nil {
    -		return *m.ReplicaIndex
    -	}
    -	return Default_RequestLog_ReplicaIndex
    -}
    -
    -func (m *RequestLog) GetFinished() bool {
    -	if m != nil && m.Finished != nil {
    -		return *m.Finished
    -	}
    -	return Default_RequestLog_Finished
    -}
    -
    -func (m *RequestLog) GetCloneKey() []byte {
    -	if m != nil {
    -		return m.CloneKey
    -	}
    -	return nil
    -}
    -
    -func (m *RequestLog) GetLine() []*LogLine {
    -	if m != nil {
    -		return m.Line
    -	}
    -	return nil
    -}
    -
    -func (m *RequestLog) GetLinesIncomplete() bool {
    -	if m != nil && m.LinesIncomplete != nil {
    -		return *m.LinesIncomplete
    -	}
    -	return false
    -}
    -
    -func (m *RequestLog) GetAppEngineRelease() []byte {
    -	if m != nil {
    -		return m.AppEngineRelease
    -	}
    -	return nil
    -}
    -
    -func (m *RequestLog) GetExitReason() int32 {
    -	if m != nil && m.ExitReason != nil {
    -		return *m.ExitReason
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetWasThrottledForTime() bool {
    -	if m != nil && m.WasThrottledForTime != nil {
    -		return *m.WasThrottledForTime
    -	}
    -	return false
    -}
    -
    -func (m *RequestLog) GetWasThrottledForRequests() bool {
    -	if m != nil && m.WasThrottledForRequests != nil {
    -		return *m.WasThrottledForRequests
    -	}
    -	return false
    -}
    -
    -func (m *RequestLog) GetThrottledTime() int64 {
    -	if m != nil && m.ThrottledTime != nil {
    -		return *m.ThrottledTime
    -	}
    -	return 0
    -}
    -
    -func (m *RequestLog) GetServerName() []byte {
    -	if m != nil {
    -		return m.ServerName
    -	}
    -	return nil
    -}
    -
    -type LogModuleVersion struct {
    -	ModuleId             *string  `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
    -	VersionId            *string  `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *LogModuleVersion) Reset()         { *m = LogModuleVersion{} }
    -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
    -func (*LogModuleVersion) ProtoMessage()    {}
    -func (*LogModuleVersion) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
    -}
    -func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
    -}
    -func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
    -}
    -func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogModuleVersion.Merge(dst, src)
    -}
    -func (m *LogModuleVersion) XXX_Size() int {
    -	return xxx_messageInfo_LogModuleVersion.Size(m)
    -}
    -func (m *LogModuleVersion) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
    -
    -const Default_LogModuleVersion_ModuleId string = "default"
    -
    -func (m *LogModuleVersion) GetModuleId() string {
    -	if m != nil && m.ModuleId != nil {
    -		return *m.ModuleId
    -	}
    -	return Default_LogModuleVersion_ModuleId
    -}
    -
    -func (m *LogModuleVersion) GetVersionId() string {
    -	if m != nil && m.VersionId != nil {
    -		return *m.VersionId
    -	}
    -	return ""
    -}
    -
    -type LogReadRequest struct {
    -	AppId                *string             `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
    -	VersionId            []string            `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
    -	ModuleVersion        []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
    -	StartTime            *int64              `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
    -	EndTime              *int64              `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
    -	Offset               *LogOffset          `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
    -	RequestId            [][]byte            `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
    -	MinimumLogLevel      *int32              `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
    -	IncludeIncomplete    *bool               `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
    -	Count                *int64              `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
    -	CombinedLogRegex     *string             `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
    -	HostRegex            *string             `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
    -	ReplicaIndex         *int32              `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
    -	IncludeAppLogs       *bool               `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
    -	AppLogsPerRequest    *int32              `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
    -	IncludeHost          *bool               `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
    -	IncludeAll           *bool               `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
    -	CacheIterator        *bool               `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
    -	NumShards            *int32              `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
    -	XXX_unrecognized     []byte              `json:"-"`
    -	XXX_sizecache        int32               `json:"-"`
    -}
    -
    -func (m *LogReadRequest) Reset()         { *m = LogReadRequest{} }
    -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
    -func (*LogReadRequest) ProtoMessage()    {}
    -func (*LogReadRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
    -}
    -func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
    -}
    -func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogReadRequest.Merge(dst, src)
    -}
    -func (m *LogReadRequest) XXX_Size() int {
    -	return xxx_messageInfo_LogReadRequest.Size(m)
    -}
    -func (m *LogReadRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
    -
    -func (m *LogReadRequest) GetAppId() string {
    -	if m != nil && m.AppId != nil {
    -		return *m.AppId
    -	}
    -	return ""
    -}
    -
    -func (m *LogReadRequest) GetVersionId() []string {
    -	if m != nil {
    -		return m.VersionId
    -	}
    -	return nil
    -}
    -
    -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
    -	if m != nil {
    -		return m.ModuleVersion
    -	}
    -	return nil
    -}
    -
    -func (m *LogReadRequest) GetStartTime() int64 {
    -	if m != nil && m.StartTime != nil {
    -		return *m.StartTime
    -	}
    -	return 0
    -}
    -
    -func (m *LogReadRequest) GetEndTime() int64 {
    -	if m != nil && m.EndTime != nil {
    -		return *m.EndTime
    -	}
    -	return 0
    -}
    -
    -func (m *LogReadRequest) GetOffset() *LogOffset {
    -	if m != nil {
    -		return m.Offset
    -	}
    -	return nil
    -}
    -
    -func (m *LogReadRequest) GetRequestId() [][]byte {
    -	if m != nil {
    -		return m.RequestId
    -	}
    -	return nil
    -}
    -
    -func (m *LogReadRequest) GetMinimumLogLevel() int32 {
    -	if m != nil && m.MinimumLogLevel != nil {
    -		return *m.MinimumLogLevel
    -	}
    -	return 0
    -}
    -
    -func (m *LogReadRequest) GetIncludeIncomplete() bool {
    -	if m != nil && m.IncludeIncomplete != nil {
    -		return *m.IncludeIncomplete
    -	}
    -	return false
    -}
    -
    -func (m *LogReadRequest) GetCount() int64 {
    -	if m != nil && m.Count != nil {
    -		return *m.Count
    -	}
    -	return 0
    -}
    -
    -func (m *LogReadRequest) GetCombinedLogRegex() string {
    -	if m != nil && m.CombinedLogRegex != nil {
    -		return *m.CombinedLogRegex
    -	}
    -	return ""
    -}
    -
    -func (m *LogReadRequest) GetHostRegex() string {
    -	if m != nil && m.HostRegex != nil {
    -		return *m.HostRegex
    -	}
    -	return ""
    -}
    -
    -func (m *LogReadRequest) GetReplicaIndex() int32 {
    -	if m != nil && m.ReplicaIndex != nil {
    -		return *m.ReplicaIndex
    -	}
    -	return 0
    -}
    -
    -func (m *LogReadRequest) GetIncludeAppLogs() bool {
    -	if m != nil && m.IncludeAppLogs != nil {
    -		return *m.IncludeAppLogs
    -	}
    -	return false
    -}
    -
    -func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
    -	if m != nil && m.AppLogsPerRequest != nil {
    -		return *m.AppLogsPerRequest
    -	}
    -	return 0
    -}
    -
    -func (m *LogReadRequest) GetIncludeHost() bool {
    -	if m != nil && m.IncludeHost != nil {
    -		return *m.IncludeHost
    -	}
    -	return false
    -}
    -
    -func (m *LogReadRequest) GetIncludeAll() bool {
    -	if m != nil && m.IncludeAll != nil {
    -		return *m.IncludeAll
    -	}
    -	return false
    -}
    -
    -func (m *LogReadRequest) GetCacheIterator() bool {
    -	if m != nil && m.CacheIterator != nil {
    -		return *m.CacheIterator
    -	}
    -	return false
    -}
    -
    -func (m *LogReadRequest) GetNumShards() int32 {
    -	if m != nil && m.NumShards != nil {
    -		return *m.NumShards
    -	}
    -	return 0
    -}
    -
    -type LogReadResponse struct {
    -	Log                  []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
    -	Offset               *LogOffset    `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
    -	LastEndTime          *int64        `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
    -	XXX_unrecognized     []byte        `json:"-"`
    -	XXX_sizecache        int32         `json:"-"`
    -}
    -
    -func (m *LogReadResponse) Reset()         { *m = LogReadResponse{} }
    -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
    -func (*LogReadResponse) ProtoMessage()    {}
    -func (*LogReadResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
    -}
    -func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
    -}
    -func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogReadResponse.Merge(dst, src)
    -}
    -func (m *LogReadResponse) XXX_Size() int {
    -	return xxx_messageInfo_LogReadResponse.Size(m)
    -}
    -func (m *LogReadResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
    -
    -func (m *LogReadResponse) GetLog() []*RequestLog {
    -	if m != nil {
    -		return m.Log
    -	}
    -	return nil
    -}
    -
    -func (m *LogReadResponse) GetOffset() *LogOffset {
    -	if m != nil {
    -		return m.Offset
    -	}
    -	return nil
    -}
    -
    -func (m *LogReadResponse) GetLastEndTime() int64 {
    -	if m != nil && m.LastEndTime != nil {
    -		return *m.LastEndTime
    -	}
    -	return 0
    -}
    -
    -type LogUsageRecord struct {
    -	VersionId            *string  `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
    -	StartTime            *int32   `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
    -	EndTime              *int32   `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
    -	Count                *int64   `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
    -	TotalSize            *int64   `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
    -	Records              *int32   `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *LogUsageRecord) Reset()         { *m = LogUsageRecord{} }
    -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
    -func (*LogUsageRecord) ProtoMessage()    {}
    -func (*LogUsageRecord) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
    -}
    -func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
    -}
    -func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
    -}
    -func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogUsageRecord.Merge(dst, src)
    -}
    -func (m *LogUsageRecord) XXX_Size() int {
    -	return xxx_messageInfo_LogUsageRecord.Size(m)
    -}
    -func (m *LogUsageRecord) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
    -
    -func (m *LogUsageRecord) GetVersionId() string {
    -	if m != nil && m.VersionId != nil {
    -		return *m.VersionId
    -	}
    -	return ""
    -}
    -
    -func (m *LogUsageRecord) GetStartTime() int32 {
    -	if m != nil && m.StartTime != nil {
    -		return *m.StartTime
    -	}
    -	return 0
    -}
    -
    -func (m *LogUsageRecord) GetEndTime() int32 {
    -	if m != nil && m.EndTime != nil {
    -		return *m.EndTime
    -	}
    -	return 0
    -}
    -
    -func (m *LogUsageRecord) GetCount() int64 {
    -	if m != nil && m.Count != nil {
    -		return *m.Count
    -	}
    -	return 0
    -}
    -
    -func (m *LogUsageRecord) GetTotalSize() int64 {
    -	if m != nil && m.TotalSize != nil {
    -		return *m.TotalSize
    -	}
    -	return 0
    -}
    -
    -func (m *LogUsageRecord) GetRecords() int32 {
    -	if m != nil && m.Records != nil {
    -		return *m.Records
    -	}
    -	return 0
    -}
    -
    -type LogUsageRequest struct {
    -	AppId                *string  `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
    -	VersionId            []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
    -	StartTime            *int32   `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
    -	EndTime              *int32   `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
    -	ResolutionHours      *uint32  `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
    -	CombineVersions      *bool    `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
    -	UsageVersion         *int32   `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
    -	VersionsOnly         *bool    `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *LogUsageRequest) Reset()         { *m = LogUsageRequest{} }
    -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
    -func (*LogUsageRequest) ProtoMessage()    {}
    -func (*LogUsageRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
    -}
    -func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
    -}
    -func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogUsageRequest.Merge(dst, src)
    -}
    -func (m *LogUsageRequest) XXX_Size() int {
    -	return xxx_messageInfo_LogUsageRequest.Size(m)
    -}
    -func (m *LogUsageRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
    -
    -const Default_LogUsageRequest_ResolutionHours uint32 = 1
    -
    -func (m *LogUsageRequest) GetAppId() string {
    -	if m != nil && m.AppId != nil {
    -		return *m.AppId
    -	}
    -	return ""
    -}
    -
    -func (m *LogUsageRequest) GetVersionId() []string {
    -	if m != nil {
    -		return m.VersionId
    -	}
    -	return nil
    -}
    -
    -func (m *LogUsageRequest) GetStartTime() int32 {
    -	if m != nil && m.StartTime != nil {
    -		return *m.StartTime
    -	}
    -	return 0
    -}
    -
    -func (m *LogUsageRequest) GetEndTime() int32 {
    -	if m != nil && m.EndTime != nil {
    -		return *m.EndTime
    -	}
    -	return 0
    -}
    -
    -func (m *LogUsageRequest) GetResolutionHours() uint32 {
    -	if m != nil && m.ResolutionHours != nil {
    -		return *m.ResolutionHours
    -	}
    -	return Default_LogUsageRequest_ResolutionHours
    -}
    -
    -func (m *LogUsageRequest) GetCombineVersions() bool {
    -	if m != nil && m.CombineVersions != nil {
    -		return *m.CombineVersions
    -	}
    -	return false
    -}
    -
    -func (m *LogUsageRequest) GetUsageVersion() int32 {
    -	if m != nil && m.UsageVersion != nil {
    -		return *m.UsageVersion
    -	}
    -	return 0
    -}
    -
    -func (m *LogUsageRequest) GetVersionsOnly() bool {
    -	if m != nil && m.VersionsOnly != nil {
    -		return *m.VersionsOnly
    -	}
    -	return false
    -}
    -
    -type LogUsageResponse struct {
    -	Usage                []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
    -	Summary              *LogUsageRecord   `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *LogUsageResponse) Reset()         { *m = LogUsageResponse{} }
    -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
    -func (*LogUsageResponse) ProtoMessage()    {}
    -func (*LogUsageResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
    -}
    -func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
    -}
    -func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_LogUsageResponse.Merge(dst, src)
    -}
    -func (m *LogUsageResponse) XXX_Size() int {
    -	return xxx_messageInfo_LogUsageResponse.Size(m)
    -}
    -func (m *LogUsageResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
    -
    -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
    -	if m != nil {
    -		return m.Usage
    -	}
    -	return nil
    -}
    -
    -func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
    -	if m != nil {
    -		return m.Summary
    -	}
    -	return nil
    -}
    -
    -func init() {
    -	proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
    -	proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
    -	proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
    -	proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
    -	proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
    -	proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
    -	proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
    -	proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
    -	proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
    -	proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
    -	proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
    -	proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
    -	proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
    -	proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
    -}
    -
    -func init() {
    -	proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
    -}
    -
    -var fileDescriptor_log_service_f054fd4b5012319d = []byte{
    -	// 1553 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
    -	0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
    -	0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
    -	0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
    -	0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
    -	0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
    -	0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
    -	0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
    -	0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
    -	0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
    -	0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
    -	0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
    -	0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
    -	0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
    -	0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
    -	0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
    -	0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
    -	0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
    -	0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
    -	0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
    -	0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
    -	0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
    -	0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
    -	0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
    -	0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
    -	0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
    -	0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
    -	0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
    -	0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
    -	0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
    -	0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
    -	0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
    -	0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
    -	0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
    -	0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
    -	0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
    -	0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
    -	0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
    -	0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
    -	0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
    -	0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
    -	0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
    -	0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
    -	0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
    -	0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
    -	0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
    -	0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
    -	0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
    -	0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
    -	0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
    -	0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
    -	0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
    -	0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
    -	0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
    -	0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
    -	0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
    -	0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
    -	0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
    -	0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
    -	0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
    -	0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
    -	0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
    -	0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
    -	0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
    -	0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
    -	0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
    -	0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
    -	0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
    -	0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
    -	0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
    -	0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
    -	0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
    -	0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
    -	0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
    -	0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
    -	0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
    -	0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
    -	0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
    -	0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
    -	0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
    -	0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
    -	0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
    -	0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
    -	0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
    -	0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
    -	0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
    -	0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
    -	0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
    -	0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
    -	0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
    -	0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
    -	0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
    -	0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
    -	0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
    -	0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
    -	0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
    -	0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
    -	0x00,
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
    deleted file mode 100644
    index 8981dc4757..0000000000
    --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto
    +++ /dev/null
    @@ -1,150 +0,0 @@
    -syntax = "proto2";
    -option go_package = "log";
    -
    -package appengine;
    -
    -message LogServiceError {
    -  enum ErrorCode {
    -    OK  = 0;
    -    INVALID_REQUEST = 1;
    -    STORAGE_ERROR = 2;
    -  }
    -}
    -
    -message UserAppLogLine {
    -  required int64 timestamp_usec = 1;
    -  required int64 level = 2;
    -  required string message = 3;
    -}
    -
    -message UserAppLogGroup {
    -  repeated UserAppLogLine log_line = 2;
    -}
    -
    -message FlushRequest {
    -  optional bytes logs = 1;
    -}
    -
    -message SetStatusRequest {
    -  required string status = 1;
    -}
    -
    -
    -message LogOffset {
    -  optional bytes request_id = 1;
    -}
    -
    -message LogLine {
    -  required int64 time = 1;
    -  required int32 level = 2;
    -  required string log_message = 3;
    -}
    -
    -message RequestLog {
    -  required string app_id = 1;
    -  optional string module_id = 37 [default="default"];
    -  required string version_id = 2;
    -  required bytes request_id = 3;
    -  optional LogOffset offset = 35;
    -  required string ip = 4;
    -  optional string nickname = 5;
    -  required int64 start_time = 6;
    -  required int64 end_time = 7;
    -  required int64 latency = 8;
    -  required int64 mcycles = 9;
    -  required string method = 10;
    -  required string resource = 11;
    -  required string http_version = 12;
    -  required int32 status = 13;
    -  required int64 response_size = 14;
    -  optional string referrer = 15;
    -  optional string user_agent = 16;
    -  required string url_map_entry = 17;
    -  required string combined = 18;
    -  optional int64 api_mcycles = 19;
    -  optional string host = 20;
    -  optional double cost = 21;
    -
    -  optional string task_queue_name = 22;
    -  optional string task_name = 23;
    -
    -  optional bool was_loading_request = 24;
    -  optional int64 pending_time = 25;
    -  optional int32 replica_index = 26 [default = -1];
    -  optional bool finished = 27 [default = true];
    -  optional bytes clone_key = 28;
    -
    -  repeated LogLine line = 29;
    -
    -  optional bool lines_incomplete = 36;
    -  optional bytes app_engine_release = 38;
    -
    -  optional int32 exit_reason = 30;
    -  optional bool was_throttled_for_time = 31;
    -  optional bool was_throttled_for_requests = 32;
    -  optional int64 throttled_time = 33;
    -
    -  optional bytes server_name = 34;
    -}
    -
    -message LogModuleVersion {
    -  optional string module_id = 1 [default="default"];
    -  optional string version_id = 2;
    -}
    -
    -message LogReadRequest {
    -  required string app_id = 1;
    -  repeated string version_id = 2;
    -  repeated LogModuleVersion module_version = 19;
    -
    -  optional int64 start_time = 3;
    -  optional int64 end_time = 4;
    -  optional LogOffset offset = 5;
    -  repeated bytes request_id = 6;
    -
    -  optional int32 minimum_log_level = 7;
    -  optional bool include_incomplete = 8;
    -  optional int64 count = 9;
    -
    -  optional string combined_log_regex = 14;
    -  optional string host_regex = 15;
    -  optional int32 replica_index = 16;
    -
    -  optional bool include_app_logs = 10;
    -  optional int32 app_logs_per_request = 17;
    -  optional bool include_host = 11;
    -  optional bool include_all = 12;
    -  optional bool cache_iterator = 13;
    -  optional int32 num_shards = 18;
    -}
    -
    -message LogReadResponse {
    -  repeated RequestLog log = 1;
    -  optional LogOffset offset = 2;
    -  optional int64 last_end_time = 3;
    -}
    -
    -message LogUsageRecord {
    -  optional string version_id = 1;
    -  optional int32 start_time = 2;
    -  optional int32 end_time = 3;
    -  optional int64 count = 4;
    -  optional int64 total_size = 5;
    -  optional int32 records = 6;
    -}
    -
    -message LogUsageRequest {
    -  required string app_id = 1;
    -  repeated string version_id = 2;
    -  optional int32 start_time = 3;
    -  optional int32 end_time = 4;
    -  optional uint32 resolution_hours = 5 [default = 1];
    -  optional bool combine_versions = 6;
    -  optional int32 usage_version = 7;
    -  optional bool versions_only = 8;
    -}
    -
    -message LogUsageResponse {
    -  repeated LogUsageRecord usage = 1;
    -  optional LogUsageRecord summary = 2;
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
    deleted file mode 100644
    index afd0ae84fd..0000000000
    --- a/vendor/google.golang.org/appengine/internal/main.go
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build appengine
    -// +build appengine
    -
    -package internal
    -
    -import (
    -	"appengine_internal"
    -)
    -
    -func Main() {
    -	MainPath = ""
    -	appengine_internal.Main()
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go
    deleted file mode 100644
    index 357dce4dd0..0000000000
    --- a/vendor/google.golang.org/appengine/internal/main_common.go
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -package internal
    -
    -// MainPath stores the file path of the main package. On App Engine Standard
    -// using Go version 1.9 and below, this will be unset. On App Engine Flex and
    -// App Engine Standard second-gen (Go 1.11 and above), this will be the
    -// filepath to package main.
    -var MainPath string
    diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
    deleted file mode 100644
    index 86a8caf06f..0000000000
    --- a/vendor/google.golang.org/appengine/internal/main_vm.go
    +++ /dev/null
    @@ -1,70 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -//go:build !appengine
    -// +build !appengine
    -
    -package internal
    -
    -import (
    -	"io"
    -	"log"
    -	"net/http"
    -	"net/url"
    -	"os"
    -	"path/filepath"
    -	"runtime"
    -)
    -
    -func Main() {
    -	MainPath = filepath.Dir(findMainPath())
    -	installHealthChecker(http.DefaultServeMux)
    -
    -	port := "8080"
    -	if s := os.Getenv("PORT"); s != "" {
    -		port = s
    -	}
    -
    -	host := ""
    -	if IsDevAppServer() {
    -		host = "127.0.0.1"
    -	}
    -	if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil {
    -		log.Fatalf("http.ListenAndServe: %v", err)
    -	}
    -}
    -
    -// Find the path to package main by looking at the root Caller.
    -func findMainPath() string {
    -	pc := make([]uintptr, 100)
    -	n := runtime.Callers(2, pc)
    -	frames := runtime.CallersFrames(pc[:n])
    -	for {
    -		frame, more := frames.Next()
    -		// Tests won't have package main, instead they have testing.tRunner
    -		if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
    -			return frame.File
    -		}
    -		if !more {
    -			break
    -		}
    -	}
    -	return ""
    -}
    -
    -func installHealthChecker(mux *http.ServeMux) {
    -	// If no health check handler has been installed by this point, add a trivial one.
    -	const healthPath = "/_ah/health"
    -	hreq := &http.Request{
    -		Method: "GET",
    -		URL: &url.URL{
    -			Path: healthPath,
    -		},
    -	}
    -	if _, pat := mux.Handler(hreq); pat != healthPath {
    -		mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
    -			io.WriteString(w, "ok")
    -		})
    -	}
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
    deleted file mode 100644
    index c4ba63bb48..0000000000
    --- a/vendor/google.golang.org/appengine/internal/metadata.go
    +++ /dev/null
    @@ -1,60 +0,0 @@
    -// Copyright 2014 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package internal
    -
    -// This file has code for accessing metadata.
    -//
    -// References:
    -//	https://cloud.google.com/compute/docs/metadata
    -
    -import (
    -	"fmt"
    -	"io/ioutil"
    -	"net/http"
    -	"net/url"
    -)
    -
    -const (
    -	metadataHost = "metadata"
    -	metadataPath = "/computeMetadata/v1/"
    -)
    -
    -var (
    -	metadataRequestHeaders = http.Header{
    -		"Metadata-Flavor": []string{"Google"},
    -	}
    -)
    -
    -// TODO(dsymonds): Do we need to support default values, like Python?
    -func mustGetMetadata(key string) []byte {
    -	b, err := getMetadata(key)
    -	if err != nil {
    -		panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
    -	}
    -	return b
    -}
    -
    -func getMetadata(key string) ([]byte, error) {
    -	// TODO(dsymonds): May need to use url.Parse to support keys with query args.
    -	req := &http.Request{
    -		Method: "GET",
    -		URL: &url.URL{
    -			Scheme: "http",
    -			Host:   metadataHost,
    -			Path:   metadataPath + key,
    -		},
    -		Header: metadataRequestHeaders,
    -		Host:   metadataHost,
    -	}
    -	resp, err := http.DefaultClient.Do(req)
    -	if err != nil {
    -		return nil, err
    -	}
    -	defer resp.Body.Close()
    -	if resp.StatusCode != 200 {
    -		return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
    -	}
    -	return ioutil.ReadAll(resp.Body)
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
    deleted file mode 100644
    index ddfc0c04a1..0000000000
    --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
    +++ /dev/null
    @@ -1,786 +0,0 @@
    -// Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: google.golang.org/appengine/internal/modules/modules_service.proto
    -
    -package modules
    -
    -import proto "github.com/golang/protobuf/proto"
    -import fmt "fmt"
    -import math "math"
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
    -
    -type ModulesServiceError_ErrorCode int32
    -
    -const (
    -	ModulesServiceError_OK                ModulesServiceError_ErrorCode = 0
    -	ModulesServiceError_INVALID_MODULE    ModulesServiceError_ErrorCode = 1
    -	ModulesServiceError_INVALID_VERSION   ModulesServiceError_ErrorCode = 2
    -	ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
    -	ModulesServiceError_TRANSIENT_ERROR   ModulesServiceError_ErrorCode = 4
    -	ModulesServiceError_UNEXPECTED_STATE  ModulesServiceError_ErrorCode = 5
    -)
    -
    -var ModulesServiceError_ErrorCode_name = map[int32]string{
    -	0: "OK",
    -	1: "INVALID_MODULE",
    -	2: "INVALID_VERSION",
    -	3: "INVALID_INSTANCES",
    -	4: "TRANSIENT_ERROR",
    -	5: "UNEXPECTED_STATE",
    -}
    -var ModulesServiceError_ErrorCode_value = map[string]int32{
    -	"OK":                0,
    -	"INVALID_MODULE":    1,
    -	"INVALID_VERSION":   2,
    -	"INVALID_INSTANCES": 3,
    -	"TRANSIENT_ERROR":   4,
    -	"UNEXPECTED_STATE":  5,
    -}
    -
    -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
    -	p := new(ModulesServiceError_ErrorCode)
    -	*p = x
    -	return p
    -}
    -func (x ModulesServiceError_ErrorCode) String() string {
    -	return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
    -}
    -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
    -	if err != nil {
    -		return err
    -	}
    -	*x = ModulesServiceError_ErrorCode(value)
    -	return nil
    -}
    -func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0}
    -}
    -
    -type ModulesServiceError struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *ModulesServiceError) Reset()         { *m = ModulesServiceError{} }
    -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
    -func (*ModulesServiceError) ProtoMessage()    {}
    -func (*ModulesServiceError) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0}
    -}
    -func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b)
    -}
    -func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic)
    -}
    -func (dst *ModulesServiceError) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ModulesServiceError.Merge(dst, src)
    -}
    -func (m *ModulesServiceError) XXX_Size() int {
    -	return xxx_messageInfo_ModulesServiceError.Size(m)
    -}
    -func (m *ModulesServiceError) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ModulesServiceError.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo
    -
    -type GetModulesRequest struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetModulesRequest) Reset()         { *m = GetModulesRequest{} }
    -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetModulesRequest) ProtoMessage()    {}
    -func (*GetModulesRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1}
    -}
    -func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b)
    -}
    -func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetModulesRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetModulesRequest.Merge(dst, src)
    -}
    -func (m *GetModulesRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetModulesRequest.Size(m)
    -}
    -func (m *GetModulesRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetModulesRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo
    -
    -type GetModulesResponse struct {
    -	Module               []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetModulesResponse) Reset()         { *m = GetModulesResponse{} }
    -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetModulesResponse) ProtoMessage()    {}
    -func (*GetModulesResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2}
    -}
    -func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b)
    -}
    -func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetModulesResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetModulesResponse.Merge(dst, src)
    -}
    -func (m *GetModulesResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetModulesResponse.Size(m)
    -}
    -func (m *GetModulesResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetModulesResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo
    -
    -func (m *GetModulesResponse) GetModule() []string {
    -	if m != nil {
    -		return m.Module
    -	}
    -	return nil
    -}
    -
    -type GetVersionsRequest struct {
    -	Module               *string  `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetVersionsRequest) Reset()         { *m = GetVersionsRequest{} }
    -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetVersionsRequest) ProtoMessage()    {}
    -func (*GetVersionsRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3}
    -}
    -func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b)
    -}
    -func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetVersionsRequest.Merge(dst, src)
    -}
    -func (m *GetVersionsRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetVersionsRequest.Size(m)
    -}
    -func (m *GetVersionsRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo
    -
    -func (m *GetVersionsRequest) GetModule() string {
    -	if m != nil && m.Module != nil {
    -		return *m.Module
    -	}
    -	return ""
    -}
    -
    -type GetVersionsResponse struct {
    -	Version              []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetVersionsResponse) Reset()         { *m = GetVersionsResponse{} }
    -func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetVersionsResponse) ProtoMessage()    {}
    -func (*GetVersionsResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4}
    -}
    -func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b)
    -}
    -func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetVersionsResponse.Merge(dst, src)
    -}
    -func (m *GetVersionsResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetVersionsResponse.Size(m)
    -}
    -func (m *GetVersionsResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo
    -
    -func (m *GetVersionsResponse) GetVersion() []string {
    -	if m != nil {
    -		return m.Version
    -	}
    -	return nil
    -}
    -
    -type GetDefaultVersionRequest struct {
    -	Module               *string  `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetDefaultVersionRequest) Reset()         { *m = GetDefaultVersionRequest{} }
    -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetDefaultVersionRequest) ProtoMessage()    {}
    -func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5}
    -}
    -func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b)
    -}
    -func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src)
    -}
    -func (m *GetDefaultVersionRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetDefaultVersionRequest.Size(m)
    -}
    -func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo
    -
    -func (m *GetDefaultVersionRequest) GetModule() string {
    -	if m != nil && m.Module != nil {
    -		return *m.Module
    -	}
    -	return ""
    -}
    -
    -type GetDefaultVersionResponse struct {
    -	Version              *string  `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetDefaultVersionResponse) Reset()         { *m = GetDefaultVersionResponse{} }
    -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetDefaultVersionResponse) ProtoMessage()    {}
    -func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6}
    -}
    -func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b)
    -}
    -func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src)
    -}
    -func (m *GetDefaultVersionResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetDefaultVersionResponse.Size(m)
    -}
    -func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo
    -
    -func (m *GetDefaultVersionResponse) GetVersion() string {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return ""
    -}
    -
    -type GetNumInstancesRequest struct {
    -	Module               *string  `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
    -	Version              *string  `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetNumInstancesRequest) Reset()         { *m = GetNumInstancesRequest{} }
    -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetNumInstancesRequest) ProtoMessage()    {}
    -func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7}
    -}
    -func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b)
    -}
    -func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src)
    -}
    -func (m *GetNumInstancesRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetNumInstancesRequest.Size(m)
    -}
    -func (m *GetNumInstancesRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo
    -
    -func (m *GetNumInstancesRequest) GetModule() string {
    -	if m != nil && m.Module != nil {
    -		return *m.Module
    -	}
    -	return ""
    -}
    -
    -func (m *GetNumInstancesRequest) GetVersion() string {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return ""
    -}
    -
    -type GetNumInstancesResponse struct {
    -	Instances            *int64   `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetNumInstancesResponse) Reset()         { *m = GetNumInstancesResponse{} }
    -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetNumInstancesResponse) ProtoMessage()    {}
    -func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8}
    -}
    -func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b)
    -}
    -func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src)
    -}
    -func (m *GetNumInstancesResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetNumInstancesResponse.Size(m)
    -}
    -func (m *GetNumInstancesResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo
    -
    -func (m *GetNumInstancesResponse) GetInstances() int64 {
    -	if m != nil && m.Instances != nil {
    -		return *m.Instances
    -	}
    -	return 0
    -}
    -
    -type SetNumInstancesRequest struct {
    -	Module               *string  `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
    -	Version              *string  `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
    -	Instances            *int64   `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *SetNumInstancesRequest) Reset()         { *m = SetNumInstancesRequest{} }
    -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
    -func (*SetNumInstancesRequest) ProtoMessage()    {}
    -func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9}
    -}
    -func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b)
    -}
    -func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src)
    -}
    -func (m *SetNumInstancesRequest) XXX_Size() int {
    -	return xxx_messageInfo_SetNumInstancesRequest.Size(m)
    -}
    -func (m *SetNumInstancesRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo
    -
    -func (m *SetNumInstancesRequest) GetModule() string {
    -	if m != nil && m.Module != nil {
    -		return *m.Module
    -	}
    -	return ""
    -}
    -
    -func (m *SetNumInstancesRequest) GetVersion() string {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return ""
    -}
    -
    -func (m *SetNumInstancesRequest) GetInstances() int64 {
    -	if m != nil && m.Instances != nil {
    -		return *m.Instances
    -	}
    -	return 0
    -}
    -
    -type SetNumInstancesResponse struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *SetNumInstancesResponse) Reset()         { *m = SetNumInstancesResponse{} }
    -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
    -func (*SetNumInstancesResponse) ProtoMessage()    {}
    -func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10}
    -}
    -func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b)
    -}
    -func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src)
    -}
    -func (m *SetNumInstancesResponse) XXX_Size() int {
    -	return xxx_messageInfo_SetNumInstancesResponse.Size(m)
    -}
    -func (m *SetNumInstancesResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo
    -
    -type StartModuleRequest struct {
    -	Module               *string  `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
    -	Version              *string  `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *StartModuleRequest) Reset()         { *m = StartModuleRequest{} }
    -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
    -func (*StartModuleRequest) ProtoMessage()    {}
    -func (*StartModuleRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11}
    -}
    -func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b)
    -}
    -func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *StartModuleRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_StartModuleRequest.Merge(dst, src)
    -}
    -func (m *StartModuleRequest) XXX_Size() int {
    -	return xxx_messageInfo_StartModuleRequest.Size(m)
    -}
    -func (m *StartModuleRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_StartModuleRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo
    -
    -func (m *StartModuleRequest) GetModule() string {
    -	if m != nil && m.Module != nil {
    -		return *m.Module
    -	}
    -	return ""
    -}
    -
    -func (m *StartModuleRequest) GetVersion() string {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return ""
    -}
    -
    -type StartModuleResponse struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *StartModuleResponse) Reset()         { *m = StartModuleResponse{} }
    -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
    -func (*StartModuleResponse) ProtoMessage()    {}
    -func (*StartModuleResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12}
    -}
    -func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b)
    -}
    -func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *StartModuleResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_StartModuleResponse.Merge(dst, src)
    -}
    -func (m *StartModuleResponse) XXX_Size() int {
    -	return xxx_messageInfo_StartModuleResponse.Size(m)
    -}
    -func (m *StartModuleResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_StartModuleResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo
    -
    -type StopModuleRequest struct {
    -	Module               *string  `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
    -	Version              *string  `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *StopModuleRequest) Reset()         { *m = StopModuleRequest{} }
    -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
    -func (*StopModuleRequest) ProtoMessage()    {}
    -func (*StopModuleRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13}
    -}
    -func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b)
    -}
    -func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *StopModuleRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_StopModuleRequest.Merge(dst, src)
    -}
    -func (m *StopModuleRequest) XXX_Size() int {
    -	return xxx_messageInfo_StopModuleRequest.Size(m)
    -}
    -func (m *StopModuleRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_StopModuleRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo
    -
    -func (m *StopModuleRequest) GetModule() string {
    -	if m != nil && m.Module != nil {
    -		return *m.Module
    -	}
    -	return ""
    -}
    -
    -func (m *StopModuleRequest) GetVersion() string {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return ""
    -}
    -
    -type StopModuleResponse struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *StopModuleResponse) Reset()         { *m = StopModuleResponse{} }
    -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
    -func (*StopModuleResponse) ProtoMessage()    {}
    -func (*StopModuleResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14}
    -}
    -func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b)
    -}
    -func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *StopModuleResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_StopModuleResponse.Merge(dst, src)
    -}
    -func (m *StopModuleResponse) XXX_Size() int {
    -	return xxx_messageInfo_StopModuleResponse.Size(m)
    -}
    -func (m *StopModuleResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_StopModuleResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo
    -
    -type GetHostnameRequest struct {
    -	Module               *string  `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
    -	Version              *string  `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
    -	Instance             *string  `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetHostnameRequest) Reset()         { *m = GetHostnameRequest{} }
    -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
    -func (*GetHostnameRequest) ProtoMessage()    {}
    -func (*GetHostnameRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15}
    -}
    -func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b)
    -}
    -func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetHostnameRequest.Merge(dst, src)
    -}
    -func (m *GetHostnameRequest) XXX_Size() int {
    -	return xxx_messageInfo_GetHostnameRequest.Size(m)
    -}
    -func (m *GetHostnameRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo
    -
    -func (m *GetHostnameRequest) GetModule() string {
    -	if m != nil && m.Module != nil {
    -		return *m.Module
    -	}
    -	return ""
    -}
    -
    -func (m *GetHostnameRequest) GetVersion() string {
    -	if m != nil && m.Version != nil {
    -		return *m.Version
    -	}
    -	return ""
    -}
    -
    -func (m *GetHostnameRequest) GetInstance() string {
    -	if m != nil && m.Instance != nil {
    -		return *m.Instance
    -	}
    -	return ""
    -}
    -
    -type GetHostnameResponse struct {
    -	Hostname             *string  `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *GetHostnameResponse) Reset()         { *m = GetHostnameResponse{} }
    -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
    -func (*GetHostnameResponse) ProtoMessage()    {}
    -func (*GetHostnameResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16}
    -}
    -func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b)
    -}
    -func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_GetHostnameResponse.Merge(dst, src)
    -}
    -func (m *GetHostnameResponse) XXX_Size() int {
    -	return xxx_messageInfo_GetHostnameResponse.Size(m)
    -}
    -func (m *GetHostnameResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo
    -
    -func (m *GetHostnameResponse) GetHostname() string {
    -	if m != nil && m.Hostname != nil {
    -		return *m.Hostname
    -	}
    -	return ""
    -}
    -
    -func init() {
    -	proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError")
    -	proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest")
    -	proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse")
    -	proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest")
    -	proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse")
    -	proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest")
    -	proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse")
    -	proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest")
    -	proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse")
    -	proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest")
    -	proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse")
    -	proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest")
    -	proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse")
    -	proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest")
    -	proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse")
    -	proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest")
    -	proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse")
    -}
    -
    -func init() {
    -	proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a)
    -}
    -
    -var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{
    -	// 457 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30,
    -	0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c,
    -	0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a,
    -	0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6,
    -	0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e,
    -	0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79,
    -	0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c,
    -	0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05,
    -	0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8,
    -	0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34,
    -	0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16,
    -	0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd,
    -	0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72,
    -	0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f,
    -	0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36,
    -	0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b,
    -	0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41,
    -	0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8,
    -	0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad,
    -	0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8,
    -	0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39,
    -	0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec,
    -	0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc,
    -	0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda,
    -	0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea,
    -	0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd,
    -	0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18,
    -	0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff,
    -	0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00,
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
    deleted file mode 100644
    index d29f0065a2..0000000000
    --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
    +++ /dev/null
    @@ -1,80 +0,0 @@
    -syntax = "proto2";
    -option go_package = "modules";
    -
    -package appengine;
    -
    -message ModulesServiceError {
    -  enum ErrorCode {
    -    OK  = 0;
    -    INVALID_MODULE = 1;
    -    INVALID_VERSION = 2;
    -    INVALID_INSTANCES = 3;
    -    TRANSIENT_ERROR = 4;
    -    UNEXPECTED_STATE = 5;
    -  }
    -}
    -
    -message GetModulesRequest {
    -}
    -
    -message GetModulesResponse {
    -  repeated string module = 1;
    -}
    -
    -message GetVersionsRequest {
    -  optional string module = 1;
    -}
    -
    -message GetVersionsResponse {
    -  repeated string version = 1;
    -}
    -
    -message GetDefaultVersionRequest {
    -  optional string module = 1;
    -}
    -
    -message GetDefaultVersionResponse {
    -  required string version = 1;
    -}
    -
    -message GetNumInstancesRequest {
    -  optional string module = 1;
    -  optional string version = 2;
    -}
    -
    -message GetNumInstancesResponse {
    -  required int64 instances = 1;
    -}
    -
    -message SetNumInstancesRequest {
    -  optional string module = 1;
    -  optional string version = 2;
    -  required int64 instances = 3;
    -}
    -
    -message SetNumInstancesResponse {}
    -
    -message StartModuleRequest {
    -  required string module = 1;
    -  required string version = 2;
    -}
    -
    -message StartModuleResponse {}
    -
    -message StopModuleRequest {
    -  optional string module = 1;
    -  optional string version = 2;
    -}
    -
    -message StopModuleResponse {}
    -
    -message GetHostnameRequest {
    -  optional string module = 1;
    -  optional string version = 2;
    -  optional string instance = 3;
    -}
    -
    -message GetHostnameResponse {
    -  required string hostname = 1;
    -}
    -
    diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
    deleted file mode 100644
    index fe429720e1..0000000000
    --- a/vendor/google.golang.org/appengine/internal/net.go
    +++ /dev/null
    @@ -1,56 +0,0 @@
    -// Copyright 2014 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package internal
    -
    -// This file implements a network dialer that limits the number of concurrent connections.
    -// It is only used for API calls.
    -
    -import (
    -	"log"
    -	"net"
    -	"runtime"
    -	"sync"
    -	"time"
    -)
    -
    -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
    -
    -func limitRelease() {
    -	// non-blocking
    -	select {
    -	case <-limitSem:
    -	default:
    -		// This should not normally happen.
    -		log.Print("appengine: unbalanced limitSem release!")
    -	}
    -}
    -
    -func limitDial(network, addr string) (net.Conn, error) {
    -	limitSem <- 1
    -
    -	// Dial with a timeout in case the API host is MIA.
    -	// The connection should normally be very fast.
    -	conn, err := net.DialTimeout(network, addr, 10*time.Second)
    -	if err != nil {
    -		limitRelease()
    -		return nil, err
    -	}
    -	lc := &limitConn{Conn: conn}
    -	runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
    -	return lc, nil
    -}
    -
    -type limitConn struct {
    -	close sync.Once
    -	net.Conn
    -}
    -
    -func (lc *limitConn) Close() error {
    -	defer lc.close.Do(func() {
    -		limitRelease()
    -		runtime.SetFinalizer(lc, nil)
    -	})
    -	return lc.Conn.Close()
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
    deleted file mode 100644
    index 2fdb546a63..0000000000
    --- a/vendor/google.golang.org/appengine/internal/regen.sh
    +++ /dev/null
    @@ -1,40 +0,0 @@
    -#!/bin/bash -e
    -#
    -# This script rebuilds the generated code for the protocol buffers.
    -# To run this you will need protoc and goprotobuf installed;
    -# see https://github.com/golang/protobuf for instructions.
    -
    -PKG=google.golang.org/appengine
    -
    -function die() {
    -	echo 1>&2 $*
    -	exit 1
    -}
    -
    -# Sanity check that the right tools are accessible.
    -for tool in go protoc protoc-gen-go; do
    -	q=$(which $tool) || die "didn't find $tool"
    -	echo 1>&2 "$tool: $q"
    -done
    -
    -echo -n 1>&2 "finding package dir... "
    -pkgdir=$(go list -f '{{.Dir}}' $PKG)
    -echo 1>&2 $pkgdir
    -base=$(echo $pkgdir | sed "s,/$PKG\$,,")
    -echo 1>&2 "base: $base"
    -cd $base
    -
    -# Run protoc once per package.
    -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
    -	echo 1>&2 "* $dir"
    -	protoc --go_out=. $dir/*.proto
    -done
    -
    -for f in $(find $PKG/internal -name '*.pb.go'); do
    -  # Remove proto.RegisterEnum calls.
    -  # These cause duplicate registration panics when these packages
    -  # are used on classic App Engine. proto.RegisterEnum only affects
    -  # parsing the text format; we don't care about that.
    -  # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
    -  sed -i '/proto.RegisterEnum/d' $f
    -done
    diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
    deleted file mode 100644
    index 8d782a38e1..0000000000
    --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
    +++ /dev/null
    @@ -1,361 +0,0 @@
    -// Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
    -
    -package remote_api
    -
    -import proto "github.com/golang/protobuf/proto"
    -import fmt "fmt"
    -import math "math"
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
    -
    -type RpcError_ErrorCode int32
    -
    -const (
    -	RpcError_UNKNOWN             RpcError_ErrorCode = 0
    -	RpcError_CALL_NOT_FOUND      RpcError_ErrorCode = 1
    -	RpcError_PARSE_ERROR         RpcError_ErrorCode = 2
    -	RpcError_SECURITY_VIOLATION  RpcError_ErrorCode = 3
    -	RpcError_OVER_QUOTA          RpcError_ErrorCode = 4
    -	RpcError_REQUEST_TOO_LARGE   RpcError_ErrorCode = 5
    -	RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
    -	RpcError_FEATURE_DISABLED    RpcError_ErrorCode = 7
    -	RpcError_BAD_REQUEST         RpcError_ErrorCode = 8
    -	RpcError_RESPONSE_TOO_LARGE  RpcError_ErrorCode = 9
    -	RpcError_CANCELLED           RpcError_ErrorCode = 10
    -	RpcError_REPLAY_ERROR        RpcError_ErrorCode = 11
    -	RpcError_DEADLINE_EXCEEDED   RpcError_ErrorCode = 12
    -)
    -
    -var RpcError_ErrorCode_name = map[int32]string{
    -	0:  "UNKNOWN",
    -	1:  "CALL_NOT_FOUND",
    -	2:  "PARSE_ERROR",
    -	3:  "SECURITY_VIOLATION",
    -	4:  "OVER_QUOTA",
    -	5:  "REQUEST_TOO_LARGE",
    -	6:  "CAPABILITY_DISABLED",
    -	7:  "FEATURE_DISABLED",
    -	8:  "BAD_REQUEST",
    -	9:  "RESPONSE_TOO_LARGE",
    -	10: "CANCELLED",
    -	11: "REPLAY_ERROR",
    -	12: "DEADLINE_EXCEEDED",
    -}
    -var RpcError_ErrorCode_value = map[string]int32{
    -	"UNKNOWN":             0,
    -	"CALL_NOT_FOUND":      1,
    -	"PARSE_ERROR":         2,
    -	"SECURITY_VIOLATION":  3,
    -	"OVER_QUOTA":          4,
    -	"REQUEST_TOO_LARGE":   5,
    -	"CAPABILITY_DISABLED": 6,
    -	"FEATURE_DISABLED":    7,
    -	"BAD_REQUEST":         8,
    -	"RESPONSE_TOO_LARGE":  9,
    -	"CANCELLED":           10,
    -	"REPLAY_ERROR":        11,
    -	"DEADLINE_EXCEEDED":   12,
    -}
    -
    -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
    -	p := new(RpcError_ErrorCode)
    -	*p = x
    -	return p
    -}
    -func (x RpcError_ErrorCode) String() string {
    -	return proto.EnumName(RpcError_ErrorCode_name, int32(x))
    -}
    -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
    -	if err != nil {
    -		return err
    -	}
    -	*x = RpcError_ErrorCode(value)
    -	return nil
    -}
    -func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
    -}
    -
    -type Request struct {
    -	ServiceName          *string  `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
    -	Method               *string  `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
    -	Request              []byte   `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
    -	RequestId            *string  `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *Request) Reset()         { *m = Request{} }
    -func (m *Request) String() string { return proto.CompactTextString(m) }
    -func (*Request) ProtoMessage()    {}
    -func (*Request) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
    -}
    -func (m *Request) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Request.Unmarshal(m, b)
    -}
    -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Request.Marshal(b, m, deterministic)
    -}
    -func (dst *Request) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Request.Merge(dst, src)
    -}
    -func (m *Request) XXX_Size() int {
    -	return xxx_messageInfo_Request.Size(m)
    -}
    -func (m *Request) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Request.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Request proto.InternalMessageInfo
    -
    -func (m *Request) GetServiceName() string {
    -	if m != nil && m.ServiceName != nil {
    -		return *m.ServiceName
    -	}
    -	return ""
    -}
    -
    -func (m *Request) GetMethod() string {
    -	if m != nil && m.Method != nil {
    -		return *m.Method
    -	}
    -	return ""
    -}
    -
    -func (m *Request) GetRequest() []byte {
    -	if m != nil {
    -		return m.Request
    -	}
    -	return nil
    -}
    -
    -func (m *Request) GetRequestId() string {
    -	if m != nil && m.RequestId != nil {
    -		return *m.RequestId
    -	}
    -	return ""
    -}
    -
    -type ApplicationError struct {
    -	Code                 *int32   `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
    -	Detail               *string  `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *ApplicationError) Reset()         { *m = ApplicationError{} }
    -func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
    -func (*ApplicationError) ProtoMessage()    {}
    -func (*ApplicationError) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
    -}
    -func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
    -}
    -func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
    -}
    -func (dst *ApplicationError) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ApplicationError.Merge(dst, src)
    -}
    -func (m *ApplicationError) XXX_Size() int {
    -	return xxx_messageInfo_ApplicationError.Size(m)
    -}
    -func (m *ApplicationError) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ApplicationError.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
    -
    -func (m *ApplicationError) GetCode() int32 {
    -	if m != nil && m.Code != nil {
    -		return *m.Code
    -	}
    -	return 0
    -}
    -
    -func (m *ApplicationError) GetDetail() string {
    -	if m != nil && m.Detail != nil {
    -		return *m.Detail
    -	}
    -	return ""
    -}
    -
    -type RpcError struct {
    -	Code                 *int32   `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
    -	Detail               *string  `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *RpcError) Reset()         { *m = RpcError{} }
    -func (m *RpcError) String() string { return proto.CompactTextString(m) }
    -func (*RpcError) ProtoMessage()    {}
    -func (*RpcError) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
    -}
    -func (m *RpcError) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_RpcError.Unmarshal(m, b)
    -}
    -func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
    -}
    -func (dst *RpcError) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_RpcError.Merge(dst, src)
    -}
    -func (m *RpcError) XXX_Size() int {
    -	return xxx_messageInfo_RpcError.Size(m)
    -}
    -func (m *RpcError) XXX_DiscardUnknown() {
    -	xxx_messageInfo_RpcError.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_RpcError proto.InternalMessageInfo
    -
    -func (m *RpcError) GetCode() int32 {
    -	if m != nil && m.Code != nil {
    -		return *m.Code
    -	}
    -	return 0
    -}
    -
    -func (m *RpcError) GetDetail() string {
    -	if m != nil && m.Detail != nil {
    -		return *m.Detail
    -	}
    -	return ""
    -}
    -
    -type Response struct {
    -	Response             []byte            `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
    -	Exception            []byte            `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
    -	ApplicationError     *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
    -	JavaException        []byte            `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
    -	RpcError             *RpcError         `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    -	XXX_unrecognized     []byte            `json:"-"`
    -	XXX_sizecache        int32             `json:"-"`
    -}
    -
    -func (m *Response) Reset()         { *m = Response{} }
    -func (m *Response) String() string { return proto.CompactTextString(m) }
    -func (*Response) ProtoMessage()    {}
    -func (*Response) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
    -}
    -func (m *Response) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_Response.Unmarshal(m, b)
    -}
    -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_Response.Marshal(b, m, deterministic)
    -}
    -func (dst *Response) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_Response.Merge(dst, src)
    -}
    -func (m *Response) XXX_Size() int {
    -	return xxx_messageInfo_Response.Size(m)
    -}
    -func (m *Response) XXX_DiscardUnknown() {
    -	xxx_messageInfo_Response.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_Response proto.InternalMessageInfo
    -
    -func (m *Response) GetResponse() []byte {
    -	if m != nil {
    -		return m.Response
    -	}
    -	return nil
    -}
    -
    -func (m *Response) GetException() []byte {
    -	if m != nil {
    -		return m.Exception
    -	}
    -	return nil
    -}
    -
    -func (m *Response) GetApplicationError() *ApplicationError {
    -	if m != nil {
    -		return m.ApplicationError
    -	}
    -	return nil
    -}
    -
    -func (m *Response) GetJavaException() []byte {
    -	if m != nil {
    -		return m.JavaException
    -	}
    -	return nil
    -}
    -
    -func (m *Response) GetRpcError() *RpcError {
    -	if m != nil {
    -		return m.RpcError
    -	}
    -	return nil
    -}
    -
    -func init() {
    -	proto.RegisterType((*Request)(nil), "remote_api.Request")
    -	proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
    -	proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
    -	proto.RegisterType((*Response)(nil), "remote_api.Response")
    -}
    -
    -func init() {
    -	proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
    -}
    -
    -var fileDescriptor_remote_api_1978114ec33a273d = []byte{
    -	// 531 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
    -	0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
    -	0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
    -	0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
    -	0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
    -	0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
    -	0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
    -	0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
    -	0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
    -	0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
    -	0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
    -	0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
    -	0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
    -	0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
    -	0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
    -	0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
    -	0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
    -	0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
    -	0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
    -	0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
    -	0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
    -	0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
    -	0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
    -	0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
    -	0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
    -	0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
    -	0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
    -	0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
    -	0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
    -	0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
    -	0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
    -	0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
    -	0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
    -	0x03, 0x00, 0x00,
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
    deleted file mode 100644
    index f21763a4e2..0000000000
    --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
    +++ /dev/null
    @@ -1,44 +0,0 @@
    -syntax = "proto2";
    -option go_package = "remote_api";
    -
    -package remote_api;
    -
    -message Request {
    -  required string service_name = 2;
    -  required string method = 3;
    -  required bytes request = 4;
    -  optional string request_id = 5;
    -}
    -
    -message ApplicationError {
    -  required int32 code = 1;
    -  required string detail = 2;
    -}
    -
    -message RpcError {
    -  enum ErrorCode {
    -    UNKNOWN = 0;
    -    CALL_NOT_FOUND = 1;
    -    PARSE_ERROR = 2;
    -    SECURITY_VIOLATION = 3;
    -    OVER_QUOTA = 4;
    -    REQUEST_TOO_LARGE = 5;
    -    CAPABILITY_DISABLED = 6;
    -    FEATURE_DISABLED = 7;
    -    BAD_REQUEST = 8;
    -    RESPONSE_TOO_LARGE = 9;
    -    CANCELLED = 10;
    -    REPLAY_ERROR = 11;
    -    DEADLINE_EXCEEDED = 12;
    -  }
    -  required int32 code = 1;
    -  optional string detail = 2;
    -}
    -
    -message Response {
    -  optional bytes response = 1;
    -  optional bytes exception = 2;
    -  optional ApplicationError application_error = 3;
    -  optional bytes java_exception = 4;
    -  optional RpcError rpc_error = 5;
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
    deleted file mode 100644
    index 2ae8ab9fa4..0000000000
    --- a/vendor/google.golang.org/appengine/internal/transaction.go
    +++ /dev/null
    @@ -1,115 +0,0 @@
    -// Copyright 2014 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package internal
    -
    -// This file implements hooks for applying datastore transactions.
    -
    -import (
    -	"context"
    -	"errors"
    -	"reflect"
    -
    -	"github.com/golang/protobuf/proto"
    -
    -	basepb "google.golang.org/appengine/internal/base"
    -	pb "google.golang.org/appengine/internal/datastore"
    -)
    -
    -var transactionSetters = make(map[reflect.Type]reflect.Value)
    -
    -// RegisterTransactionSetter registers a function that sets transaction information
    -// in a protocol buffer message. f should be a function with two arguments,
    -// the first being a protocol buffer type, and the second being *datastore.Transaction.
    -func RegisterTransactionSetter(f interface{}) {
    -	v := reflect.ValueOf(f)
    -	transactionSetters[v.Type().In(0)] = v
    -}
    -
    -// applyTransaction applies the transaction t to message pb
    -// by using the relevant setter passed to RegisterTransactionSetter.
    -func applyTransaction(pb proto.Message, t *pb.Transaction) {
    -	v := reflect.ValueOf(pb)
    -	if f, ok := transactionSetters[v.Type()]; ok {
    -		f.Call([]reflect.Value{v, reflect.ValueOf(t)})
    -	}
    -}
    -
    -var transactionKey = "used for *Transaction"
    -
    -func transactionFromContext(ctx context.Context) *transaction {
    -	t, _ := ctx.Value(&transactionKey).(*transaction)
    -	return t
    -}
    -
    -func withTransaction(ctx context.Context, t *transaction) context.Context {
    -	return context.WithValue(ctx, &transactionKey, t)
    -}
    -
    -type transaction struct {
    -	transaction pb.Transaction
    -	finished    bool
    -}
    -
    -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
    -
    -func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
    -	if transactionFromContext(c) != nil {
    -		return nil, errors.New("nested transactions are not supported")
    -	}
    -
    -	// Begin the transaction.
    -	t := &transaction{}
    -	req := &pb.BeginTransactionRequest{
    -		App: proto.String(FullyQualifiedAppID(c)),
    -	}
    -	if xg {
    -		req.AllowMultipleEg = proto.Bool(true)
    -	}
    -	if previousTransaction != nil {
    -		req.PreviousTransaction = previousTransaction
    -	}
    -	if readOnly {
    -		req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
    -	} else {
    -		req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
    -	}
    -	if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
    -		return nil, err
    -	}
    -
    -	// Call f, rolling back the transaction if f returns a non-nil error, or panics.
    -	// The panic is not recovered.
    -	defer func() {
    -		if t.finished {
    -			return
    -		}
    -		t.finished = true
    -		// Ignore the error return value, since we are already returning a non-nil
    -		// error (or we're panicking).
    -		Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
    -	}()
    -	if err := f(withTransaction(c, t)); err != nil {
    -		return &t.transaction, err
    -	}
    -	t.finished = true
    -
    -	// Commit the transaction.
    -	res := &pb.CommitResponse{}
    -	err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
    -	if ae, ok := err.(*APIError); ok {
    -		/* TODO: restore this conditional
    -		if appengine.IsDevAppServer() {
    -		*/
    -		// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
    -		// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
    -		if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
    -			return &t.transaction, ErrConcurrentTransaction
    -		}
    -		if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
    -			return &t.transaction, ErrConcurrentTransaction
    -		}
    -	}
    -	return &t.transaction, err
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
    deleted file mode 100644
    index 5f727750ad..0000000000
    --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
    +++ /dev/null
    @@ -1,527 +0,0 @@
    -// Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
    -
    -package urlfetch
    -
    -import proto "github.com/golang/protobuf/proto"
    -import fmt "fmt"
    -import math "math"
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
    -
    -type URLFetchServiceError_ErrorCode int32
    -
    -const (
    -	URLFetchServiceError_OK                       URLFetchServiceError_ErrorCode = 0
    -	URLFetchServiceError_INVALID_URL              URLFetchServiceError_ErrorCode = 1
    -	URLFetchServiceError_FETCH_ERROR              URLFetchServiceError_ErrorCode = 2
    -	URLFetchServiceError_UNSPECIFIED_ERROR        URLFetchServiceError_ErrorCode = 3
    -	URLFetchServiceError_RESPONSE_TOO_LARGE       URLFetchServiceError_ErrorCode = 4
    -	URLFetchServiceError_DEADLINE_EXCEEDED        URLFetchServiceError_ErrorCode = 5
    -	URLFetchServiceError_SSL_CERTIFICATE_ERROR    URLFetchServiceError_ErrorCode = 6
    -	URLFetchServiceError_DNS_ERROR                URLFetchServiceError_ErrorCode = 7
    -	URLFetchServiceError_CLOSED                   URLFetchServiceError_ErrorCode = 8
    -	URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
    -	URLFetchServiceError_TOO_MANY_REDIRECTS       URLFetchServiceError_ErrorCode = 10
    -	URLFetchServiceError_MALFORMED_REPLY          URLFetchServiceError_ErrorCode = 11
    -	URLFetchServiceError_CONNECTION_ERROR         URLFetchServiceError_ErrorCode = 12
    -)
    -
    -var URLFetchServiceError_ErrorCode_name = map[int32]string{
    -	0:  "OK",
    -	1:  "INVALID_URL",
    -	2:  "FETCH_ERROR",
    -	3:  "UNSPECIFIED_ERROR",
    -	4:  "RESPONSE_TOO_LARGE",
    -	5:  "DEADLINE_EXCEEDED",
    -	6:  "SSL_CERTIFICATE_ERROR",
    -	7:  "DNS_ERROR",
    -	8:  "CLOSED",
    -	9:  "INTERNAL_TRANSIENT_ERROR",
    -	10: "TOO_MANY_REDIRECTS",
    -	11: "MALFORMED_REPLY",
    -	12: "CONNECTION_ERROR",
    -}
    -var URLFetchServiceError_ErrorCode_value = map[string]int32{
    -	"OK":                       0,
    -	"INVALID_URL":              1,
    -	"FETCH_ERROR":              2,
    -	"UNSPECIFIED_ERROR":        3,
    -	"RESPONSE_TOO_LARGE":       4,
    -	"DEADLINE_EXCEEDED":        5,
    -	"SSL_CERTIFICATE_ERROR":    6,
    -	"DNS_ERROR":                7,
    -	"CLOSED":                   8,
    -	"INTERNAL_TRANSIENT_ERROR": 9,
    -	"TOO_MANY_REDIRECTS":       10,
    -	"MALFORMED_REPLY":          11,
    -	"CONNECTION_ERROR":         12,
    -}
    -
    -func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
    -	p := new(URLFetchServiceError_ErrorCode)
    -	*p = x
    -	return p
    -}
    -func (x URLFetchServiceError_ErrorCode) String() string {
    -	return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
    -}
    -func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
    -	if err != nil {
    -		return err
    -	}
    -	*x = URLFetchServiceError_ErrorCode(value)
    -	return nil
    -}
    -func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
    -}
    -
    -type URLFetchRequest_RequestMethod int32
    -
    -const (
    -	URLFetchRequest_GET    URLFetchRequest_RequestMethod = 1
    -	URLFetchRequest_POST   URLFetchRequest_RequestMethod = 2
    -	URLFetchRequest_HEAD   URLFetchRequest_RequestMethod = 3
    -	URLFetchRequest_PUT    URLFetchRequest_RequestMethod = 4
    -	URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
    -	URLFetchRequest_PATCH  URLFetchRequest_RequestMethod = 6
    -)
    -
    -var URLFetchRequest_RequestMethod_name = map[int32]string{
    -	1: "GET",
    -	2: "POST",
    -	3: "HEAD",
    -	4: "PUT",
    -	5: "DELETE",
    -	6: "PATCH",
    -}
    -var URLFetchRequest_RequestMethod_value = map[string]int32{
    -	"GET":    1,
    -	"POST":   2,
    -	"HEAD":   3,
    -	"PUT":    4,
    -	"DELETE": 5,
    -	"PATCH":  6,
    -}
    -
    -func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
    -	p := new(URLFetchRequest_RequestMethod)
    -	*p = x
    -	return p
    -}
    -func (x URLFetchRequest_RequestMethod) String() string {
    -	return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
    -}
    -func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
    -	value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
    -	if err != nil {
    -		return err
    -	}
    -	*x = URLFetchRequest_RequestMethod(value)
    -	return nil
    -}
    -func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
    -	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
    -}
    -
    -type URLFetchServiceError struct {
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *URLFetchServiceError) Reset()         { *m = URLFetchServiceError{} }
    -func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
    -func (*URLFetchServiceError) ProtoMessage()    {}
    -func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
    -}
    -func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
    -}
    -func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
    -}
    -func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
    -}
    -func (m *URLFetchServiceError) XXX_Size() int {
    -	return xxx_messageInfo_URLFetchServiceError.Size(m)
    -}
    -func (m *URLFetchServiceError) XXX_DiscardUnknown() {
    -	xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
    -
    -type URLFetchRequest struct {
    -	Method                        *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
    -	Url                           *string                        `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
    -	Header                        []*URLFetchRequest_Header      `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
    -	Payload                       []byte                         `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
    -	FollowRedirects               *bool                          `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
    -	Deadline                      *float64                       `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
    -	MustValidateServerCertificate *bool                          `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
    -	XXX_NoUnkeyedLiteral          struct{}                       `json:"-"`
    -	XXX_unrecognized              []byte                         `json:"-"`
    -	XXX_sizecache                 int32                          `json:"-"`
    -}
    -
    -func (m *URLFetchRequest) Reset()         { *m = URLFetchRequest{} }
    -func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
    -func (*URLFetchRequest) ProtoMessage()    {}
    -func (*URLFetchRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
    -}
    -func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
    -}
    -func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
    -}
    -func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_URLFetchRequest.Merge(dst, src)
    -}
    -func (m *URLFetchRequest) XXX_Size() int {
    -	return xxx_messageInfo_URLFetchRequest.Size(m)
    -}
    -func (m *URLFetchRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
    -
    -const Default_URLFetchRequest_FollowRedirects bool = true
    -const Default_URLFetchRequest_MustValidateServerCertificate bool = true
    -
    -func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
    -	if m != nil && m.Method != nil {
    -		return *m.Method
    -	}
    -	return URLFetchRequest_GET
    -}
    -
    -func (m *URLFetchRequest) GetUrl() string {
    -	if m != nil && m.Url != nil {
    -		return *m.Url
    -	}
    -	return ""
    -}
    -
    -func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *URLFetchRequest) GetPayload() []byte {
    -	if m != nil {
    -		return m.Payload
    -	}
    -	return nil
    -}
    -
    -func (m *URLFetchRequest) GetFollowRedirects() bool {
    -	if m != nil && m.FollowRedirects != nil {
    -		return *m.FollowRedirects
    -	}
    -	return Default_URLFetchRequest_FollowRedirects
    -}
    -
    -func (m *URLFetchRequest) GetDeadline() float64 {
    -	if m != nil && m.Deadline != nil {
    -		return *m.Deadline
    -	}
    -	return 0
    -}
    -
    -func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
    -	if m != nil && m.MustValidateServerCertificate != nil {
    -		return *m.MustValidateServerCertificate
    -	}
    -	return Default_URLFetchRequest_MustValidateServerCertificate
    -}
    -
    -type URLFetchRequest_Header struct {
    -	Key                  *string  `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
    -	Value                *string  `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *URLFetchRequest_Header) Reset()         { *m = URLFetchRequest_Header{} }
    -func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
    -func (*URLFetchRequest_Header) ProtoMessage()    {}
    -func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
    -}
    -func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
    -}
    -func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
    -}
    -func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
    -}
    -func (m *URLFetchRequest_Header) XXX_Size() int {
    -	return xxx_messageInfo_URLFetchRequest_Header.Size(m)
    -}
    -func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
    -	xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
    -
    -func (m *URLFetchRequest_Header) GetKey() string {
    -	if m != nil && m.Key != nil {
    -		return *m.Key
    -	}
    -	return ""
    -}
    -
    -func (m *URLFetchRequest_Header) GetValue() string {
    -	if m != nil && m.Value != nil {
    -		return *m.Value
    -	}
    -	return ""
    -}
    -
    -type URLFetchResponse struct {
    -	Content               []byte                     `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
    -	StatusCode            *int32                     `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
    -	Header                []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
    -	ContentWasTruncated   *bool                      `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
    -	ExternalBytesSent     *int64                     `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
    -	ExternalBytesReceived *int64                     `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
    -	FinalUrl              *string                    `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
    -	ApiCpuMilliseconds    *int64                     `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
    -	ApiBytesSent          *int64                     `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
    -	ApiBytesReceived      *int64                     `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
    -	XXX_NoUnkeyedLiteral  struct{}                   `json:"-"`
    -	XXX_unrecognized      []byte                     `json:"-"`
    -	XXX_sizecache         int32                      `json:"-"`
    -}
    -
    -func (m *URLFetchResponse) Reset()         { *m = URLFetchResponse{} }
    -func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
    -func (*URLFetchResponse) ProtoMessage()    {}
    -func (*URLFetchResponse) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
    -}
    -func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
    -}
    -func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
    -}
    -func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_URLFetchResponse.Merge(dst, src)
    -}
    -func (m *URLFetchResponse) XXX_Size() int {
    -	return xxx_messageInfo_URLFetchResponse.Size(m)
    -}
    -func (m *URLFetchResponse) XXX_DiscardUnknown() {
    -	xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
    -
    -const Default_URLFetchResponse_ContentWasTruncated bool = false
    -const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
    -const Default_URLFetchResponse_ApiBytesSent int64 = 0
    -const Default_URLFetchResponse_ApiBytesReceived int64 = 0
    -
    -func (m *URLFetchResponse) GetContent() []byte {
    -	if m != nil {
    -		return m.Content
    -	}
    -	return nil
    -}
    -
    -func (m *URLFetchResponse) GetStatusCode() int32 {
    -	if m != nil && m.StatusCode != nil {
    -		return *m.StatusCode
    -	}
    -	return 0
    -}
    -
    -func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
    -	if m != nil {
    -		return m.Header
    -	}
    -	return nil
    -}
    -
    -func (m *URLFetchResponse) GetContentWasTruncated() bool {
    -	if m != nil && m.ContentWasTruncated != nil {
    -		return *m.ContentWasTruncated
    -	}
    -	return Default_URLFetchResponse_ContentWasTruncated
    -}
    -
    -func (m *URLFetchResponse) GetExternalBytesSent() int64 {
    -	if m != nil && m.ExternalBytesSent != nil {
    -		return *m.ExternalBytesSent
    -	}
    -	return 0
    -}
    -
    -func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
    -	if m != nil && m.ExternalBytesReceived != nil {
    -		return *m.ExternalBytesReceived
    -	}
    -	return 0
    -}
    -
    -func (m *URLFetchResponse) GetFinalUrl() string {
    -	if m != nil && m.FinalUrl != nil {
    -		return *m.FinalUrl
    -	}
    -	return ""
    -}
    -
    -func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
    -	if m != nil && m.ApiCpuMilliseconds != nil {
    -		return *m.ApiCpuMilliseconds
    -	}
    -	return Default_URLFetchResponse_ApiCpuMilliseconds
    -}
    -
    -func (m *URLFetchResponse) GetApiBytesSent() int64 {
    -	if m != nil && m.ApiBytesSent != nil {
    -		return *m.ApiBytesSent
    -	}
    -	return Default_URLFetchResponse_ApiBytesSent
    -}
    -
    -func (m *URLFetchResponse) GetApiBytesReceived() int64 {
    -	if m != nil && m.ApiBytesReceived != nil {
    -		return *m.ApiBytesReceived
    -	}
    -	return Default_URLFetchResponse_ApiBytesReceived
    -}
    -
    -type URLFetchResponse_Header struct {
    -	Key                  *string  `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
    -	Value                *string  `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
    -	XXX_NoUnkeyedLiteral struct{} `json:"-"`
    -	XXX_unrecognized     []byte   `json:"-"`
    -	XXX_sizecache        int32    `json:"-"`
    -}
    -
    -func (m *URLFetchResponse_Header) Reset()         { *m = URLFetchResponse_Header{} }
    -func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
    -func (*URLFetchResponse_Header) ProtoMessage()    {}
    -func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
    -}
    -func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
    -	return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
    -}
    -func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
    -}
    -func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
    -}
    -func (m *URLFetchResponse_Header) XXX_Size() int {
    -	return xxx_messageInfo_URLFetchResponse_Header.Size(m)
    -}
    -func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
    -	xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
    -
    -func (m *URLFetchResponse_Header) GetKey() string {
    -	if m != nil && m.Key != nil {
    -		return *m.Key
    -	}
    -	return ""
    -}
    -
    -func (m *URLFetchResponse_Header) GetValue() string {
    -	if m != nil && m.Value != nil {
    -		return *m.Value
    -	}
    -	return ""
    -}
    -
    -func init() {
    -	proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
    -	proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
    -	proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
    -	proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
    -	proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
    -}
    -
    -func init() {
    -	proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
    -}
    -
    -var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
    -	// 770 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
    -	0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
    -	0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
    -	0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
    -	0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
    -	0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
    -	0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
    -	0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
    -	0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
    -	0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
    -	0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
    -	0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
    -	0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
    -	0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
    -	0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
    -	0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
    -	0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
    -	0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
    -	0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
    -	0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
    -	0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
    -	0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
    -	0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
    -	0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
    -	0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
    -	0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
    -	0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
    -	0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
    -	0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
    -	0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
    -	0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
    -	0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
    -	0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
    -	0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
    -	0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
    -	0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
    -	0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
    -	0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
    -	0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
    -	0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
    -	0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
    -	0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
    -	0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
    -	0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
    -	0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
    -	0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
    -	0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
    -	0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
    -	0x00, 0x00,
    -}
    diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
    deleted file mode 100644
    index f695edf6a9..0000000000
    --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
    +++ /dev/null
    @@ -1,64 +0,0 @@
    -syntax = "proto2";
    -option go_package = "urlfetch";
    -
    -package appengine;
    -
    -message URLFetchServiceError {
    -  enum ErrorCode {
    -    OK = 0;
    -    INVALID_URL = 1;
    -    FETCH_ERROR = 2;
    -    UNSPECIFIED_ERROR = 3;
    -    RESPONSE_TOO_LARGE = 4;
    -    DEADLINE_EXCEEDED = 5;
    -    SSL_CERTIFICATE_ERROR = 6;
    -    DNS_ERROR = 7;
    -    CLOSED = 8;
    -    INTERNAL_TRANSIENT_ERROR = 9;
    -    TOO_MANY_REDIRECTS = 10;
    -    MALFORMED_REPLY = 11;
    -    CONNECTION_ERROR = 12;
    -  }
    -}
    -
    -message URLFetchRequest {
    -  enum RequestMethod {
    -    GET = 1;
    -    POST = 2;
    -    HEAD = 3;
    -    PUT = 4;
    -    DELETE = 5;
    -    PATCH = 6;
    -  }
    -  required RequestMethod Method = 1;
    -  required string Url = 2;
    -  repeated group Header = 3 {
    -    required string Key = 4;
    -    required string Value = 5;
    -  }
    -  optional bytes Payload = 6 [ctype=CORD];
    -
    -  optional bool FollowRedirects = 7 [default=true];
    -
    -  optional double Deadline = 8;
    -
    -  optional bool MustValidateServerCertificate = 9 [default=true];
    -}
    -
    -message URLFetchResponse {
    -  optional bytes Content = 1;
    -  required int32 StatusCode = 2;
    -  repeated group Header = 3 {
    -    required string Key = 4;
    -    required string Value = 5;
    -  }
    -  optional bool ContentWasTruncated = 6 [default=false];
    -  optional int64 ExternalBytesSent = 7;
    -  optional int64 ExternalBytesReceived = 8;
    -
    -  optional string FinalUrl = 9;
    -
    -  optional int64 ApiCpuMilliseconds = 10 [default=0];
    -  optional int64 ApiBytesSent = 11 [default=0];
    -  optional int64 ApiBytesReceived = 12 [default=0];
    -}
    diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
    deleted file mode 100644
    index 6f169be487..0000000000
    --- a/vendor/google.golang.org/appengine/namespace.go
    +++ /dev/null
    @@ -1,24 +0,0 @@
    -// Copyright 2012 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package appengine
    -
    -import (
    -	"context"
    -	"fmt"
    -	"regexp"
    -
    -	"google.golang.org/appengine/internal"
    -)
    -
    -// Namespace returns a replacement context that operates within the given namespace.
    -func Namespace(c context.Context, namespace string) (context.Context, error) {
    -	if !validNamespace.MatchString(namespace) {
    -		return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
    -	}
    -	return internal.NamespacedContext(c, namespace), nil
    -}
    -
    -// validNamespace matches valid namespace names.
    -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
    diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
    deleted file mode 100644
    index fcf3ad0a58..0000000000
    --- a/vendor/google.golang.org/appengine/timeout.go
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -// Copyright 2013 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -package appengine
    -
    -import "context"
    -
    -// IsTimeoutError reports whether err is a timeout error.
    -func IsTimeoutError(err error) bool {
    -	if err == context.DeadlineExceeded {
    -		return true
    -	}
    -	if t, ok := err.(interface {
    -		IsTimeout() bool
    -	}); ok {
    -		return t.IsTimeout()
    -	}
    -	return false
    -}
    diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
    deleted file mode 100644
    index 6c0d72418d..0000000000
    --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
    +++ /dev/null
    @@ -1,209 +0,0 @@
    -// Copyright 2011 Google Inc. All rights reserved.
    -// Use of this source code is governed by the Apache 2.0
    -// license that can be found in the LICENSE file.
    -
    -// Package urlfetch provides an http.RoundTripper implementation
    -// for fetching URLs via App Engine's urlfetch service.
    -package urlfetch // import "google.golang.org/appengine/urlfetch"
    -
    -import (
    -	"context"
    -	"errors"
    -	"fmt"
    -	"io"
    -	"io/ioutil"
    -	"net/http"
    -	"net/url"
    -	"strconv"
    -	"strings"
    -	"time"
    -
    -	"github.com/golang/protobuf/proto"
    -
    -	"google.golang.org/appengine/internal"
    -	pb "google.golang.org/appengine/internal/urlfetch"
    -)
    -
    -// Transport is an implementation of http.RoundTripper for
    -// App Engine. Users should generally create an http.Client using
    -// this transport and use the Client rather than using this transport
    -// directly.
    -type Transport struct {
    -	Context context.Context
    -
    -	// Controls whether the application checks the validity of SSL certificates
    -	// over HTTPS connections. A value of false (the default) instructs the
    -	// application to send a request to the server only if the certificate is
    -	// valid and signed by a trusted certificate authority (CA), and also
    -	// includes a hostname that matches the certificate. A value of true
    -	// instructs the application to perform no certificate validation.
    -	AllowInvalidServerCertificate bool
    -}
    -
    -// Verify statically that *Transport implements http.RoundTripper.
    -var _ http.RoundTripper = (*Transport)(nil)
    -
    -// Client returns an *http.Client using a default urlfetch Transport. This
    -// client will check the validity of SSL certificates.
    -//
    -// Any deadline of the provided context will be used for requests through this client.
    -// If the client does not have a deadline, then an App Engine default of 60 second is used.
    -func Client(ctx context.Context) *http.Client {
    -	return &http.Client{
    -		Transport: &Transport{
    -			Context: ctx,
    -		},
    -	}
    -}
    -
    -type bodyReader struct {
    -	content   []byte
    -	truncated bool
    -	closed    bool
    -}
    -
    -// ErrTruncatedBody is the error returned after the final Read() from a
    -// response's Body if the body has been truncated by App Engine's proxy.
    -var ErrTruncatedBody = errors.New("urlfetch: truncated body")
    -
    -func statusCodeToText(code int) string {
    -	if t := http.StatusText(code); t != "" {
    -		return t
    -	}
    -	return strconv.Itoa(code)
    -}
    -
    -func (br *bodyReader) Read(p []byte) (n int, err error) {
    -	if br.closed {
    -		if br.truncated {
    -			return 0, ErrTruncatedBody
    -		}
    -		return 0, io.EOF
    -	}
    -	n = copy(p, br.content)
    -	if n > 0 {
    -		br.content = br.content[n:]
    -		return
    -	}
    -	if br.truncated {
    -		br.closed = true
    -		return 0, ErrTruncatedBody
    -	}
    -	return 0, io.EOF
    -}
    -
    -func (br *bodyReader) Close() error {
    -	br.closed = true
    -	br.content = nil
    -	return nil
    -}
    -
    -// A map of the URL Fetch-accepted methods that take a request body.
    -var methodAcceptsRequestBody = map[string]bool{
    -	"POST":  true,
    -	"PUT":   true,
    -	"PATCH": true,
    -}
    -
    -// urlString returns a valid string given a URL. This function is necessary because
    -// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
    -// See http://code.google.com/p/go/issues/detail?id=4860.
    -func urlString(u *url.URL) string {
    -	if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
    -		return u.String()
    -	}
    -	aux := *u
    -	aux.Opaque = "//" + aux.Host + aux.Opaque
    -	return aux.String()
    -}
    -
    -// RoundTrip issues a single HTTP request and returns its response. Per the
    -// http.RoundTripper interface, RoundTrip only returns an error if there
    -// was an unsupported request or the URL Fetch proxy fails.
    -// Note that HTTP response codes such as 5xx, 403, 404, etc are not
    -// errors as far as the transport is concerned and will be returned
    -// with err set to nil.
    -func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
    -	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
    -	if !ok {
    -		return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
    -	}
    -
    -	method := pb.URLFetchRequest_RequestMethod(methNum)
    -
    -	freq := &pb.URLFetchRequest{
    -		Method:                        &method,
    -		Url:                           proto.String(urlString(req.URL)),
    -		FollowRedirects:               proto.Bool(false), // http.Client's responsibility
    -		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
    -	}
    -	if deadline, ok := t.Context.Deadline(); ok {
    -		freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
    -	}
    -
    -	for k, vals := range req.Header {
    -		for _, val := range vals {
    -			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
    -				Key:   proto.String(k),
    -				Value: proto.String(val),
    -			})
    -		}
    -	}
    -	if methodAcceptsRequestBody[req.Method] && req.Body != nil {
    -		// Avoid a []byte copy if req.Body has a Bytes method.
    -		switch b := req.Body.(type) {
    -		case interface {
    -			Bytes() []byte
    -		}:
    -			freq.Payload = b.Bytes()
    -		default:
    -			freq.Payload, err = ioutil.ReadAll(req.Body)
    -			if err != nil {
    -				return nil, err
    -			}
    -		}
    -	}
    -
    -	fres := &pb.URLFetchResponse{}
    -	if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
    -		return nil, err
    -	}
    -
    -	res = &http.Response{}
    -	res.StatusCode = int(*fres.StatusCode)
    -	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
    -	res.Header = make(http.Header)
    -	res.Request = req
    -
    -	// Faked:
    -	res.ProtoMajor = 1
    -	res.ProtoMinor = 1
    -	res.Proto = "HTTP/1.1"
    -	res.Close = true
    -
    -	for _, h := range fres.Header {
    -		hkey := http.CanonicalHeaderKey(*h.Key)
    -		hval := *h.Value
    -		if hkey == "Content-Length" {
    -			// Will get filled in below for all but HEAD requests.
    -			if req.Method == "HEAD" {
    -				res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
    -			}
    -			continue
    -		}
    -		res.Header.Add(hkey, hval)
    -	}
    -
    -	if req.Method != "HEAD" {
    -		res.ContentLength = int64(len(fres.Content))
    -	}
    -
    -	truncated := fres.GetContentWasTruncated()
    -	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
    -	return
    -}
    -
    -func init() {
    -	internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
    -	internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
    -}
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
    index 191bea48c8..8b462f3dfe 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2015 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.12.2
    +// 	protoc        v4.24.4
     // source: google/api/annotations.proto
     
     package annotations
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
    index d5dccb9337..4a9fce53c4 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -180,6 +180,8 @@ type CommonLanguageSettings struct {
     	ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"`
     	// The destination where API teams want this client library to be published.
     	Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"`
    +	// Configuration for which RPCs should be generated in the GAPIC client.
    +	SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"`
     }
     
     func (x *CommonLanguageSettings) Reset() {
    @@ -229,6 +231,13 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination {
     	return nil
     }
     
    +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration {
    +	if x != nil {
    +		return x.SelectiveGapicGeneration
    +	}
    +	return nil
    +}
    +
     // Details about how and where to publish client libraries.
     type ClientLibrarySettings struct {
     	state         protoimpl.MessageState
    @@ -409,6 +418,9 @@ type Publishing struct {
     	// Optional link to proto reference documentation.  Example:
     	// https://cloud.google.com/pubsub/lite/docs/reference/rpc
     	ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"`
    +	// Optional link to REST reference documentation.  Example:
    +	// https://cloud.google.com/pubsub/lite/docs/reference/rest
    +	RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"`
     }
     
     func (x *Publishing) Reset() {
    @@ -513,6 +525,13 @@ func (x *Publishing) GetProtoReferenceDocumentationUri() string {
     	return ""
     }
     
    +func (x *Publishing) GetRestReferenceDocumentationUri() string {
    +	if x != nil {
    +		return x.RestReferenceDocumentationUri
    +	}
    +	return ""
    +}
    +
     // Settings for Java client libraries.
     type JavaSettings struct {
     	state         protoimpl.MessageState
    @@ -709,6 +728,8 @@ type PythonSettings struct {
     
     	// Some settings.
     	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +	// Experimental features to be included during client library generation.
    +	ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
     }
     
     func (x *PythonSettings) Reset() {
    @@ -750,6 +771,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings {
     	return nil
     }
     
    +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
    +	if x != nil {
    +		return x.ExperimentalFeatures
    +	}
    +	return nil
    +}
    +
     // Settings for Node client libraries.
     type NodeSettings struct {
     	state         protoimpl.MessageState
    @@ -965,6 +993,16 @@ type GoSettings struct {
     
     	// Some settings.
     	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +	// Map of service names to renamed services. Keys are the package relative
    +	// service names and values are the name to be used for the service client
    +	// and call options.
    +	//
    +	// publishing:
    +	//
    +	//	go_settings:
    +	//	  renamed_services:
    +	//	    Publisher: TopicAdmin
    +	RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
     }
     
     func (x *GoSettings) Reset() {
    @@ -1006,6 +1044,13 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings {
     	return nil
     }
     
    +func (x *GoSettings) GetRenamedServices() map[string]string {
    +	if x != nil {
    +		return x.RenamedServices
    +	}
    +	return nil
    +}
    +
     // Describes the generator configuration for a method.
     type MethodSettings struct {
     	state         protoimpl.MessageState
    @@ -1014,6 +1059,13 @@ type MethodSettings struct {
     
     	// The fully qualified name of the method, for which the options below apply.
     	// This is used to find the method to apply the options.
    +	//
    +	// Example:
    +	//
    +	//	publishing:
    +	//	  method_settings:
    +	//	  - selector: google.storage.control.v2.StorageControl.CreateFolder
    +	//	    # method settings for CreateFolder...
     	Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
     	// Describes settings to use for long-running operations when generating
     	// API methods for RPCs. Complements RPCs that use the annotations in
    @@ -1023,15 +1075,12 @@ type MethodSettings struct {
     	//
     	//	publishing:
     	//	  method_settings:
    -	//	    - selector: google.cloud.speech.v2.Speech.BatchRecognize
    -	//	      long_running:
    -	//	        initial_poll_delay:
    -	//	          seconds: 60 # 1 minute
    -	//	        poll_delay_multiplier: 1.5
    -	//	        max_poll_delay:
    -	//	          seconds: 360 # 6 minutes
    -	//	        total_poll_timeout:
    -	//	           seconds: 54000 # 90 minutes
    +	//	  - selector: google.cloud.speech.v2.Speech.BatchRecognize
    +	//	    long_running:
    +	//	      initial_poll_delay: 60s # 1 minute
    +	//	      poll_delay_multiplier: 1.5
    +	//	      max_poll_delay: 360s # 6 minutes
    +	//	      total_poll_timeout: 54000s # 90 minutes
     	LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"`
     	// List of top-level fields of the request message, that should be
     	// automatically populated by the client libraries based on their
    @@ -1041,9 +1090,9 @@ type MethodSettings struct {
     	//
     	//	publishing:
     	//	  method_settings:
    -	//	    - selector: google.example.v1.ExampleService.CreateExample
    -	//	      auto_populated_fields:
    -	//	      - request_id
    +	//	  - selector: google.example.v1.ExampleService.CreateExample
    +	//	    auto_populated_fields:
    +	//	    - request_id
     	AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"`
     }
     
    @@ -1100,6 +1149,123 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string {
     	return nil
     }
     
    +// This message is used to configure the generation of a subset of the RPCs in
    +// a service for client libraries.
    +type SelectiveGapicGeneration struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// An allowlist of the fully qualified names of RPCs that should be included
    +	// on public client surfaces.
    +	Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"`
    +}
    +
    +func (x *SelectiveGapicGeneration) Reset() {
    +	*x = SelectiveGapicGeneration{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[12]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *SelectiveGapicGeneration) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*SelectiveGapicGeneration) ProtoMessage() {}
    +
    +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[12]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead.
    +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{12}
    +}
    +
    +func (x *SelectiveGapicGeneration) GetMethods() []string {
    +	if x != nil {
    +		return x.Methods
    +	}
    +	return nil
    +}
    +
    +// Experimental features to be included during client library generation.
    +// These fields will be deprecated once the feature graduates and is enabled
    +// by default.
    +type PythonSettings_ExperimentalFeatures struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Enables generation of asynchronous REST clients if `rest` transport is
    +	// enabled. By default, asynchronous REST clients will not be generated.
    +	// This feature will be enabled by default 1 month after launching the
    +	// feature in preview packages.
    +	RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
    +	// Enables generation of protobuf code using new types that are more
    +	// Pythonic which are included in `protobuf>=5.29.x`. This feature will be
    +	// enabled by default 1 month after launching the feature in preview
    +	// packages.
    +	ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"`
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) Reset() {
    +	*x = PythonSettings_ExperimentalFeatures{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[14]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
    +
    +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[14]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
    +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
    +	if x != nil {
    +		return x.RestAsyncIoEnabled
    +	}
    +	return false
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool {
    +	if x != nil {
    +		return x.ProtobufPythonicTypesEnabled
    +	}
    +	return false
    +}
    +
     // Describes settings to use when generating API methods that use the
     // long-running operation pattern.
     // All default values below are from those used in the client library
    @@ -1128,7 +1294,7 @@ type MethodSettings_LongRunning struct {
     func (x *MethodSettings_LongRunning) Reset() {
     	*x = MethodSettings_LongRunning{}
     	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_api_client_proto_msgTypes[15]
    +		mi := &file_google_api_client_proto_msgTypes[18]
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		ms.StoreMessageInfo(mi)
     	}
    @@ -1141,7 +1307,7 @@ func (x *MethodSettings_LongRunning) String() string {
     func (*MethodSettings_LongRunning) ProtoMessage() {}
     
     func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_api_client_proto_msgTypes[15]
    +	mi := &file_google_api_client_proto_msgTypes[18]
     	if protoimpl.UnsafeEnabled && x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
    @@ -1210,6 +1376,14 @@ var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{
     		Tag:           "bytes,1050,opt,name=oauth_scopes",
     		Filename:      "google/api/client.proto",
     	},
    +	{
    +		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
    +		ExtensionType: (*string)(nil),
    +		Field:         525000001,
    +		Name:          "google.api.api_version",
    +		Tag:           "bytes,525000001,opt,name=api_version",
    +		Filename:      "google/api/client.proto",
    +	},
     }
     
     // Extension fields to descriptorpb.MethodOptions.
    @@ -1291,6 +1465,23 @@ var (
     	//
     	// optional string oauth_scopes = 1050;
     	E_OauthScopes = &file_google_api_client_proto_extTypes[2]
    +	// The API version of this service, which should be sent by version-aware
    +	// clients to the service. This allows services to abide by the schema and
    +	// behavior of the service at the time this API version was deployed.
    +	// The format of the API version must be treated as opaque by clients.
    +	// Services may use a format with an apparent structure, but clients must
    +	// not rely on this to determine components within an API version, or attempt
    +	// to construct other valid API versions. Note that this is for upcoming
    +	// functionality and may not be implemented for all services.
    +	//
    +	// Example:
    +	//
    +	//	service Foo {
    +	//	  option (google.api.api_version) = "v1_20230821_preview";
    +	//	}
    +	//
    +	// optional string api_version = 525000001;
    +	E_ApiVersion = &file_google_api_client_proto_extTypes[3]
     )
     
     var File_google_api_client_proto protoreflect.FileDescriptor
    @@ -1304,7 +1495,7 @@ var file_google_api_client_proto_rawDesc = []byte{
     	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
     	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
     	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
     	0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
     	0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64,
     	0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18,
    @@ -1313,223 +1504,267 @@ var file_google_api_client_proto_rawDesc = []byte{
     	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
     	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
     	0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
    -	0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05,
    -	0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53,
    -	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
    -	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
    -	0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67,
    -	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65,
    -	0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a,
    -	0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e,
    -	0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e,
    -	0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a,
    -	0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01,
    +	0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a,
    +	0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63,
    +	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
    +	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53,
    +	0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e,
    +	0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
    +	0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
    +	0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72,
    +	0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76,
    +	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
    +	0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
    +	0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
    +	0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
    +	0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69,
    +	0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72,
    +	0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12,
    +	0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a,
    +	0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16,
    +	0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    +	0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63,
    +	0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68,
    +	0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b,
    +	0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68,
    +	0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65,
    +	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e,
    +	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32,
    +	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
    +	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74,
    +	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e,
    +	0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01,
     	0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
    -	0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61,
    -	0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70,
    -	0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b,
    -	0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70,
    -	0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65,
    -	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65,
    -	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
    -	0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
    -	0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74,
    -	0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
    -	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
    -	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
    -	0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
    -	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65,
    -	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74,
    -	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74,
    -	0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74,
    -	0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72,
    -	0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01,
    -	0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
    -	0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75,
    -	0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f,
    -	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53,
    -	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
    -	0x6e, 0x67, 0x73, 0x22, 0xab, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69,
    -	0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74,
    -	0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
    -	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
    -	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69,
    -	0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
    -	0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64,
    -	0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69,
    -	0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
    -	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f,
    -	0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21,
    -	0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68,
    -	0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65,
    -	0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67,
    -	0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28,
    -	0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68,
    -	0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74,
    -	0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a,
    -	0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20,
    -	0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    -	0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72,
    -	0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61,
    -	0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72,
    -	0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03,
    -	0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
    -	0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74,
    -	0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65,
    -	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f,
    -	0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
    -	0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28,
    -	0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
    -	0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72,
    -	0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
    -	0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61,
    -	0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62,
    -	0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73,
    -	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d,
    -	0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
    -	0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e,
    -	0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69,
    -	0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06,
    -	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
    -	0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    -	0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76,
    -	0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74,
    -	0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49,
    -	0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a,
    -	0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
    -	0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
    -	0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70,
    +	0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f,
    +	0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f,
    +	0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    +	0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
    +	0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
    +	0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37,
    +	0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    +	0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53,
    +	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c,
    +	0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    +	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
    +	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
    +	0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74,
    +	0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e,
    +	0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12,
    +	0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    +	0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75,
    +	0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e,
    +	0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67,
    +	0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61,
    +	0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62,
    +	0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
    +	0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e,
    +	0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18,
    +	0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72,
    +	0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64,
    +	0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20,
    +	0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69,
    +	0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
    +	0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61,
    +	0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
    +	0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10,
    +	0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
    +	0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61,
    +	0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f,
    +	0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18,
    +	0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65,
    +	0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
    +	0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65,
    +	0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
    +	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f,
    +	0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a,
    +	0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
    +	0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
    +	0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72,
    +	0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76,
    +	0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
    +	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
    +	0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
    +	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65,
    +	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
    +	0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
    +	0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
    +	0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
    +	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
    +	0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
    +	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
    +	0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43,
    +	0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
    +	0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
    +	0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
    +	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
    +	0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18,
    +	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
    +	0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
    +	0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
    +	0x6e, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74,
    +	0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    +	0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
    +	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
    +	0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
    +	0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
    +	0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
    +	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65,
    +	0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    +	0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65,
    +	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x90, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72,
    +	0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
    +	0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f,
    +	0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
    +	0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c,
    +	0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70,
    +	0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e,
    +	0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70,
    +	0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64,
    +	0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
    +	0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
    +	0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
    +	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74,
     	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
     	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
     	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
     	0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
    -	0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65,
    +	0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f,
    +	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e,
    +	0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d,
    +	0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
    +	0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
    +	0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f,
    +	0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53,
    +	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52,
    +	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72,
    +	0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12,
    +	0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75,
    +	0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f,
    +	0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18,
    +	0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
    +	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16,
    +	0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41,
    +	0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
    +	0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    +	0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74,
    +	0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a,
    +	0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
    +	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
    +	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
    +	0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
    +	0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
    +	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
    +	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
    +	0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65,
     	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
     	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
     	0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
     	0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
    -	0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
    -	0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
    -	0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
    -	0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65,
    -	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae,
    -	0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
    +	0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
     	0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
     	0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
     	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74,
    -	0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a,
    +	0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a,
     	0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
    -	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69,
    -	0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69,
    -	0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
    -	0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e,
    -	0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03,
    -	0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    -	0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    -	0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
    -	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52,
    -	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f,
    -	0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20,
    -	0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
    -	0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f,
    -	0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65,
    -	0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e,
    -	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12,
    -	0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73,
    -	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52,
    -	0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e,
    -	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
    -	0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
    -	0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
    -	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65,
    -	0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e,
    -	0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
    -	0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
    -	0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
    -	0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d,
    -	0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
    -	0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47,
    -	0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
    -	0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
    -	0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
    -	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    -	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65,
    -	0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65,
    -	0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e,
    -	0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65,
    -	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69,
    -	0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12,
    -	0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65,
    -	0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13,
    -	0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65,
    -	0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e,
    -	0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70,
    -	0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74,
    -	0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15,
    -	0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69,
    -	0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c,
    -	0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72,
    -	0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c,
    -	0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
    -	0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61,
    -	0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
    -	0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50,
    -	0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43,
    -	0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61,
    -	0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45,
    -	0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e,
    -	0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
    -	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01,
    -	0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f,
    -	0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f,
    -	0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49,
    -	0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a,
    -	0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07,
    -	0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
    -	0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26,
    -	0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44,
    -	0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
    -	0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48,
    -	0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f,
    -	0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74,
    -	0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08,
    -	0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e,
    -	0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
    -	0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
    -	0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61,
    -	0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72,
    -	0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01,
    -	0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42,
    +	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
    +	0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45,
    +	0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72,
    +	0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64,
    +	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
    +	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
    +	0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
    +	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65,
    +	0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08,
    +	0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
    +	0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67,
    +	0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68,
    +	0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52,
    +	0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e,
    +	0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75,
    +	0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03,
    +	0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65,
    +	0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67,
    +	0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69,
    +	0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10,
    +	0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79,
    +	0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d,
    +	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52,
    +	0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70,
    +	0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c,
    +	0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
    +	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c,
    +	0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70,
    +	0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
    +	0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    +	0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f,
    +	0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x34,
    +	0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63,
    +	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
    +	0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74,
    +	0x68, 0x6f, 0x64, 0x73, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c,
    +	0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
    +	0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42,
    +	0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f,
    +	0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
    +	0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44,
    +	0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12,
    +	0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04,
    +	0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07,
    +	0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52,
    +	0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c,
    +	0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69,
    +	0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
    +	0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41,
    +	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
    +	0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13,
    +	0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45,
    +	0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69,
    +	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    +	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f,
    +	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a,
    +	0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12,
    +	0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    +	0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    +	0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
    +	0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63,
    +	0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70,
    +	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61,
    +	0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69,
    +	0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
    +	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20,
    +	0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42,
     	0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
     	0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
     	0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
    @@ -1553,68 +1788,75 @@ func file_google_api_client_proto_rawDescGZIP() []byte {
     }
     
     var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
    -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
    +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
     var file_google_api_client_proto_goTypes = []interface{}{
    -	(ClientLibraryOrganization)(0),      // 0: google.api.ClientLibraryOrganization
    -	(ClientLibraryDestination)(0),       // 1: google.api.ClientLibraryDestination
    -	(*CommonLanguageSettings)(nil),      // 2: google.api.CommonLanguageSettings
    -	(*ClientLibrarySettings)(nil),       // 3: google.api.ClientLibrarySettings
    -	(*Publishing)(nil),                  // 4: google.api.Publishing
    -	(*JavaSettings)(nil),                // 5: google.api.JavaSettings
    -	(*CppSettings)(nil),                 // 6: google.api.CppSettings
    -	(*PhpSettings)(nil),                 // 7: google.api.PhpSettings
    -	(*PythonSettings)(nil),              // 8: google.api.PythonSettings
    -	(*NodeSettings)(nil),                // 9: google.api.NodeSettings
    -	(*DotnetSettings)(nil),              // 10: google.api.DotnetSettings
    -	(*RubySettings)(nil),                // 11: google.api.RubySettings
    -	(*GoSettings)(nil),                  // 12: google.api.GoSettings
    -	(*MethodSettings)(nil),              // 13: google.api.MethodSettings
    -	nil,                                 // 14: google.api.JavaSettings.ServiceClassNamesEntry
    -	nil,                                 // 15: google.api.DotnetSettings.RenamedServicesEntry
    -	nil,                                 // 16: google.api.DotnetSettings.RenamedResourcesEntry
    -	(*MethodSettings_LongRunning)(nil),  // 17: google.api.MethodSettings.LongRunning
    -	(api.LaunchStage)(0),                // 18: google.api.LaunchStage
    -	(*durationpb.Duration)(nil),         // 19: google.protobuf.Duration
    -	(*descriptorpb.MethodOptions)(nil),  // 20: google.protobuf.MethodOptions
    -	(*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions
    +	(ClientLibraryOrganization)(0),              // 0: google.api.ClientLibraryOrganization
    +	(ClientLibraryDestination)(0),               // 1: google.api.ClientLibraryDestination
    +	(*CommonLanguageSettings)(nil),              // 2: google.api.CommonLanguageSettings
    +	(*ClientLibrarySettings)(nil),               // 3: google.api.ClientLibrarySettings
    +	(*Publishing)(nil),                          // 4: google.api.Publishing
    +	(*JavaSettings)(nil),                        // 5: google.api.JavaSettings
    +	(*CppSettings)(nil),                         // 6: google.api.CppSettings
    +	(*PhpSettings)(nil),                         // 7: google.api.PhpSettings
    +	(*PythonSettings)(nil),                      // 8: google.api.PythonSettings
    +	(*NodeSettings)(nil),                        // 9: google.api.NodeSettings
    +	(*DotnetSettings)(nil),                      // 10: google.api.DotnetSettings
    +	(*RubySettings)(nil),                        // 11: google.api.RubySettings
    +	(*GoSettings)(nil),                          // 12: google.api.GoSettings
    +	(*MethodSettings)(nil),                      // 13: google.api.MethodSettings
    +	(*SelectiveGapicGeneration)(nil),            // 14: google.api.SelectiveGapicGeneration
    +	nil,                                         // 15: google.api.JavaSettings.ServiceClassNamesEntry
    +	(*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures
    +	nil,                                 // 17: google.api.DotnetSettings.RenamedServicesEntry
    +	nil,                                 // 18: google.api.DotnetSettings.RenamedResourcesEntry
    +	nil,                                 // 19: google.api.GoSettings.RenamedServicesEntry
    +	(*MethodSettings_LongRunning)(nil),  // 20: google.api.MethodSettings.LongRunning
    +	(api.LaunchStage)(0),                // 21: google.api.LaunchStage
    +	(*durationpb.Duration)(nil),         // 22: google.protobuf.Duration
    +	(*descriptorpb.MethodOptions)(nil),  // 23: google.protobuf.MethodOptions
    +	(*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions
     }
     var file_google_api_client_proto_depIdxs = []int32{
     	1,  // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
    -	18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
    -	5,  // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
    -	6,  // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
    -	7,  // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
    -	8,  // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
    -	9,  // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
    -	10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
    -	11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
    -	12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
    -	13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
    -	0,  // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
    -	3,  // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
    -	14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
    -	2,  // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
    -	2,  // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
    -	2,  // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
    -	2,  // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
    -	2,  // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
    -	2,  // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
    -	15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
    -	16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
    -	2,  // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
    -	2,  // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
    -	17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
    -	19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
    -	19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
    -	19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
    -	20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions
    -	21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions
    -	21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
    -	31, // [31:31] is the sub-list for method output_type
    -	31, // [31:31] is the sub-list for method input_type
    -	31, // [31:31] is the sub-list for extension type_name
    -	28, // [28:31] is the sub-list for extension extendee
    -	0,  // [0:28] is the sub-list for field type_name
    +	14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration
    +	21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
    +	5,  // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
    +	6,  // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
    +	7,  // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
    +	8,  // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
    +	9,  // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
    +	10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
    +	11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
    +	12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
    +	13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
    +	0,  // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
    +	3,  // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
    +	15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
    +	2,  // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
    +	16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
    +	2,  // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
    +	17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
    +	18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
    +	2,  // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
    +	19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry
    +	20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
    +	22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
    +	22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
    +	22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
    +	23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions
    +	24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions
    +	24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
    +	24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions
    +	35, // [35:35] is the sub-list for method output_type
    +	35, // [35:35] is the sub-list for method input_type
    +	35, // [35:35] is the sub-list for extension type_name
    +	31, // [31:35] is the sub-list for extension extendee
    +	0,  // [0:31] is the sub-list for field type_name
     }
     
     func init() { file_google_api_client_proto_init() }
    @@ -1767,7 +2009,31 @@ func file_google_api_client_proto_init() {
     				return nil
     			}
     		}
    -		file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
    +		file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*SelectiveGapicGeneration); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*PythonSettings_ExperimentalFeatures); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
     			switch v := v.(*MethodSettings_LongRunning); i {
     			case 0:
     				return &v.state
    @@ -1786,8 +2052,8 @@ func file_google_api_client_proto_init() {
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
     			RawDescriptor: file_google_api_client_proto_rawDesc,
     			NumEnums:      2,
    -			NumMessages:   16,
    -			NumExtensions: 3,
    +			NumMessages:   19,
    +			NumExtensions: 4,
     			NumServices:   0,
     		},
     		GoTypes:           file_google_api_client_proto_goTypes,
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
    index 6ce01ac9a6..08505ba3fe 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.12
    +// 	protoc        v4.24.4
     // source: google/api/field_behavior.proto
     
     package annotations
    @@ -195,21 +195,21 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{
     	0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
     	0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
     	0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e,
    -	0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
    +	0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
     	0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
     	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
     	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
     	0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69,
    -	0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65,
    -	0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f,
    -	0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69,
    -	0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    -	0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
    -	0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
    -	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x33,
    +	0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52,
    +	0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70,
    +	0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    +	0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50,
    +	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
    +	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
    +	0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
    +	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
    +	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
     }
     
     var (
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
    index d02e6bbc89..a462e7d013 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.12
    +// 	protoc        v4.24.4
     // source: google/api/field_info.proto
     
     package annotations
    @@ -56,9 +56,9 @@ const (
     	FieldInfo_IPV4 FieldInfo_Format = 2
     	// Internet Protocol v6 value as defined by [RFC
     	// 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be
    -	// normalized to entirely lowercase letters, and zero-padded partial and
    -	// empty octets. For example, the value `2001:DB8::` would be normalized to
    -	// `2001:0db8:0:0`.
    +	// normalized to entirely lowercase letters with zeros compressed, following
    +	// [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example,
    +	// the value `2001:0DB8:0::0` would be normalized to `2001:db8::`.
     	FieldInfo_IPV6 FieldInfo_Format = 3
     	// An IP address in either v4 or v6 format as described by the individual
     	// values defined herein. See the comments on the IPV4 and IPV6 types for
    @@ -121,6 +121,11 @@ type FieldInfo struct {
     	// any API consumer, just documents the API's format for the field it is
     	// applied to.
     	Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"`
    +	// The type(s) that the annotated, generic field may represent.
    +	//
    +	// Currently, this must only be used on fields of type `google.protobuf.Any`.
    +	// Supporting other generic types may be considered in the future.
    +	ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"`
     }
     
     func (x *FieldInfo) Reset() {
    @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format {
     	return FieldInfo_FORMAT_UNSPECIFIED
     }
     
    +func (x *FieldInfo) GetReferencedTypes() []*TypeReference {
    +	if x != nil {
    +		return x.ReferencedTypes
    +	}
    +	return nil
    +}
    +
    +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo].
    +type TypeReference struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// The name of the type that the annotated, generic field may represent.
    +	// If the type is in the same protobuf package, the value can be the simple
    +	// message name e.g., `"MyMessage"`. Otherwise, the value must be the
    +	// fully-qualified message name e.g., `"google.library.v1.Book"`.
    +	//
    +	// If the type(s) are unknown to the service (e.g. the field accepts generic
    +	// user input), use the wildcard `"*"` to denote this behavior.
    +	//
    +	// See [AIP-202](https://google.aip.dev/202#type-references) for more details.
    +	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
    +}
    +
    +func (x *TypeReference) Reset() {
    +	*x = TypeReference{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_field_info_proto_msgTypes[1]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *TypeReference) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*TypeReference) ProtoMessage() {}
    +
    +func (x *TypeReference) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_field_info_proto_msgTypes[1]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead.
    +func (*TypeReference) Descriptor() ([]byte, []int) {
    +	return file_google_api_field_info_proto_rawDescGZIP(), []int{1}
    +}
    +
    +func (x *TypeReference) GetTypeName() string {
    +	if x != nil {
    +		return x.TypeName
    +	}
    +	return ""
    +}
    +
     var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{
     	{
     		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
    @@ -185,6 +254,13 @@ var (
     	//	string actual_ip_address = 4 [
     	//	  (google.api.field_info).format = IPV4_OR_IPV6
     	//	];
    +	//	google.protobuf.Any generic_field = 5 [
    +	//	  (google.api.field_info).referenced_types = {type_name: "ActualType"},
    +	//	  (google.api.field_info).referenced_types = {type_name: "OtherType"},
    +	//	];
    +	//	google.protobuf.Any generic_user_input = 5 [
    +	//	  (google.api.field_info).referenced_types = {type_name: "*"},
    +	//	];
     	//
     	// optional google.api.FieldInfo field_info = 291403980;
     	E_FieldInfo = &file_google_api_field_info_proto_extTypes[0]
    @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{
     	0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67,
     	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
     	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
    -	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09,
    +	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09,
     	0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72,
     	0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
     	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f,
    -	0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22,
    -	0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52,
    -	0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
    -	0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04,
    -	0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03,
    -	0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36,
    -	0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
    -	0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
    -	0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f,
    -	0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63,
    -	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46,
    -	0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
    -	0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
    -	0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
    -	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
    -	0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x33,
    +	0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
    +	0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79,
    +	0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72,
    +	0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64,
    +	0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
    +	0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
    +	0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34,
    +	0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04,
    +	0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f,
    +	0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65,
    +	0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70,
    +	0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79,
    +	0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
    +	0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
    +	0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42,
    +	0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    +	0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74,
    +	0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
    +	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
    +	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
    +	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x33,
     }
     
     var (
    @@ -236,21 +319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte {
     }
     
     var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
    -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
     var file_google_api_field_info_proto_goTypes = []interface{}{
     	(FieldInfo_Format)(0),             // 0: google.api.FieldInfo.Format
     	(*FieldInfo)(nil),                 // 1: google.api.FieldInfo
    -	(*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions
    +	(*TypeReference)(nil),             // 2: google.api.TypeReference
    +	(*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
     }
     var file_google_api_field_info_proto_depIdxs = []int32{
     	0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format
    -	2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions
    -	1, // 2: google.api.field_info:type_name -> google.api.FieldInfo
    -	3, // [3:3] is the sub-list for method output_type
    -	3, // [3:3] is the sub-list for method input_type
    -	2, // [2:3] is the sub-list for extension type_name
    -	1, // [1:2] is the sub-list for extension extendee
    -	0, // [0:1] is the sub-list for field type_name
    +	2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference
    +	3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions
    +	1, // 3: google.api.field_info:type_name -> google.api.FieldInfo
    +	4, // [4:4] is the sub-list for method output_type
    +	4, // [4:4] is the sub-list for method input_type
    +	3, // [3:4] is the sub-list for extension type_name
    +	2, // [2:3] is the sub-list for extension extendee
    +	0, // [0:2] is the sub-list for field type_name
     }
     
     func init() { file_google_api_field_info_proto_init() }
    @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() {
     				return nil
     			}
     		}
    +		file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*TypeReference); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
     	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
    @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() {
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
     			RawDescriptor: file_google_api_field_info_proto_rawDesc,
     			NumEnums:      1,
    -			NumMessages:   1,
    +			NumMessages:   2,
     			NumExtensions: 1,
     			NumServices:   0,
     		},
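
Note on the field_info changes above: the regenerated file adds a `TypeReference` message and a repeated `referenced_types` field to `google.api.FieldInfo`. As a hedged illustration only (not part of this diff), reading that extension off a field descriptor with the updated package might look roughly like the sketch below; which descriptor you inspect is entirely an assumption.

	package example

	import (
		"fmt"

		"google.golang.org/genproto/googleapis/api/annotations"
		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/reflect/protoreflect"
		"google.golang.org/protobuf/types/descriptorpb"
	)

	// printFieldInfo inspects one field descriptor and, if the google.api.field_info
	// extension is present, prints its format plus the referenced types added by this update.
	func printFieldInfo(fd protoreflect.FieldDescriptor) {
		opts, ok := fd.Options().(*descriptorpb.FieldOptions)
		if !ok || !proto.HasExtension(opts, annotations.E_FieldInfo) {
			return
		}
		fi := proto.GetExtension(opts, annotations.E_FieldInfo).(*annotations.FieldInfo)
		fmt.Println("format:", fi.GetFormat())
		for _, tr := range fi.GetReferencedTypes() {
			// GetTypeName returns the fully qualified name of the referenced message type.
			fmt.Println("references:", tr.GetTypeName())
		}
	}
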
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
    index 8a0e1c345b..ffb5838cb1 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/http.proto
     
     package annotations
    @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     	return false
     }
     
    -// # gRPC Transcoding
    +// gRPC Transcoding
     //
     // gRPC Transcoding is a feature for mapping between a gRPC method and one or
     // more HTTP REST endpoints. It allows developers to build a single API service
    @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     //
     // This enables an HTTP REST to gRPC mapping as below:
     //
    -// HTTP | gRPC
    -// -----|-----
    -// `GET /v1/messages/123456`  | `GetMessage(name: "messages/123456")`
    +// - HTTP: `GET /v1/messages/123456`
    +// - gRPC: `GetMessage(name: "messages/123456")`
     //
     // Any fields in the request message which are not bound by the path template
     // automatically become HTTP query parameters if there is no HTTP request body.
    @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     //
     // This enables a HTTP JSON to RPC mapping as below:
     //
    -// HTTP | gRPC
    -// -----|-----
    -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
    -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
    -// "foo"))`
    +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
    +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
    +// SubMessage(subfield: "foo"))`
     //
     // Note that fields which are mapped to URL query parameters must have a
     // primitive type or a repeated primitive type or a non-repeated message type.
    @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     // representation of the JSON in the request body is determined by
     // protos JSON encoding:
     //
    -// HTTP | gRPC
    -// -----|-----
    -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
    -// "123456" message { text: "Hi!" })`
    +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
    +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
     //
     // The special name `*` can be used in the body mapping to define that
     // every field not bound by the path template should be mapped to the
    @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     //
     // The following HTTP JSON to RPC mapping is enabled:
     //
    -// HTTP | gRPC
    -// -----|-----
    -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
    -// "123456" text: "Hi!")`
    +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
    +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
     //
     // Note that when using `*` in the body mapping, it is not possible to
     // have HTTP parameters, as all fields not bound by the path end in
    @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     //
     // This enables the following two alternative HTTP JSON to RPC mappings:
     //
    -// HTTP | gRPC
    -// -----|-----
    -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
    -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
    -// "123456")`
    +// - HTTP: `GET /v1/messages/123456`
    +// - gRPC: `GetMessage(message_id: "123456")`
     //
    -// ## Rules for HTTP mapping
    +// - HTTP: `GET /v1/users/me/messages/123456`
    +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")`
    +//
    +// # Rules for HTTP mapping
     //
     //  1. Leaf request fields (recursive expansion nested messages in the request
     //     message) are classified into three categories:
    @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     //     request body, all
     //     fields are passed via URL path and URL query parameters.
     //
    -// ### Path template syntax
    +// Path template syntax
     //
     //	Template = "/" Segments [ Verb ] ;
     //	Segments = Segment { "/" Segment } ;
    @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     // Document](https://developers.google.com/discovery/v1/reference/apis) as
     // `{+var}`.
     //
    -// ## Using gRPC API Service Configuration
    +// # Using gRPC API Service Configuration
     //
     // gRPC API Service Configuration (service config) is a configuration language
     // for configuring a gRPC service to become a user-facing product. The
    @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool {
     // specified in the service config will override any matching transcoding
     // configuration in the proto.
     //
    -// Example:
    +// The following example selects a gRPC method and applies an `HttpRule` to it:
     //
     //	http:
     //	  rules:
    -//	    # Selects a gRPC method and applies HttpRule to it.
     //	    - selector: example.v1.Messaging.GetMessage
     //	      get: /v1/messages/{message_id}/{sub.subfield}
     //
    -// ## Special notes
    +// # Special notes
     //
     // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
     // proto to JSON conversion must follow the [proto3
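
Aside on the transcoding comment rewrite above: the mapping semantics are unchanged, only the doc formatting moved from tables to bullets. Purely as a hedged illustration (not taken from this diff), building the equivalent rule programmatically with the `annotations` package could look like the sketch below; the selector and path are the example values from the comment, everything else is made up.

	package main

	import (
		"fmt"

		"google.golang.org/genproto/googleapis/api/annotations"
	)

	func main() {
		// Roughly the same mapping as the service-config example in the comment:
		// GET /v1/messages/{message_id}/{sub.subfield} -> example.v1.Messaging.GetMessage.
		rule := &annotations.HttpRule{
			Selector: "example.v1.Messaging.GetMessage",
			Pattern: &annotations.HttpRule_Get{
				Get: "/v1/messages/{message_id}/{sub.subfield}",
			},
		}
		fmt.Println(rule.GetSelector(), rule.GetGet())
	}
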
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
    index bbcc12d29c..b5db279aeb 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/resource.proto
     
     package annotations
    @@ -253,8 +253,13 @@ type ResourceDescriptor struct {
     	History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
     	// The plural name used in the resource name and permission names, such as
     	// 'projects' for the resource name of 'projects/{project}' and the permission
    -	// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
    -	// concept of the `plural` field in k8s CRD spec
    +	// name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
    +	// to this is for Nested Collections that have stuttering names, as defined
    +	// in [AIP-122](https://google.aip.dev/122#nested-collections), where the
    +	// collection ID in the resource name pattern does not necessarily directly
    +	// match the `plural` value.
    +	//
    +	// It is the same concept of the `plural` field in k8s CRD spec
     	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
     	//
     	// Note: The plural form is required even for singleton resources. See
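
For context on the clarified `plural` comment above: the collection ID that appears in a resource name pattern no longer has to match the `plural` value exactly for stuttering nested collections. A minimal hedged sketch, with an invented resource type, of how the two fields sit side by side in a descriptor:

	package main

	import (
		"fmt"

		"google.golang.org/genproto/googleapis/api/annotations"
	)

	func main() {
		// Hypothetical descriptor: the collection ID in the pattern ("users") and the
		// canonical plural are usually the same, but per AIP-122 they may diverge.
		rd := &annotations.ResourceDescriptor{
			Type:     "example.googleapis.com/User",
			Pattern:  []string{"projects/{project}/users/{user}"},
			Singular: "user",
			Plural:   "users",
		}
		fmt.Println(rd.GetPlural(), rd.GetPattern())
	}
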
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
    index 9a9ae04c29..1d8397b02b 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/routing.proto
     
     package annotations
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
    index aa640dc31c..6e01be017c 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/distribution.proto
     
     package distribution
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
    index 75397e1b14..42bcacc363 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/label.proto
     
     package label
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
    index 454948669d..498020e33c 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/launch_stage.proto
     
     package api
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
    index 2af7d129ba..7f6e006cde 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/metric.proto
     
     package metric
    @@ -172,6 +172,63 @@ func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) {
     	return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1}
     }
     
    +// The resource hierarchy level of the timeseries data of a metric.
    +type MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel int32
    +
    +const (
    +	// Do not use this default value.
    +	MetricDescriptor_MetricDescriptorMetadata_TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 0
    +	// Scopes a metric to a project.
    +	MetricDescriptor_MetricDescriptorMetadata_PROJECT MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 1
    +	// Scopes a metric to an organization.
    +	MetricDescriptor_MetricDescriptorMetadata_ORGANIZATION MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 2
    +	// Scopes a metric to a folder.
    +	MetricDescriptor_MetricDescriptorMetadata_FOLDER MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 3
    +)
    +
    +// Enum value maps for MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel.
    +var (
    +	MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel_name = map[int32]string{
    +		0: "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED",
    +		1: "PROJECT",
    +		2: "ORGANIZATION",
    +		3: "FOLDER",
    +	}
    +	MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel_value = map[string]int32{
    +		"TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED": 0,
    +		"PROJECT":      1,
    +		"ORGANIZATION": 2,
    +		"FOLDER":       3,
    +	}
    +)
    +
    +func (x MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Enum() *MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel {
    +	p := new(MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel)
    +	*p = x
    +	return p
    +}
    +
    +func (x MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_api_metric_proto_enumTypes[2].Descriptor()
    +}
    +
    +func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Type() protoreflect.EnumType {
    +	return &file_google_api_metric_proto_enumTypes[2]
    +}
    +
    +func (x MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Use MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel.Descriptor instead.
    +func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) EnumDescriptor() ([]byte, []int) {
    +	return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0, 0}
    +}
    +
     // Defines a metric type and its schema. Once a metric descriptor is created,
     // deleting or altering it stops data collection and makes the metric type's
     // existing data unusable.
    @@ -519,6 +576,8 @@ type MetricDescriptor_MetricDescriptorMetadata struct {
     	// age are guaranteed to be ingested and available to be read, excluding
     	// data loss due to errors.
     	IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"`
    +	// The scope of the timeseries data of the metric.
    +	TimeSeriesResourceHierarchyLevel []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel `protobuf:"varint,4,rep,packed,name=time_series_resource_hierarchy_level,json=timeSeriesResourceHierarchyLevel,proto3,enum=google.api.MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel" json:"time_series_resource_hierarchy_level,omitempty"`
     }
     
     func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() {
    @@ -575,6 +634,13 @@ func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb
     	return nil
     }
     
    +func (x *MetricDescriptor_MetricDescriptorMetadata) GetTimeSeriesResourceHierarchyLevel() []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel {
    +	if x != nil {
    +		return x.TimeSeriesResourceHierarchyLevel
    +	}
    +	return nil
    +}
    +
     var File_google_api_metric_proto protoreflect.FileDescriptor
     
     var file_google_api_metric_proto_rawDesc = []byte{
    @@ -585,7 +651,7 @@ var file_google_api_metric_proto_rawDesc = []byte{
     	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68,
     	0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f,
     	0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
    -	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a,
    +	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x09, 0x0a,
     	0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
     	0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
     	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20,
    @@ -620,7 +686,7 @@ var file_google_api_metric_proto_rawDesc = []byte{
     	0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79,
     	0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
     	0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
    -	0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
    +	0x73, 0x1a, 0x87, 0x04, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
     	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e,
     	0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01,
     	0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    @@ -633,35 +699,54 @@ var file_google_api_metric_proto_rawDesc = []byte{
     	0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
     	0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
     	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
    -	0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a,
    -	0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45,
    -	0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
    -	0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45,
    -	0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a,
    -	0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a,
    -	0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41,
    -	0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
    -	0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01,
    -	0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44,
    -	0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e,
    -	0x47, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54,
    -	0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06,
    -	0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74,
    -	0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
    -	0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
    -	0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
    -	0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
    -	0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c,
    -	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
    -	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
    -	0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
    -	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
    -	0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47,
    -	0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    +	0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0xa6, 0x01, 0x0a,
    +	0x24, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73,
    +	0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79, 0x5f,
    +	0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x56, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
    +	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
    +	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
    +	0x74, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73,
    +	0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79, 0x4c, 0x65,
    +	0x76, 0x65, 0x6c, 0x52, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52,
    +	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79,
    +	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x83, 0x01, 0x0a, 0x20, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65,
    +	0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x69, 0x65, 0x72,
    +	0x61, 0x72, 0x63, 0x68, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x30, 0x54, 0x49,
    +	0x4d, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52,
    +	0x43, 0x45, 0x5f, 0x48, 0x49, 0x45, 0x52, 0x41, 0x52, 0x43, 0x48, 0x59, 0x5f, 0x4c, 0x45, 0x56,
    +	0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
    +	0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a,
    +	0x0c, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12,
    +	0x0a, 0x0a, 0x06, 0x46, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x03, 0x22, 0x4f, 0x0a, 0x0a, 0x4d,
    +	0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, 0x54,
    +	0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
    +	0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
    +	0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a,
    +	0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, 0x09,
    +	0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, 0x4c,
    +	0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
    +	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12,
    +	0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f,
    +	0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
    +	0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x49,
    +	0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, 0x22,
    +	0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
    +	0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x36,
    +	0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72,
    +	0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06,
    +	0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
    +	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
    +	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
    +	0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    +	0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
    +	0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65,
    +	0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, 0x41,
    +	0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
     }
     
     var (
    @@ -676,34 +761,36 @@ func file_google_api_metric_proto_rawDescGZIP() []byte {
     	return file_google_api_metric_proto_rawDescData
     }
     
    -var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
    +var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
     var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
     var file_google_api_metric_proto_goTypes = []interface{}{
    -	(MetricDescriptor_MetricKind)(0),                  // 0: google.api.MetricDescriptor.MetricKind
    -	(MetricDescriptor_ValueType)(0),                   // 1: google.api.MetricDescriptor.ValueType
    -	(*MetricDescriptor)(nil),                          // 2: google.api.MetricDescriptor
    -	(*Metric)(nil),                                    // 3: google.api.Metric
    -	(*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata
    -	nil,                           // 5: google.api.Metric.LabelsEntry
    -	(*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor
    -	(api.LaunchStage)(0),          // 7: google.api.LaunchStage
    -	(*durationpb.Duration)(nil),   // 8: google.protobuf.Duration
    +	(MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind
    +	(MetricDescriptor_ValueType)(0),  // 1: google.api.MetricDescriptor.ValueType
    +	(MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel)(0), // 2: google.api.MetricDescriptor.MetricDescriptorMetadata.TimeSeriesResourceHierarchyLevel
    +	(*MetricDescriptor)(nil), // 3: google.api.MetricDescriptor
    +	(*Metric)(nil),           // 4: google.api.Metric
    +	(*MetricDescriptor_MetricDescriptorMetadata)(nil), // 5: google.api.MetricDescriptor.MetricDescriptorMetadata
    +	nil,                           // 6: google.api.Metric.LabelsEntry
    +	(*label.LabelDescriptor)(nil), // 7: google.api.LabelDescriptor
    +	(api.LaunchStage)(0),          // 8: google.api.LaunchStage
    +	(*durationpb.Duration)(nil),   // 9: google.protobuf.Duration
     }
     var file_google_api_metric_proto_depIdxs = []int32{
    -	6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor
    -	0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
    -	1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType
    -	4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata
    -	7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage
    -	5, // 5: google.api.Metric.labels:type_name -> google.api.Metric.LabelsEntry
    -	7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage
    -	8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration
    -	8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration
    -	9, // [9:9] is the sub-list for method output_type
    -	9, // [9:9] is the sub-list for method input_type
    -	9, // [9:9] is the sub-list for extension type_name
    -	9, // [9:9] is the sub-list for extension extendee
    -	0, // [0:9] is the sub-list for field type_name
    +	7,  // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor
    +	0,  // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
    +	1,  // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType
    +	5,  // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata
    +	8,  // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage
    +	6,  // 5: google.api.Metric.labels:type_name -> google.api.Metric.LabelsEntry
    +	8,  // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage
    +	9,  // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration
    +	9,  // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration
    +	2,  // 9: google.api.MetricDescriptor.MetricDescriptorMetadata.time_series_resource_hierarchy_level:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata.TimeSeriesResourceHierarchyLevel
    +	10, // [10:10] is the sub-list for method output_type
    +	10, // [10:10] is the sub-list for method input_type
    +	10, // [10:10] is the sub-list for extension type_name
    +	10, // [10:10] is the sub-list for extension extendee
    +	0,  // [0:10] is the sub-list for field type_name
     }
     
     func init() { file_google_api_metric_proto_init() }
    @@ -754,7 +841,7 @@ func file_google_api_metric_proto_init() {
     		File: protoimpl.DescBuilder{
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
     			RawDescriptor: file_google_api_metric_proto_rawDesc,
    -			NumEnums:      2,
    +			NumEnums:      3,
     			NumMessages:   4,
     			NumExtensions: 0,
     			NumServices:   0,
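
The metric.pb.go regeneration above introduces the repeated `time_series_resource_hierarchy_level` field and its `TimeSeriesResourceHierarchyLevel` enum. A minimal hedged sketch of populating the new field, using only names that appear in the generated code above (the surrounding values are invented):

	package main

	import (
		"fmt"

		"google.golang.org/genproto/googleapis/api/metric"
	)

	func main() {
		// Scope a (hypothetical) metric's time series to project and organization levels.
		md := &metric.MetricDescriptor_MetricDescriptorMetadata{
			TimeSeriesResourceHierarchyLevel: []metric.MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel{
				metric.MetricDescriptor_MetricDescriptorMetadata_PROJECT,
				metric.MetricDescriptor_MetricDescriptorMetadata_ORGANIZATION,
			},
		}
		fmt.Println(md.GetTimeSeriesResourceHierarchyLevel())
	}
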
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
    index de791ea614..b4cee29803 100644
    --- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/api/monitored_resource.proto
     
     package monitoredres
    @@ -63,7 +63,7 @@ type MonitoredResourceDescriptor struct {
     	// Required. The monitored resource type. For example, the type
     	// `"cloudsql_database"` represents databases in Google Cloud SQL.
     	//
    -	//	For a list of types, see [Monitoring resource
    +	//	For a list of types, see [Monitored resource
     	//	types](https://cloud.google.com/monitoring/api/resources)
     	//
     	// and [Logging resource
    diff --git a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go
    deleted file mode 100644
    index 1d3f1b5b7e..0000000000
    --- a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -// Copyright 2023 Google LLC
    -//
    -// Licensed under the Apache License, Version 2.0 (the "License");
    -// you may not use this file except in compliance with the License.
    -// You may obtain a copy of the License at
    -//
    -//      http://www.apache.org/licenses/LICENSE-2.0
    -//
    -// Unless required by applicable law or agreed to in writing, software
    -// distributed under the License is distributed on an "AS IS" BASIS,
    -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -// See the License for the specific language governing permissions and
    -// limitations under the License.
    -
    -// This file, and the {{.RootMod}} import, won't actually become part of
    -// the resultant binary.
    -//go:build modhack
    -// +build modhack
    -
    -package api
    -
    -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
    -import _ "google.golang.org/genproto/internal"
    diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
    index cc5d52fbcc..bd46edbe73 100644
    --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2022 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/rpc/code.proto
     
     package code
    diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
    index 7bd161e48a..3cd9a5bb8e 100644
    --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2022 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/rpc/error_details.proto
     
     package errdetails
    @@ -80,11 +80,12 @@ type ErrorInfo struct {
     	Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
     	// Additional structured details about this error.
     	//
    -	// Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
    +	// Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
    +	// ideally be lowerCamelCase. Also, they must be limited to 64 characters in
     	// length. When identifying the current value of an exceeded limit, the units
     	// should be contained in the key, not the value.  For example, rather than
    -	// {"instanceLimit": "100/request"}, should be returned as,
    -	// {"instanceLimitPerRequest": "100"}, if the client exceeds the number of
    +	// `{"instanceLimit": "100/request"}`, should be returned as,
    +	// `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
     	// instances that can be created in a single (batch) request.
     	Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
     }
    @@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct {
     	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
     	// A description of why the request element is bad.
     	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
    +	// The reason of the field-level error. This is a constant value that
    +	// identifies the proximate cause of the field-level error. It should
    +	// uniquely identify the type of the FieldViolation within the scope of the
    +	// google.rpc.ErrorInfo.domain. This should be at most 63
    +	// characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`,
    +	// which represents UPPER_SNAKE_CASE.
    +	Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"`
    +	// Provides a localized error message for field-level errors that is safe to
    +	// return to the API consumer.
    +	LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"`
     }
     
     func (x *BadRequest_FieldViolation) Reset() {
    @@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string {
     	return ""
     }
     
    +func (x *BadRequest_FieldViolation) GetReason() string {
    +	if x != nil {
    +		return x.Reason
    +	}
    +	return ""
    +}
    +
    +func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage {
    +	if x != nil {
    +		return x.LocalizedMessage
    +	}
    +	return nil
    +}
    +
     // Describes a URL link.
     type Help_Link struct {
     	state         protoimpl.MessageState
    @@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{
     	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
     	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
     	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
    -	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61,
    +	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61,
     	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
     	0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
     	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
     	0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
     	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
    -	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69,
    -	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05,
    -	0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65,
    -	0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
    -	0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69,
    -	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
    -	0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
    -	0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
    -	0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
    -	0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
    -	0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72,
    -	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
    -	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
    -	0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65,
    -	0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
    -	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70,
    -	0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
    -	0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c,
    -	0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a,
    -	0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
    -	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
    -	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63,
    -	0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a,
    -	0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c,
    -	0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
    -	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42,
    -	0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
    -	0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
    -	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70,
    -	0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72,
    -	0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x33,
    +	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46,
    +	0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
    +	0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69,
    +	0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
    +	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18,
    +	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a,
    +	0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
    +	0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d,
    +	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65,
    +	0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75,
    +	0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65,
    +	0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
    +	0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
    +	0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65,
    +	0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65,
    +	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65,
    +	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
    +	0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
    +	0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
    +	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
    +	0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20,
    +	0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
    +	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04,
    +	0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20,
    +	0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
    +	0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
    +	0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
    +	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
    +	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75,
    +	0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a,
    +	0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
    +	0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
    +	0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73,
    +	0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73,
    +	0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61,
    +	0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65,
    +	0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
    +	0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
    +	0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50,
    +	0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
     }
     
     var (
    @@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{
     	12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
     	13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
     	14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
    -	6,  // [6:6] is the sub-list for method output_type
    -	6,  // [6:6] is the sub-list for method input_type
    -	6,  // [6:6] is the sub-list for extension type_name
    -	6,  // [6:6] is the sub-list for extension extendee
    -	0,  // [0:6] is the sub-list for field type_name
    +	9,  // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
    +	7,  // [7:7] is the sub-list for method output_type
    +	7,  // [7:7] is the sub-list for method input_type
    +	7,  // [7:7] is the sub-list for extension type_name
    +	7,  // [7:7] is the sub-list for extension extendee
    +	0,  // [0:7] is the sub-list for field type_name
     }
     
     func init() { file_google_rpc_error_details_proto_init() }
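
The error_details.pb.go update above adds `Reason` and `LocalizedMessage` to `BadRequest_FieldViolation`. As a hedged sketch only (field names, reason code, and messages are invented; the new fields themselves come from the generated code above), attaching such a detail to a gRPC status could look like this:

	package main

	import (
		"fmt"

		"google.golang.org/genproto/googleapis/rpc/errdetails"
		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	func main() {
		br := &errdetails.BadRequest{
			FieldViolations: []*errdetails.BadRequest_FieldViolation{{
				Field:       "book.title",
				Description: "title must not be empty",
				// Reason is UPPER_SNAKE_CASE per the new doc comment; this value is made up.
				Reason: "MISSING_TITLE",
				LocalizedMessage: &errdetails.LocalizedMessage{
					Locale:  "en-US",
					Message: "Please provide a title.",
				},
			}},
		}

		// Attach the detail to an InvalidArgument status.
		st, err := status.New(codes.InvalidArgument, "invalid request").WithDetails(br)
		if err != nil {
			panic(err)
		}
		fmt.Println(st.Err())
	}
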
    diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
    index a6b5081888..6ad1b1c1df 100644
    --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2022 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.21.9
    +// 	protoc        v4.24.4
     // source: google/rpc/status.proto
     
     package status
    diff --git a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go
    index 54de5458c6..cae02bce14 100644
    --- a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go
    +++ b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go
    @@ -1,4 +1,4 @@
    -// Copyright 2021 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -15,7 +15,7 @@
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
     // 	protoc-gen-go v1.26.0
    -// 	protoc        v3.12.2
    +// 	protoc        v4.24.4
     // source: google/type/calendar_period.proto
     
     package calendarperiod
    diff --git a/vendor/google.golang.org/genproto/internal/doc.go b/vendor/google.golang.org/genproto/internal/doc.go
    deleted file mode 100644
    index 90e89b4aa3..0000000000
    --- a/vendor/google.golang.org/genproto/internal/doc.go
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -// Copyright 2023 Google LLC
    -//
    -// Licensed under the Apache License, Version 2.0 (the "License");
    -// you may not use this file except in compliance with the License.
    -// You may obtain a copy of the License at
    -//
    -//      http://www.apache.org/licenses/LICENSE-2.0
    -//
    -// Unless required by applicable law or agreed to in writing, software
    -// distributed under the License is distributed on an "AS IS" BASIS,
    -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -// See the License for the specific language governing permissions and
    -// limitations under the License.
    -
    -// This file makes internal an importable go package
    -// for use with backreferences from submodules.
    -package internal
    diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
    index 608aa6e1ac..d9bfa6e1e7 100644
    --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
    +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
    @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR
     organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
     and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
     
    -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
    +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
     
     ## Legal requirements
     
    @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly.
       is a great place to start. These issues are well-documented and usually can be
       resolved with a single pull request.
     
    -- If you are adding a new file, make sure it has the copyright message template 
    -  at the top as a comment. You can copy over the message from an existing file 
    +- If you are adding a new file, make sure it has the copyright message template
    +  at the top as a comment. You can copy over the message from an existing file
       and update the year.
     
     - The grpc package should only depend on standard Go packages and a small number
    @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly.
       proposal](https://github.com/grpc/proposal).
     
     - Provide a good **PR description** as a record of **what** change is being made
    -  and **why** it was made. Link to a github issue if it exists.
    +  and **why** it was made. Link to a GitHub issue if it exists.
     
    -- If you want to fix formatting or style, consider whether your changes are an 
    -  obvious improvement or might be considered a personal preference. If a style 
    -  change is based on preference, it likely will not be accepted. If it corrects 
    -  widely agreed-upon anti-patterns, then please do create a PR and explain the 
    +- If you want to fix formatting or style, consider whether your changes are an
    +  obvious improvement or might be considered a personal preference. If a style
    +  change is based on preference, it likely will not be accepted. If it corrects
    +  widely agreed-upon anti-patterns, then please do create a PR and explain the
       benefits of the change.
     
     - Unless your PR is trivial, you should expect there will be reviewer comments
    @@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly.
     - **All tests need to be passing** before your change can be merged. We
       recommend you **run tests locally** before creating your PR to catch breakages
       early on.
    -  - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
    +  - `./scripts/vet.sh` to catch vet errors
       - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
       - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
     
    diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
    index c6672c0a3e..5d4096d46a 100644
    --- a/vendor/google.golang.org/grpc/MAINTAINERS.md
    +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
    @@ -9,20 +9,28 @@ for general contribution guidelines.
     
     ## Maintainers (in alphabetical order)
     
    -- [cesarghali](https://github.com/cesarghali), Google LLC
    +- [aranjans](https://github.com/aranjans), Google LLC
    +- [arjan-bal](https://github.com/arjan-bal), Google LLC
    +- [arvindbr8](https://github.com/arvindbr8), Google LLC
    +- [atollena](https://github.com/atollena), Datadog, Inc.
     - [dfawley](https://github.com/dfawley), Google LLC
     - [easwars](https://github.com/easwars), Google LLC
    -- [menghanl](https://github.com/menghanl), Google LLC
    -- [srini100](https://github.com/srini100), Google LLC
    +- [erm-g](https://github.com/erm-g), Google LLC
    +- [gtcooke94](https://github.com/gtcooke94), Google LLC
    +- [purnesh42h](https://github.com/purnesh42h), Google LLC
    +- [zasweq](https://github.com/zasweq), Google LLC
     
     ## Emeritus Maintainers (in alphabetical order)
    -- [adelez](https://github.com/adelez), Google LLC
    -- [canguler](https://github.com/canguler), Google LLC
    -- [iamqizhao](https://github.com/iamqizhao), Google LLC
    -- [jadekler](https://github.com/jadekler), Google LLC
    -- [jtattermusch](https://github.com/jtattermusch), Google LLC
    -- [lyuxuan](https://github.com/lyuxuan), Google LLC
    -- [makmukhi](https://github.com/makmukhi), Google LLC
    -- [matt-kwong](https://github.com/matt-kwong), Google LLC
    -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
    -- [yongni](https://github.com/yongni), Google LLC
    +- [adelez](https://github.com/adelez)
    +- [canguler](https://github.com/canguler)
    +- [cesarghali](https://github.com/cesarghali)
    +- [iamqizhao](https://github.com/iamqizhao)
    +- [jeanbza](https://github.com/jeanbza)
    +- [jtattermusch](https://github.com/jtattermusch)
    +- [lyuxuan](https://github.com/lyuxuan)
    +- [makmukhi](https://github.com/makmukhi)
    +- [matt-kwong](https://github.com/matt-kwong)
    +- [menghanl](https://github.com/menghanl)
    +- [nicolasnoble](https://github.com/nicolasnoble)
    +- [srini100](https://github.com/srini100)
    +- [yongni](https://github.com/yongni)
    diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
    index 1f8960922b..be38384ff6 100644
    --- a/vendor/google.golang.org/grpc/Makefile
    +++ b/vendor/google.golang.org/grpc/Makefile
    @@ -30,17 +30,20 @@ testdeps:
     	GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
     
     vet: vetdeps
    -	./vet.sh
    +	./scripts/vet.sh
     
     vetdeps:
    -	./vet.sh -install
    +	./scripts/vet.sh -install
     
     .PHONY: \
     	all \
     	build \
     	clean \
    +	deps \
     	proto \
     	test \
    +	testsubmodule \
     	testrace \
    +	testdeps \
     	vet \
     	vetdeps
    diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
    index ab0fbb79b8..b572707c62 100644
    --- a/vendor/google.golang.org/grpc/README.md
    +++ b/vendor/google.golang.org/grpc/README.md
    @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the
     
     ## Prerequisites
     
    -- **[Go][]**: any one of the **three latest major** [releases][go-releases].
    +- **[Go][]**: any one of the **two latest major** [releases][go-releases].
     
     ## Installation
     
    diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
    index be6e108705..abab279379 100644
    --- a/vendor/google.golang.org/grpc/SECURITY.md
    +++ b/vendor/google.golang.org/grpc/SECURITY.md
    @@ -1,3 +1,3 @@
     # Security Policy
     
    -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
    +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
    diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
    index 0787d0b50c..d7b40b7cb6 100644
    --- a/vendor/google.golang.org/grpc/backoff/backoff.go
    +++ b/vendor/google.golang.org/grpc/backoff/backoff.go
    @@ -39,7 +39,7 @@ type Config struct {
     	MaxDelay time.Duration
     }
     
    -// DefaultConfig is a backoff configuration with the default values specfied
    +// DefaultConfig is a backoff configuration with the default values specified
     // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
     //
     // This should be useful for callers who want to configure backoff with
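The backoff.go change above is only a doc-comment typo fix; for orientation, the Config documented here is what callers hand to the dialer via grpc.WithConnectParams. A minimal sketch under that assumption (the target string is a placeholder):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from the documented defaults and only raise the ceiling.
	cfg := backoff.DefaultConfig
	cfg.MaxDelay = 2 * time.Minute

	conn, err := grpc.NewClient("dns:///lb.example.com:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           cfg,
			MinConnectTimeout: 20 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}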
    diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
    index d79560a2e2..382ad69411 100644
    --- a/vendor/google.golang.org/grpc/balancer/balancer.go
    +++ b/vendor/google.golang.org/grpc/balancer/balancer.go
    @@ -30,6 +30,7 @@ import (
     	"google.golang.org/grpc/channelz"
     	"google.golang.org/grpc/connectivity"
     	"google.golang.org/grpc/credentials"
    +	estats "google.golang.org/grpc/experimental/stats"
     	"google.golang.org/grpc/grpclog"
     	"google.golang.org/grpc/internal"
     	"google.golang.org/grpc/metadata"
    @@ -54,13 +55,14 @@ var (
     // an init() function), and is not thread-safe. If multiple Balancers are
     // registered with the same name, the one registered last will take effect.
     func Register(b Builder) {
    -	if strings.ToLower(b.Name()) != b.Name() {
    +	name := strings.ToLower(b.Name())
    +	if name != b.Name() {
     		// TODO: Skip the use of strings.ToLower() to index the map after v1.59
     		// is released to switch to case sensitive balancer registry. Also,
     		// remove this warning and update the docstrings for Register and Get.
     		logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
     	}
    -	m[strings.ToLower(b.Name())] = b
    +	m[name] = b
     }
     
     // unregisterForTesting deletes the balancer with the given name from the
    @@ -73,6 +75,8 @@ func unregisterForTesting(name string) {
     
     func init() {
     	internal.BalancerUnregister = unregisterForTesting
    +	internal.ConnectedAddress = connectedAddress
    +	internal.SetConnectedAddress = setConnectedAddress
     }
     
     // Get returns the resolver builder registered with the given name.
    @@ -91,54 +95,6 @@ func Get(name string) Builder {
     	return nil
     }
     
    -// A SubConn represents a single connection to a gRPC backend service.
    -//
    -// Each SubConn contains a list of addresses.
    -//
    -// All SubConns start in IDLE, and will not try to connect. To trigger the
    -// connecting, Balancers must call Connect.  If a connection re-enters IDLE,
    -// Balancers must call Connect again to trigger a new connection attempt.
    -//
    -// gRPC will try to connect to the addresses in sequence, and stop trying the
    -// remainder once the first connection is successful. If an attempt to connect
    -// to all addresses encounters an error, the SubConn will enter
    -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
    -//
    -// Once established, if a connection is lost, the SubConn will transition
    -// directly to IDLE.
    -//
    -// This interface is to be implemented by gRPC. Users should not need their own
    -// implementation of this interface. For situations like testing, any
    -// implementations should embed this interface. This allows gRPC to add new
    -// methods to this interface.
    -type SubConn interface {
    -	// UpdateAddresses updates the addresses used in this SubConn.
    -	// gRPC checks if currently-connected address is still in the new list.
    -	// If it's in the list, the connection will be kept.
    -	// If it's not in the list, the connection will gracefully closed, and
    -	// a new connection will be created.
    -	//
    -	// This will trigger a state transition for the SubConn.
    -	//
    -	// Deprecated: this method will be removed.  Create new SubConns for new
    -	// addresses instead.
    -	UpdateAddresses([]resolver.Address)
    -	// Connect starts the connecting for this SubConn.
    -	Connect()
    -	// GetOrBuildProducer returns a reference to the existing Producer for this
    -	// ProducerBuilder in this SubConn, or, if one does not currently exist,
    -	// creates a new one and returns it.  Returns a close function which must
    -	// be called when the Producer is no longer needed.
    -	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
    -	// Shutdown shuts down the SubConn gracefully.  Any started RPCs will be
    -	// allowed to complete.  No future calls should be made on the SubConn.
    -	// One final state update will be delivered to the StateListener (or
    -	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
    -	// indicate the shutdown operation.  This may be delivered before
    -	// in-progress RPCs are complete and the actual connection is closed.
    -	Shutdown()
    -}
    -
     // NewSubConnOptions contains options to create new SubConn.
     type NewSubConnOptions struct {
     	// CredsBundle is the credentials bundle that will be used in the created
    @@ -232,8 +188,8 @@ type BuildOptions struct {
     	// implementations which do not communicate with a remote load balancer
     	// server can ignore this field.
     	Authority string
    -	// ChannelzParentID is the parent ClientConn's channelz ID.
    -	ChannelzParentID *channelz.Identifier
    +	// ChannelzParent is the parent ClientConn's channelz channel.
    +	ChannelzParent channelz.Identifier
     	// CustomUserAgent is the custom user agent set on the parent ClientConn.
     	// The balancer should set the same custom user agent if it creates a
     	// ClientConn.
    @@ -242,6 +198,10 @@ type BuildOptions struct {
     	// same resolver.Target as passed to the resolver. See the documentation for
     	// the resolver.Target type for details about what it contains.
     	Target resolver.Target
    +	// MetricsRecorder is the metrics recorder that balancers can use to record
    +	// metrics. Balancer implementations which do not register metrics on
    +	// metrics registry and record on them can ignore this field.
    +	MetricsRecorder estats.MetricsRecorder
     }
     
     // Builder creates a balancer.
    @@ -402,15 +362,6 @@ type ExitIdler interface {
     	ExitIdle()
     }
     
    -// SubConnState describes the state of a SubConn.
    -type SubConnState struct {
    -	// ConnectivityState is the connectivity state of the SubConn.
    -	ConnectivityState connectivity.State
    -	// ConnectionError is set if the ConnectivityState is TransientFailure,
    -	// describing the reason the SubConn failed.  Otherwise, it is nil.
    -	ConnectionError error
    -}
    -
     // ClientConnState describes the state of a ClientConn relevant to the
     // balancer.
     type ClientConnState struct {
    @@ -423,20 +374,3 @@ type ClientConnState struct {
     // ErrBadResolverState may be returned by UpdateClientConnState to indicate a
     // problem with the provided name resolver data.
     var ErrBadResolverState = errors.New("bad resolver state")
    -
    -// A ProducerBuilder is a simple constructor for a Producer.  It is used by the
    -// SubConn to create producers when needed.
    -type ProducerBuilder interface {
    -	// Build creates a Producer.  The first parameter is always a
    -	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
    -	// associated SubConn), but is declared as `any` to avoid a dependency
    -	// cycle.  Should also return a close function that will be called when all
    -	// references to the Producer have been given up.
    -	Build(grpcClientConnInterface any) (p Producer, close func())
    -}
    -
    -// A Producer is a type shared among potentially many consumers.  It is
    -// associated with a SubConn, and an implementation will typically contain
    -// other methods to provide additional functionality, e.g. configuration or
    -// subscription registration.
    -type Producer any
    diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
    index a7f1eeec8e..d5ed172ae6 100644
    --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
    +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
    @@ -36,7 +36,7 @@ type baseBuilder struct {
     	config        Config
     }
     
    -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
    +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
     	bal := &baseBalancer{
     		cc:            cc,
     		pickerBuilder: bb.pickerBuilder,
    @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
     		}
     	}
     	// If resolver state contains no addresses, return an error so ClientConn
    -	// will trigger re-resolve. Also records this as an resolver error, so when
    +	// will trigger re-resolve. Also records this as a resolver error, so when
     	// the overall state turns transient failure, the error message will have
     	// the zero address information.
     	if len(s.ResolverState.Addresses) == 0 {
    @@ -259,6 +259,6 @@ type errPicker struct {
     	err error // Pick() always returns this err.
     }
     
    -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
    +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
     	return balancer.PickResult{}, p.err
     }
    diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
    index f354530289..3f274482c7 100644
    --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
    +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
    @@ -19,8 +19,8 @@
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
    -// 	protoc-gen-go v1.31.0
    -// 	protoc        v4.22.0
    +// 	protoc-gen-go v1.35.1
    +// 	protoc        v5.27.1
     // source: grpc/lb/v1/load_balancer.proto
     
     package grpc_lb_v1
    @@ -55,11 +55,9 @@ type LoadBalanceRequest struct {
     
     func (x *LoadBalanceRequest) Reset() {
     	*x = LoadBalanceRequest{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *LoadBalanceRequest) String() string {
    @@ -70,7 +68,7 @@ func (*LoadBalanceRequest) ProtoMessage() {}
     
     func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -139,11 +137,9 @@ type InitialLoadBalanceRequest struct {
     
     func (x *InitialLoadBalanceRequest) Reset() {
     	*x = InitialLoadBalanceRequest{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *InitialLoadBalanceRequest) String() string {
    @@ -154,7 +150,7 @@ func (*InitialLoadBalanceRequest) ProtoMessage() {}
     
     func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -190,11 +186,9 @@ type ClientStatsPerToken struct {
     
     func (x *ClientStatsPerToken) Reset() {
     	*x = ClientStatsPerToken{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ClientStatsPerToken) String() string {
    @@ -205,7 +199,7 @@ func (*ClientStatsPerToken) ProtoMessage() {}
     
     func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -258,11 +252,9 @@ type ClientStats struct {
     
     func (x *ClientStats) Reset() {
     	*x = ClientStats{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ClientStats) String() string {
    @@ -273,7 +265,7 @@ func (*ClientStats) ProtoMessage() {}
     
     func (x *ClientStats) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -345,11 +337,9 @@ type LoadBalanceResponse struct {
     
     func (x *LoadBalanceResponse) Reset() {
     	*x = LoadBalanceResponse{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *LoadBalanceResponse) String() string {
    @@ -360,7 +350,7 @@ func (*LoadBalanceResponse) ProtoMessage() {}
     
     func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -438,11 +428,9 @@ type FallbackResponse struct {
     
     func (x *FallbackResponse) Reset() {
     	*x = FallbackResponse{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FallbackResponse) String() string {
    @@ -453,7 +441,7 @@ func (*FallbackResponse) ProtoMessage() {}
     
     func (x *FallbackResponse) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -481,11 +469,9 @@ type InitialLoadBalanceResponse struct {
     
     func (x *InitialLoadBalanceResponse) Reset() {
     	*x = InitialLoadBalanceResponse{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *InitialLoadBalanceResponse) String() string {
    @@ -496,7 +482,7 @@ func (*InitialLoadBalanceResponse) ProtoMessage() {}
     
     func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -532,11 +518,9 @@ type ServerList struct {
     
     func (x *ServerList) Reset() {
     	*x = ServerList{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ServerList) String() string {
    @@ -547,7 +531,7 @@ func (*ServerList) ProtoMessage() {}
     
     func (x *ServerList) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -597,11 +581,9 @@ type Server struct {
     
     func (x *Server) Reset() {
     	*x = Server{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Server) String() string {
    @@ -612,7 +594,7 @@ func (*Server) ProtoMessage() {}
     
     func (x *Server) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -780,7 +762,7 @@ func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte {
     }
     
     var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
    -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{
    +var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{
     	(*LoadBalanceRequest)(nil),         // 0: grpc.lb.v1.LoadBalanceRequest
     	(*InitialLoadBalanceRequest)(nil),  // 1: grpc.lb.v1.InitialLoadBalanceRequest
     	(*ClientStatsPerToken)(nil),        // 2: grpc.lb.v1.ClientStatsPerToken
    @@ -817,121 +799,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
     	if File_grpc_lb_v1_load_balancer_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*LoadBalanceRequest); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*InitialLoadBalanceRequest); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ClientStatsPerToken); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ClientStats); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*LoadBalanceResponse); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FallbackResponse); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*InitialLoadBalanceResponse); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ServerList); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Server); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
    -	file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{
    +	file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{
     		(*LoadBalanceRequest_InitialRequest)(nil),
     		(*LoadBalanceRequest_ClientStats)(nil),
     	}
    -	file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{
    +	file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{
     		(*LoadBalanceResponse_InitialResponse)(nil),
     		(*LoadBalanceResponse_ServerList)(nil),
     		(*LoadBalanceResponse_FallbackResponse)(nil),
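The regenerated message code drops the protoimpl.UnsafeEnabled branches and the reflection exporters, but the public message and oneof wrapper types listed above are unchanged. A small hedged sketch of building a request with those wrappers (field names follow standard protoc-gen-go naming; the service name is a placeholder):

package example

import lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"

// initialRequest builds the first message sent on a BalanceLoad stream,
// using the generated oneof wrapper type listed in the hunk above.
func initialRequest(service string) *lbpb.LoadBalanceRequest {
	return &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: &lbpb.InitialLoadBalanceRequest{Name: service},
		},
	}
}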
    diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
    index 00d0954b38..84e6a25056 100644
    --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
    +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
    @@ -19,8 +19,8 @@
     
     // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
     // versions:
    -// - protoc-gen-go-grpc v1.3.0
    -// - protoc             v4.22.0
    +// - protoc-gen-go-grpc v1.5.1
    +// - protoc             v5.27.1
     // source: grpc/lb/v1/load_balancer.proto
     
     package grpc_lb_v1
    @@ -34,8 +34,8 @@ import (
     
     // This is a compile-time assertion to ensure that this generated file
     // is compatible with the grpc package it is being compiled against.
    -// Requires gRPC-Go v1.32.0 or later.
    -const _ = grpc.SupportPackageIsVersion7
    +// Requires gRPC-Go v1.64.0 or later.
    +const _ = grpc.SupportPackageIsVersion9
     
     const (
     	LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad"
    @@ -46,7 +46,7 @@ const (
     // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
     type LoadBalancerClient interface {
     	// Bidirectional rpc to get a list of servers.
    -	BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error)
    +	BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error)
     }
     
     type loadBalancerClient struct {
    @@ -57,52 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient {
     	return &loadBalancerClient{cc}
     }
     
    -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) {
    -	stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...)
    +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) {
    +	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
    +	stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...)
     	if err != nil {
     		return nil, err
     	}
    -	x := &loadBalancerBalanceLoadClient{stream}
    +	x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream}
     	return x, nil
     }
     
    -type LoadBalancer_BalanceLoadClient interface {
    -	Send(*LoadBalanceRequest) error
    -	Recv() (*LoadBalanceResponse, error)
    -	grpc.ClientStream
    -}
    -
    -type loadBalancerBalanceLoadClient struct {
    -	grpc.ClientStream
    -}
    -
    -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error {
    -	return x.ClientStream.SendMsg(m)
    -}
    -
    -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) {
    -	m := new(LoadBalanceResponse)
    -	if err := x.ClientStream.RecvMsg(m); err != nil {
    -		return nil, err
    -	}
    -	return m, nil
    -}
    +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
    +type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse]
     
     // LoadBalancerServer is the server API for LoadBalancer service.
     // All implementations should embed UnimplementedLoadBalancerServer
    -// for forward compatibility
    +// for forward compatibility.
     type LoadBalancerServer interface {
     	// Bidirectional rpc to get a list of servers.
    -	BalanceLoad(LoadBalancer_BalanceLoadServer) error
    +	BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error
     }
     
    -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations.
    -type UnimplementedLoadBalancerServer struct {
    -}
    +// UnimplementedLoadBalancerServer should be embedded to have
    +// forward compatible implementations.
    +//
    +// NOTE: this should be embedded by value instead of pointer to avoid a nil
    +// pointer dereference when methods are called.
    +type UnimplementedLoadBalancerServer struct{}
     
    -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error {
    +func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error {
     	return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented")
     }
    +func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {}
     
     // UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service.
     // Use of this interface is not recommended, as added methods to LoadBalancerServer will
    @@ -112,34 +98,22 @@ type UnsafeLoadBalancerServer interface {
     }
     
     func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) {
    +	// If the following call panics, it indicates UnimplementedLoadBalancerServer was
    +	// embedded by pointer and is nil.  This will cause panics if an
    +	// unimplemented method is ever invoked, so we test this at initialization
    +	// time to prevent it from happening at runtime later due to I/O.
    +	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
    +		t.testEmbeddedByValue()
    +	}
     	s.RegisterService(&LoadBalancer_ServiceDesc, srv)
     }
     
     func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error {
    -	return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream})
    -}
    -
    -type LoadBalancer_BalanceLoadServer interface {
    -	Send(*LoadBalanceResponse) error
    -	Recv() (*LoadBalanceRequest, error)
    -	grpc.ServerStream
    -}
    -
    -type loadBalancerBalanceLoadServer struct {
    -	grpc.ServerStream
    +	return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream})
     }
     
    -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error {
    -	return x.ServerStream.SendMsg(m)
    -}
    -
    -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) {
    -	m := new(LoadBalanceRequest)
    -	if err := x.ServerStream.RecvMsg(m); err != nil {
    -		return nil, err
    -	}
    -	return m, nil
    -}
    +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
    +type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]
     
     // LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service.
     // It's only intended for direct use with grpc.RegisterService,
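Because LoadBalancer_BalanceLoadClient and LoadBalancer_BalanceLoadServer are now aliases for the generic grpc.BidiStreamingClient/BidiStreamingServer types, code that names them keeps compiling, and Send/Recv behave as before. A hedged sketch, purely to illustrate the generated surface (it reuses the hypothetical initialRequest helper from the previous sketch; the target is a placeholder):

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
	"google.golang.org/grpc/credentials/insecure"
)

func balanceOnce(ctx context.Context, target string) error {
	conn, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()

	// BalanceLoad now returns grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse].
	stream, err := lbpb.NewLoadBalancerClient(conn).BalanceLoad(ctx)
	if err != nil {
		return err
	}
	if err := stream.Send(initialRequest("example-service")); err != nil {
		return err
	}
	resp, err := stream.Recv()
	if err != nil {
		return err
	}
	log.Printf("initial response: %v", resp.GetInitialResponse())
	return nil
}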
    diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
    index 86ba65be4c..0770b88e96 100644
    --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
    +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
    @@ -44,8 +44,8 @@ import (
     	"google.golang.org/grpc/internal/resolver/dns"
     	"google.golang.org/grpc/resolver"
     	"google.golang.org/grpc/resolver/manual"
    +	"google.golang.org/protobuf/types/known/durationpb"
     
    -	durationpb "github.com/golang/protobuf/ptypes/duration"
     	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
     )
     
    @@ -197,7 +197,7 @@ type lbBalancer struct {
     
     	// manualResolver is used in the remote LB ClientConn inside grpclb. When
     	// resolved address updates are received by grpclb, filtered updates will be
    -	// send to remote LB ClientConn through this resolver.
    +	// sent to remote LB ClientConn through this resolver.
     	manualResolver *manual.Resolver
     	// The ClientConn to talk to the remote balancer.
     	ccRemoteLB *remoteBalancerCCWrapper
    @@ -219,7 +219,7 @@ type lbBalancer struct {
     	// All backends addresses, with metadata set to nil. This list contains all
     	// backend addresses in the same order and with the same duplicates as in
     	// serverlist. When generating picker, a SubConn slice with the same order
    -	// but with only READY SCs will be gerenated.
    +	// but with only READY SCs will be generated.
     	backendAddrsWithoutMetadata []resolver.Address
     	// Roundrobin functionalities.
     	state    connectivity.State
    diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
    index 8942c31310..96a57c8c70 100644
    --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
    +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
    @@ -21,14 +21,14 @@ package grpclb
     import (
     	"encoding/json"
     
    -	"google.golang.org/grpc"
    +	"google.golang.org/grpc/balancer/pickfirst"
     	"google.golang.org/grpc/balancer/roundrobin"
     	"google.golang.org/grpc/serviceconfig"
     )
     
     const (
     	roundRobinName = roundrobin.Name
    -	pickFirstName  = grpc.PickFirstBalancerName
    +	pickFirstName  = pickfirst.Name
     )
     
     type grpclbServiceConfig struct {
    diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
    index 20c5f2ec39..9ff07522d7 100644
    --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
    +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
    @@ -19,13 +19,13 @@
     package grpclb
     
     import (
    +	rand "math/rand/v2"
     	"sync"
     	"sync/atomic"
     
     	"google.golang.org/grpc/balancer"
     	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
     	"google.golang.org/grpc/codes"
    -	"google.golang.org/grpc/internal/grpcrand"
     	"google.golang.org/grpc/status"
     )
     
    @@ -112,7 +112,7 @@ type rrPicker struct {
     func newRRPicker(readySCs []balancer.SubConn) *rrPicker {
     	return &rrPicker{
     		subConns:     readySCs,
    -		subConnsNext: grpcrand.Intn(len(readySCs)),
    +		subConnsNext: rand.IntN(len(readySCs)),
     	}
     }
     
    @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *
     	return &lbPicker{
     		serverList:   serverList,
     		subConns:     readySCs,
    -		subConnsNext: grpcrand.Intn(len(readySCs)),
    +		subConnsNext: rand.IntN(len(readySCs)),
     		stats:        stats,
     	}
     }
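The pickers now seed their round-robin starting point from math/rand/v2 instead of the removed internal grpcrand package; rand.IntN is automatically seeded but panics on a non-positive bound, which is safe here because the pickers are only built from non-empty READY lists. A trivial sketch of the replacement call:

package example

import rand "math/rand/v2"

// startIndex mirrors how the pickers choose where round-robin begins.
// rand.IntN panics when n <= 0, so callers must pass a non-empty length.
func startIndex(numReady int) int {
	return rand.IntN(numReady)
}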
    diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
    index c8fe1edd8e..506fae0d4e 100644
    --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
    +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
    @@ -26,7 +26,6 @@ import (
     	"sync"
     	"time"
     
    -	"github.com/golang/protobuf/proto"
     	"google.golang.org/grpc"
     	"google.golang.org/grpc/balancer"
     	"google.golang.org/grpc/connectivity"
    @@ -36,8 +35,9 @@ import (
     	"google.golang.org/grpc/keepalive"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/resolver"
    +	"google.golang.org/protobuf/proto"
    +	"google.golang.org/protobuf/types/known/timestamppb"
     
    -	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
     	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
     )
     
    @@ -246,7 +246,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() error {
     	// Explicitly set pickfirst as the balancer.
     	dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`))
     	dopts = append(dopts, grpc.WithResolvers(lb.manualResolver))
    -	dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID))
    +	dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParent))
     
     	// Enable Keepalive for grpclb client.
     	dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
    diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
    new file mode 100644
    index 0000000000..7d66cb491c
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
    @@ -0,0 +1,35 @@
    +/*
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package internal contains code internal to the pickfirst package.
    +package internal
    +
    +import (
    +	rand "math/rand/v2"
    +	"time"
    +)
    +
    +var (
    +	// RandShuffle pseudo-randomizes the order of addresses.
    +	RandShuffle = rand.Shuffle
    +	// TimeAfterFunc allows mocking the timer for testing connection delay
    +	// related functionality.
    +	TimeAfterFunc = func(d time.Duration, f func()) func() {
    +		timer := time.AfterFunc(d, f)
    +		return func() { timer.Stop() }
    +	}
    +)
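RandShuffle and TimeAfterFunc exist as test seams: the pickfirst balancers schedule their connection-delay timers through TimeAfterFunc so tests inside the module can substitute it. A hedged sketch of how such a hook is typically overridden; the test body is illustrative only, and this internal package is importable only from within the pickfirst tree of grpc-go:

package pickfirst_test // illustrative; real tests live inside the grpc-go module

import (
	"testing"
	"time"

	"google.golang.org/grpc/balancer/pickfirst/internal"
)

func TestConnectionDelayFiresImmediately(t *testing.T) {
	orig := internal.TimeAfterFunc
	t.Cleanup(func() { internal.TimeAfterFunc = orig })

	// Replace the happy-eyeballs timer with one that fires at once.
	internal.TimeAfterFunc = func(_ time.Duration, f func()) func() {
		go f()
		return func() {} // nothing left to cancel
	}

	// ... drive the balancer under test and assert on connection attempts ...
}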
    diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
    new file mode 100644
    index 0000000000..ea8899818c
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
    @@ -0,0 +1,291 @@
    +/*
    + *
    + * Copyright 2017 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package pickfirst contains the pick_first load balancing policy.
    +package pickfirst
    +
    +import (
    +	"encoding/json"
    +	"errors"
    +	"fmt"
    +	rand "math/rand/v2"
    +
    +	"google.golang.org/grpc/balancer"
    +	"google.golang.org/grpc/balancer/pickfirst/internal"
    +	"google.golang.org/grpc/connectivity"
    +	"google.golang.org/grpc/grpclog"
    +	"google.golang.org/grpc/internal/envconfig"
    +	internalgrpclog "google.golang.org/grpc/internal/grpclog"
    +	"google.golang.org/grpc/internal/pretty"
    +	"google.golang.org/grpc/resolver"
    +	"google.golang.org/grpc/serviceconfig"
    +
    +	_ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
    +)
    +
    +func init() {
    +	if envconfig.NewPickFirstEnabled {
    +		return
    +	}
    +	balancer.Register(pickfirstBuilder{})
    +}
    +
    +var logger = grpclog.Component("pick-first-lb")
    +
    +const (
    +	// Name is the name of the pick_first balancer.
    +	Name      = "pick_first"
    +	logPrefix = "[pick-first-lb %p] "
    +)
    +
    +type pickfirstBuilder struct{}
    +
    +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
    +	b := &pickfirstBalancer{cc: cc}
    +	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
    +	return b
    +}
    +
    +func (pickfirstBuilder) Name() string {
    +	return Name
    +}
    +
    +type pfConfig struct {
    +	serviceconfig.LoadBalancingConfig `json:"-"`
    +
    +	// If set to true, instructs the LB policy to shuffle the order of the list
    +	// of endpoints received from the name resolver before attempting to
    +	// connect to them.
    +	ShuffleAddressList bool `json:"shuffleAddressList"`
    +}
    +
    +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    +	var cfg pfConfig
    +	if err := json.Unmarshal(js, &cfg); err != nil {
    +		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
    +	}
    +	return cfg, nil
    +}
    +
    +type pickfirstBalancer struct {
    +	logger  *internalgrpclog.PrefixLogger
    +	state   connectivity.State
    +	cc      balancer.ClientConn
    +	subConn balancer.SubConn
    +}
    +
    +func (b *pickfirstBalancer) ResolverError(err error) {
    +	if b.logger.V(2) {
    +		b.logger.Infof("Received error from the name resolver: %v", err)
    +	}
    +	if b.subConn == nil {
    +		b.state = connectivity.TransientFailure
    +	}
    +
    +	if b.state != connectivity.TransientFailure {
    +		// The picker will not change since the balancer does not currently
    +		// report an error.
    +		return
    +	}
    +	b.cc.UpdateState(balancer.State{
    +		ConnectivityState: connectivity.TransientFailure,
    +		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
    +	})
    +}
    +
    +// Shuffler is an interface for shuffling an address list.
    +type Shuffler interface {
    +	ShuffleAddressListForTesting(n int, swap func(i, j int))
    +}
    +
    +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses.  n
    +// is the number of elements.  swap swaps the elements with indexes i and j.
    +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
    +
    +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
    +	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
    +		// The resolver reported an empty address list. Treat it like an error by
    +		// calling b.ResolverError.
    +		if b.subConn != nil {
    +			// Shut down the old subConn. All addresses were removed, so it is
    +			// no longer valid.
    +			b.subConn.Shutdown()
    +			b.subConn = nil
    +		}
    +		b.ResolverError(errors.New("produced zero addresses"))
    +		return balancer.ErrBadResolverState
    +	}
    +	// We don't have to guard this block with the env var because ParseConfig
    +	// already does so.
    +	cfg, ok := state.BalancerConfig.(pfConfig)
    +	if state.BalancerConfig != nil && !ok {
    +		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
    +	}
    +
    +	if b.logger.V(2) {
    +		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
    +	}
    +
    +	var addrs []resolver.Address
    +	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
    +		// Perform the optional shuffling described in gRFC A62. The shuffling will
    +		// change the order of endpoints but not touch the order of the addresses
    +		// within each endpoint. - A61
    +		if cfg.ShuffleAddressList {
    +			endpoints = append([]resolver.Endpoint{}, endpoints...)
    +			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
    +		}
    +
    +		// "Flatten the list by concatenating the ordered list of addresses for each
    +		// of the endpoints, in order." - A61
    +		for _, endpoint := range endpoints {
    +			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8305 section 4." - A61

    +			// TODO: support the above language.
    +			addrs = append(addrs, endpoint.Addresses...)
    +		}
    +	} else {
    +		// Endpoints not set, process addresses until we migrate resolver
    +		// emissions fully to Endpoints. The top channel does wrap emitted
    +		// addresses with endpoints, however some balancers such as weighted
    +		// target do not forward the corresponding correct endpoints down/split
    +		// endpoints properly. Once all balancers correctly forward endpoints
    +		// down, can delete this else conditional.
    +		addrs = state.ResolverState.Addresses
    +		if cfg.ShuffleAddressList {
    +			addrs = append([]resolver.Address{}, addrs...)
    +			rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
    +		}
    +	}
    +
    +	if b.subConn != nil {
    +		b.cc.UpdateAddresses(b.subConn, addrs)
    +		return nil
    +	}
    +
    +	var subConn balancer.SubConn
    +	subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
    +		StateListener: func(state balancer.SubConnState) {
    +			b.updateSubConnState(subConn, state)
    +		},
    +	})
    +	if err != nil {
    +		if b.logger.V(2) {
    +			b.logger.Infof("Failed to create new SubConn: %v", err)
    +		}
    +		b.state = connectivity.TransientFailure
    +		b.cc.UpdateState(balancer.State{
    +			ConnectivityState: connectivity.TransientFailure,
    +			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)},
    +		})
    +		return balancer.ErrBadResolverState
    +	}
    +	b.subConn = subConn
    +	b.state = connectivity.Idle
    +	b.cc.UpdateState(balancer.State{
    +		ConnectivityState: connectivity.Connecting,
    +		Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +	})
    +	b.subConn.Connect()
    +	return nil
    +}
    +
    +// UpdateSubConnState is unused as a StateListener is always registered when
    +// creating SubConns.
    +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
    +	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
    +}
    +
    +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
    +	if b.logger.V(2) {
    +		b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
    +	}
    +	if b.subConn != subConn {
    +		if b.logger.V(2) {
    +			b.logger.Infof("Ignored state change because subConn is not recognized")
    +		}
    +		return
    +	}
    +	if state.ConnectivityState == connectivity.Shutdown {
    +		b.subConn = nil
    +		return
    +	}
    +
    +	switch state.ConnectivityState {
    +	case connectivity.Ready:
    +		b.cc.UpdateState(balancer.State{
    +			ConnectivityState: state.ConnectivityState,
    +			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
    +		})
    +	case connectivity.Connecting:
    +		if b.state == connectivity.TransientFailure {
    +			// We stay in TransientFailure until we are Ready. See A62.
    +			return
    +		}
    +		b.cc.UpdateState(balancer.State{
    +			ConnectivityState: state.ConnectivityState,
    +			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +		})
    +	case connectivity.Idle:
    +		if b.state == connectivity.TransientFailure {
    +			// We stay in TransientFailure until we are Ready. Also kick the
    +			// subConn out of Idle into Connecting. See A62.
    +			b.subConn.Connect()
    +			return
    +		}
    +		b.cc.UpdateState(balancer.State{
    +			ConnectivityState: state.ConnectivityState,
    +			Picker:            &idlePicker{subConn: subConn},
    +		})
    +	case connectivity.TransientFailure:
    +		b.cc.UpdateState(balancer.State{
    +			ConnectivityState: state.ConnectivityState,
    +			Picker:            &picker{err: state.ConnectionError},
    +		})
    +	}
    +	b.state = state.ConnectivityState
    +}
    +
    +func (b *pickfirstBalancer) Close() {
    +}
    +
    +func (b *pickfirstBalancer) ExitIdle() {
    +	if b.subConn != nil && b.state == connectivity.Idle {
    +		b.subConn.Connect()
    +	}
    +}
    +
    +type picker struct {
    +	result balancer.PickResult
    +	err    error
    +}
    +
    +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    +	return p.result, p.err
    +}
    +
    +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
    +// CONNECTING when Pick is called.
    +type idlePicker struct {
    +	subConn balancer.SubConn
    +}
    +
    +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    +	i.subConn.Connect()
    +	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
    +}
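Applications do not normally import this package; the policy (and the shuffleAddressList knob parsed by ParseConfig above) is selected through service config. A hedged example using the documented JSON shape via grpc.WithDefaultServiceConfig (the target is a placeholder):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "shuffleAddressList" matches the JSON tag on pfConfig above.
	const sc = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`

	conn, err := grpc.NewClient("dns:///backend.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}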
    diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
    new file mode 100644
    index 0000000000..2fc0a71f94
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
    @@ -0,0 +1,911 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package pickfirstleaf contains the pick_first load balancing policy which
    +// will be the universal leaf policy after dualstack changes are implemented.
    +//
    +// # Experimental
    +//
    +// Notice: This package is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +package pickfirstleaf
    +
    +import (
    +	"encoding/json"
    +	"errors"
    +	"fmt"
    +	"net"
    +	"net/netip"
    +	"sync"
    +	"time"
    +
    +	"google.golang.org/grpc/balancer"
    +	"google.golang.org/grpc/balancer/pickfirst/internal"
    +	"google.golang.org/grpc/connectivity"
    +	expstats "google.golang.org/grpc/experimental/stats"
    +	"google.golang.org/grpc/grpclog"
    +	"google.golang.org/grpc/internal/envconfig"
    +	internalgrpclog "google.golang.org/grpc/internal/grpclog"
    +	"google.golang.org/grpc/internal/pretty"
    +	"google.golang.org/grpc/resolver"
    +	"google.golang.org/grpc/serviceconfig"
    +)
    +
    +func init() {
    +	if envconfig.NewPickFirstEnabled {
    +		// Register as the default pick_first balancer.
    +		Name = "pick_first"
    +	}
    +	balancer.Register(pickfirstBuilder{})
    +}
    +
    +// enableHealthListenerKeyType is a unique key type used in resolver attributes
    +// to indicate whether the health listener usage is enabled.
    +type enableHealthListenerKeyType struct{}
    +
    +var (
    +	logger = grpclog.Component("pick-first-leaf-lb")
    +	// Name is the name of the pick_first_leaf balancer.
    +	// It is changed to "pick_first" in init() if this balancer is to be
    +	// registered as the default pickfirst.
    +	Name                 = "pick_first_leaf"
    +	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:        "grpc.lb.pick_first.disconnections",
    +		Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
    +		Unit:        "disconnection",
    +		Labels:      []string{"grpc.target"},
    +		Default:     false,
    +	})
    +	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:        "grpc.lb.pick_first.connection_attempts_succeeded",
    +		Description: "EXPERIMENTAL. Number of successful connection attempts.",
    +		Unit:        "attempt",
    +		Labels:      []string{"grpc.target"},
    +		Default:     false,
    +	})
    +	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
    +		Name:        "grpc.lb.pick_first.connection_attempts_failed",
    +		Description: "EXPERIMENTAL. Number of failed connection attempts.",
    +		Unit:        "attempt",
    +		Labels:      []string{"grpc.target"},
    +		Default:     false,
    +	})
    +)
    +
    +const (
    +	// TODO: change to pick-first when this becomes the default pick_first policy.
    +	logPrefix = "[pick-first-leaf-lb %p] "
    +	// connectionDelayInterval is the time to wait for during the happy eyeballs
    +	// pass before starting the next connection attempt.
    +	connectionDelayInterval = 250 * time.Millisecond
    +)
    +
    +type ipAddrFamily int
    +
    +const (
    +	// ipAddrFamilyUnknown represents strings that can't be parsed as an IP
    +	// address.
    +	ipAddrFamilyUnknown ipAddrFamily = iota
    +	ipAddrFamilyV4
    +	ipAddrFamilyV6
    +)
    +
    +type pickfirstBuilder struct{}
    +
    +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
    +	b := &pickfirstBalancer{
    +		cc:              cc,
    +		target:          bo.Target.String(),
    +		metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder.
    +
    +		subConns:              resolver.NewAddressMap(),
    +		state:                 connectivity.Connecting,
    +		cancelConnectionTimer: func() {},
    +	}
    +	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
    +	return b
    +}
    +
    +func (b pickfirstBuilder) Name() string {
    +	return Name
    +}
    +
    +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    +	var cfg pfConfig
    +	if err := json.Unmarshal(js, &cfg); err != nil {
    +		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
    +	}
    +	return cfg, nil
    +}
    +
    +// EnableHealthListener updates the state to configure pickfirst for using a
    +// generic health listener.
    +func EnableHealthListener(state resolver.State) resolver.State {
    +	state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
    +	return state
    +}
    +
    +type pfConfig struct {
    +	serviceconfig.LoadBalancingConfig `json:"-"`
    +
    +	// If set to true, instructs the LB policy to shuffle the order of the list
    +	// of endpoints received from the name resolver before attempting to
    +	// connect to them.
    +	ShuffleAddressList bool `json:"shuffleAddressList"`
    +}
    +
    +// scData keeps track of the current state of the subConn.
    +// It is not safe for concurrent access.
    +type scData struct {
    +	// The following fields are initialized at build time and read-only after
    +	// that.
    +	subConn balancer.SubConn
    +	addr    resolver.Address
    +
    +	rawConnectivityState connectivity.State
    +	// The effective connectivity state based on raw connectivity, health state
    +	// and after following sticky TransientFailure behaviour defined in A62.
    +	effectiveState              connectivity.State
    +	lastErr                     error
    +	connectionFailedInFirstPass bool
    +}
    +
    +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
    +	sd := &scData{
    +		rawConnectivityState: connectivity.Idle,
    +		effectiveState:       connectivity.Idle,
    +		addr:                 addr,
    +	}
    +	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
    +		StateListener: func(state balancer.SubConnState) {
    +			b.updateSubConnState(sd, state)
    +		},
    +	})
    +	if err != nil {
    +		return nil, err
    +	}
    +	sd.subConn = sc
    +	return sd, nil
    +}
    +
    +type pickfirstBalancer struct {
    +	// The following fields are initialized at build time and read-only after
    +	// that and therefore do not need to be guarded by a mutex.
    +	logger          *internalgrpclog.PrefixLogger
    +	cc              balancer.ClientConn
    +	target          string
    +	metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
    +
+	// The mutex is used to ensure that updates triggered from the idle
+	// picker are synchronized with the already serialized resolver and
+	// SubConn state updates.
    +	mu sync.Mutex
    +	// State reported to the channel based on SubConn states and resolver
    +	// updates.
    +	state connectivity.State
+	// scData for active subconns mapped by address.
    +	subConns              *resolver.AddressMap
    +	addressList           addressList
    +	firstPass             bool
    +	numTF                 int
    +	cancelConnectionTimer func()
    +	healthCheckingEnabled bool
    +}
    +
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
    +func (b *pickfirstBalancer) ResolverError(err error) {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	b.resolverErrorLocked(err)
    +}
    +
    +func (b *pickfirstBalancer) resolverErrorLocked(err error) {
    +	if b.logger.V(2) {
    +		b.logger.Infof("Received error from the name resolver: %v", err)
    +	}
    +
    +	// The picker will not change since the balancer does not currently
    +	// report an error. If the balancer hasn't received a single good resolver
    +	// update yet, transition to TRANSIENT_FAILURE.
    +	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
    +		if b.logger.V(2) {
    +			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
    +		}
    +		return
    +	}
    +
    +	b.updateBalancerState(balancer.State{
    +		ConnectivityState: connectivity.TransientFailure,
    +		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
    +	})
    +}
    +
    +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	b.cancelConnectionTimer()
    +	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
    +		// Cleanup state pertaining to the previous resolver state.
    +		// Treat an empty address list like an error by calling b.ResolverError.
    +		b.closeSubConnsLocked()
    +		b.addressList.updateAddrs(nil)
    +		b.resolverErrorLocked(errors.New("produced zero addresses"))
    +		return balancer.ErrBadResolverState
    +	}
    +	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
    +	cfg, ok := state.BalancerConfig.(pfConfig)
    +	if state.BalancerConfig != nil && !ok {
    +		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
    +	}
    +
    +	if b.logger.V(2) {
    +		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
    +	}
    +
    +	var newAddrs []resolver.Address
    +	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
    +		// Perform the optional shuffling described in gRFC A62. The shuffling
    +		// will change the order of endpoints but not touch the order of the
    +		// addresses within each endpoint. - A61
    +		if cfg.ShuffleAddressList {
    +			endpoints = append([]resolver.Endpoint{}, endpoints...)
    +			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
    +		}
    +
    +		// "Flatten the list by concatenating the ordered list of addresses for
    +		// each of the endpoints, in order." - A61
    +		for _, endpoint := range endpoints {
    +			newAddrs = append(newAddrs, endpoint.Addresses...)
    +		}
    +	} else {
+		// Endpoints are not set, so process addresses until resolver
+		// emissions are fully migrated to Endpoints. The top-level channel
+		// does wrap emitted addresses with endpoints; however, some balancers
+		// such as weighted target do not forward the correct corresponding
+		// endpoints down or split endpoints properly. Once all balancers
+		// correctly forward endpoints down, this else branch can be deleted.
    +		newAddrs = state.ResolverState.Addresses
    +		if cfg.ShuffleAddressList {
    +			newAddrs = append([]resolver.Address{}, newAddrs...)
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
    +		}
    +	}
    +
    +	// If an address appears in multiple endpoints or in the same endpoint
    +	// multiple times, we keep it only once. We will create only one SubConn
    +	// for the address because an AddressMap is used to store SubConns.
    +	// Not de-duplicating would result in attempting to connect to the same
    +	// SubConn multiple times in the same pass. We don't want this.
    +	newAddrs = deDupAddresses(newAddrs)
    +	newAddrs = interleaveAddresses(newAddrs)
    +
    +	prevAddr := b.addressList.currentAddress()
    +	prevSCData, found := b.subConns.Get(prevAddr)
    +	prevAddrsCount := b.addressList.size()
    +	isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
    +	b.addressList.updateAddrs(newAddrs)
    +
    +	// If the previous ready SubConn exists in new address list,
    +	// keep this connection and don't create new SubConns.
    +	if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
    +		return nil
    +	}
    +
    +	b.reconcileSubConnsLocked(newAddrs)
    +	// If it's the first resolver update or the balancer was already READY
    +	// (but the new address list does not contain the ready SubConn) or
    +	// CONNECTING, enter CONNECTING.
+	// We may be in TRANSIENT_FAILURE due to a previous empty address list;
+	// we should still enter CONNECTING because the sticky TF behaviour
+	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+	// due to connectivity failures.
    +	if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
    +		// Start connection attempt at first address.
    +		b.forceUpdateConcludedStateLocked(balancer.State{
    +			ConnectivityState: connectivity.Connecting,
    +			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +		})
    +		b.startFirstPassLocked()
    +	} else if b.state == connectivity.TransientFailure {
    +		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
    +		// we're READY. See A62.
    +		b.startFirstPassLocked()
    +	}
    +	return nil
    +}
    +
    +// UpdateSubConnState is unused as a StateListener is always registered when
    +// creating SubConns.
    +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
    +	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
    +}
    +
    +func (b *pickfirstBalancer) Close() {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	b.closeSubConnsLocked()
    +	b.cancelConnectionTimer()
    +	b.state = connectivity.Shutdown
    +}
    +
    +// ExitIdle moves the balancer out of idle state. It can be called concurrently
    +// by the idlePicker and clientConn so access to variables should be
    +// synchronized.
    +func (b *pickfirstBalancer) ExitIdle() {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	if b.state == connectivity.Idle {
    +		b.startFirstPassLocked()
    +	}
    +}
    +
    +func (b *pickfirstBalancer) startFirstPassLocked() {
    +	b.firstPass = true
    +	b.numTF = 0
    +	// Reset the connection attempt record for existing SubConns.
    +	for _, sd := range b.subConns.Values() {
    +		sd.(*scData).connectionFailedInFirstPass = false
    +	}
    +	b.requestConnectionLocked()
    +}
    +
    +func (b *pickfirstBalancer) closeSubConnsLocked() {
    +	for _, sd := range b.subConns.Values() {
    +		sd.(*scData).subConn.Shutdown()
    +	}
    +	b.subConns = resolver.NewAddressMap()
    +}
    +
    +// deDupAddresses ensures that each address appears only once in the slice.
    +func deDupAddresses(addrs []resolver.Address) []resolver.Address {
    +	seenAddrs := resolver.NewAddressMap()
    +	retAddrs := []resolver.Address{}
    +
+	for _, addr := range addrs {
+		if _, ok := seenAddrs.Get(addr); ok {
+			continue
+		}
+		// Record the address so that later duplicates are skipped.
+		seenAddrs.Set(addr, true)
+		retAddrs = append(retAddrs, addr)
+	}
    +	return retAddrs
    +}
    +
    +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
    +// as per RFC-8305 section 4.
    +// Whichever address family is first in the list is followed by an address of
    +// the other address family; that is, if the first address in the list is IPv6,
    +// then the first IPv4 address should be moved up in the list to be second in
    +// the list. It doesn't support configuring "First Address Family Count", i.e.
    +// there will always be a single member of the first address family at the
    +// beginning of the interleaved list.
    +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
    +// "unknown" family for interleaving.
    +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
    +func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
    +	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
    +	interleavingOrder := []ipAddrFamily{}
    +	for _, addr := range addrs {
    +		family := addressFamily(addr.Addr)
    +		if _, found := familyAddrsMap[family]; !found {
    +			interleavingOrder = append(interleavingOrder, family)
    +		}
    +		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
    +	}
    +
    +	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
    +
    +	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
    +		// Some IP types may have fewer addresses than others, so we look for
    +		// the next type that has a remaining member to add to the interleaved
    +		// list.
    +		family := interleavingOrder[curFamilyIdx]
    +		remainingMembers := familyAddrsMap[family]
    +		if len(remainingMembers) > 0 {
    +			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
    +			familyAddrsMap[family] = remainingMembers[1:]
    +		}
    +	}
    +
    +	return interleavedAddrs
    +}
    +
    +// addressFamily returns the ipAddrFamily after parsing the address string.
    +// If the address isn't of the format "ip-address:port", it returns
    +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
    +// using a resolver like passthrough where the address may be a hostname in
    +// some format that the dialer can resolve.
    +func addressFamily(address string) ipAddrFamily {
    +	// Parse the IP after removing the port.
    +	host, _, err := net.SplitHostPort(address)
    +	if err != nil {
    +		return ipAddrFamilyUnknown
    +	}
    +	ip, err := netip.ParseAddr(host)
    +	if err != nil {
    +		return ipAddrFamilyUnknown
    +	}
    +	switch {
    +	case ip.Is4() || ip.Is4In6():
    +		return ipAddrFamilyV4
    +	case ip.Is6():
    +		return ipAddrFamilyV6
    +	default:
    +		return ipAddrFamilyUnknown
    +	}
    +}
    +
    +// reconcileSubConnsLocked updates the active subchannels based on a new address
    +// list from the resolver. It does this by:
    +//   - closing subchannels: any existing subchannels associated with addresses
    +//     that are no longer in the updated list are shut down.
    +//   - removing subchannels: entries for these closed subchannels are removed
    +//     from the subchannel map.
    +//
    +// This ensures that the subchannel map accurately reflects the current set of
    +// addresses received from the name resolver.
    +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
    +	newAddrsMap := resolver.NewAddressMap()
    +	for _, addr := range newAddrs {
    +		newAddrsMap.Set(addr, true)
    +	}
    +
    +	for _, oldAddr := range b.subConns.Keys() {
    +		if _, ok := newAddrsMap.Get(oldAddr); ok {
    +			continue
    +		}
    +		val, _ := b.subConns.Get(oldAddr)
    +		val.(*scData).subConn.Shutdown()
    +		b.subConns.Delete(oldAddr)
    +	}
    +}
    +
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other SubConns must be shut down.
    +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
    +	b.cancelConnectionTimer()
    +	for _, v := range b.subConns.Values() {
    +		sd := v.(*scData)
    +		if sd.subConn != selected.subConn {
    +			sd.subConn.Shutdown()
    +		}
    +	}
    +	b.subConns = resolver.NewAddressMap()
    +	b.subConns.Set(selected.addr, selected)
    +}
    +
    +// requestConnectionLocked starts connecting on the subchannel corresponding to
    +// the current address. If no subchannel exists, one is created. If the current
    +// subchannel is in TransientFailure, a connection to the next address is
    +// attempted until a subchannel is found.
    +func (b *pickfirstBalancer) requestConnectionLocked() {
    +	if !b.addressList.isValid() {
    +		return
    +	}
    +	var lastErr error
    +	for valid := true; valid; valid = b.addressList.increment() {
    +		curAddr := b.addressList.currentAddress()
    +		sd, ok := b.subConns.Get(curAddr)
    +		if !ok {
    +			var err error
    +			// We want to assign the new scData to sd from the outer scope,
    +			// hence we can't use := below.
    +			sd, err = b.newSCData(curAddr)
    +			if err != nil {
    +				// This should never happen, unless the clientConn is being shut
    +				// down.
    +				if b.logger.V(2) {
    +					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
    +				}
    +				// Do nothing, the LB policy will be closed soon.
    +				return
    +			}
    +			b.subConns.Set(curAddr, sd)
    +		}
    +
    +		scd := sd.(*scData)
    +		switch scd.rawConnectivityState {
    +		case connectivity.Idle:
    +			scd.subConn.Connect()
    +			b.scheduleNextConnectionLocked()
    +			return
    +		case connectivity.TransientFailure:
    +			// The SubConn is being re-used and failed during a previous pass
    +			// over the addressList. It has not completed backoff yet.
    +			// Mark it as having failed and try the next address.
    +			scd.connectionFailedInFirstPass = true
    +			lastErr = scd.lastErr
    +			continue
    +		case connectivity.Connecting:
    +			// Wait for the connection attempt to complete or the timer to fire
    +			// before attempting the next address.
    +			b.scheduleNextConnectionLocked()
    +			return
    +		default:
    +			b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
    +			return
    +
    +		}
    +	}
    +
    +	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
    +	// first pass if possible.
    +	b.endFirstPassIfPossibleLocked(lastErr)
    +}
    +
    +func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
    +	b.cancelConnectionTimer()
    +	if !b.addressList.hasNext() {
    +		return
    +	}
    +	curAddr := b.addressList.currentAddress()
    +	cancelled := false // Access to this is protected by the balancer's mutex.
    +	closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
    +		b.mu.Lock()
    +		defer b.mu.Unlock()
    +		// If the scheduled task is cancelled while acquiring the mutex, return.
    +		if cancelled {
    +			return
    +		}
    +		if b.logger.V(2) {
    +			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
    +		}
    +		if b.addressList.increment() {
    +			b.requestConnectionLocked()
    +		}
    +	})
    +	// Access to the cancellation callback held by the balancer is guarded by
    +	// the balancer's mutex, so it's safe to set the boolean from the callback.
    +	b.cancelConnectionTimer = sync.OnceFunc(func() {
    +		cancelled = true
    +		closeFn()
    +	})
    +}
    +
    +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	oldState := sd.rawConnectivityState
    +	sd.rawConnectivityState = newState.ConnectivityState
    +	// Previously relevant SubConns can still callback with state updates.
    +	// To prevent pickers from returning these obsolete SubConns, this logic
    +	// is included to check if the current list of active SubConns includes this
    +	// SubConn.
    +	if !b.isActiveSCData(sd) {
    +		return
    +	}
    +	if newState.ConnectivityState == connectivity.Shutdown {
    +		sd.effectiveState = connectivity.Shutdown
    +		return
    +	}
    +
    +	// Record a connection attempt when exiting CONNECTING.
    +	if newState.ConnectivityState == connectivity.TransientFailure {
    +		sd.connectionFailedInFirstPass = true
    +		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
    +	}
    +
    +	if newState.ConnectivityState == connectivity.Ready {
    +		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
    +		b.shutdownRemainingLocked(sd)
    +		if !b.addressList.seekTo(sd.addr) {
    +			// This should not fail as we should have only one SubConn after
    +			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
    +			return
    +		}
    +		if !b.healthCheckingEnabled {
    +			if b.logger.V(2) {
    +				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
    +			}
    +
    +			sd.effectiveState = connectivity.Ready
    +			b.updateBalancerState(balancer.State{
    +				ConnectivityState: connectivity.Ready,
    +				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
    +			})
    +			return
    +		}
    +		if b.logger.V(2) {
    +			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
    +		}
    +		// Send a CONNECTING update to take the SubConn out of sticky-TF if
    +		// required.
    +		sd.effectiveState = connectivity.Connecting
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Connecting,
    +			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +		})
    +		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
    +			b.updateSubConnHealthState(sd, scs)
    +		})
    +		return
    +	}
    +
    +	// If the LB policy is READY, and it receives a subchannel state change,
    +	// it means that the READY subchannel has failed.
    +	// A SubConn can also transition from CONNECTING directly to IDLE when
    +	// a transport is successfully created, but the connection fails
    +	// before the SubConn can send the notification for READY. We treat
    +	// this as a successful connection and transition to IDLE.
    +	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
    +	// part of the if condition below once the issue is fixed.
    +	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
    +		// Once a transport fails, the balancer enters IDLE and starts from
    +		// the first address when the picker is used.
    +		b.shutdownRemainingLocked(sd)
    +		sd.effectiveState = newState.ConnectivityState
+		// The SubConn was effectively READY in between CONNECTING and IDLE,
+		// but the READY notification was never delivered; account for that.
    +		if oldState == connectivity.Connecting {
    +			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
    +			// causes a race that prevents the READY state change notification.
    +			// This works around it.
    +			connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
    +		}
    +		disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
    +		b.addressList.reset()
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Idle,
    +			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
    +		})
    +		return
    +	}
    +
    +	if b.firstPass {
    +		switch newState.ConnectivityState {
    +		case connectivity.Connecting:
    +			// The effective state can be in either IDLE, CONNECTING or
+			// TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
    +			// TRANSIENT_FAILURE until it's READY. See A62.
    +			if sd.effectiveState != connectivity.TransientFailure {
    +				sd.effectiveState = connectivity.Connecting
    +				b.updateBalancerState(balancer.State{
    +					ConnectivityState: connectivity.Connecting,
    +					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +				})
    +			}
    +		case connectivity.TransientFailure:
    +			sd.lastErr = newState.ConnectionError
    +			sd.effectiveState = connectivity.TransientFailure
    +			// Since we're re-using common SubConns while handling resolver
    +			// updates, we could receive an out of turn TRANSIENT_FAILURE from
    +			// a pass over the previous address list. Happy Eyeballs will also
    +			// cause out of order updates to arrive.
    +
    +			if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
    +				b.cancelConnectionTimer()
    +				if b.addressList.increment() {
    +					b.requestConnectionLocked()
    +					return
    +				}
    +			}
    +
    +			// End the first pass if we've seen a TRANSIENT_FAILURE from all
    +			// SubConns once.
    +			b.endFirstPassIfPossibleLocked(newState.ConnectionError)
    +		}
    +		return
    +	}
    +
    +	// We have finished the first pass, keep re-connecting failing SubConns.
    +	switch newState.ConnectivityState {
    +	case connectivity.TransientFailure:
    +		b.numTF = (b.numTF + 1) % b.subConns.Len()
    +		sd.lastErr = newState.ConnectionError
    +		if b.numTF%b.subConns.Len() == 0 {
    +			b.updateBalancerState(balancer.State{
    +				ConnectivityState: connectivity.TransientFailure,
    +				Picker:            &picker{err: newState.ConnectionError},
    +			})
    +		}
    +		// We don't need to request re-resolution since the SubConn already
    +		// does that before reporting TRANSIENT_FAILURE.
    +		// TODO: #7534 - Move re-resolution requests from SubConn into
    +		// pick_first.
    +	case connectivity.Idle:
    +		sd.subConn.Connect()
    +	}
    +}
    +
    +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
    +// addresses are tried and their SubConns have reported a failure.
    +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
    +	// An optimization to avoid iterating over the entire SubConn map.
    +	if b.addressList.isValid() {
    +		return
    +	}
    +	// Connect() has been called on all the SubConns. The first pass can be
    +	// ended if all the SubConns have reported a failure.
    +	for _, v := range b.subConns.Values() {
    +		sd := v.(*scData)
    +		if !sd.connectionFailedInFirstPass {
    +			return
    +		}
    +	}
    +	b.firstPass = false
    +	b.updateBalancerState(balancer.State{
    +		ConnectivityState: connectivity.TransientFailure,
    +		Picker:            &picker{err: lastErr},
    +	})
    +	// Start re-connecting all the SubConns that are already in IDLE.
    +	for _, v := range b.subConns.Values() {
    +		sd := v.(*scData)
    +		if sd.rawConnectivityState == connectivity.Idle {
    +			sd.subConn.Connect()
    +		}
    +	}
    +}
    +
    +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
    +	activeSD, found := b.subConns.Get(sd.addr)
    +	return found && activeSD == sd
    +}
    +
    +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
    +	b.mu.Lock()
    +	defer b.mu.Unlock()
    +	// Previously relevant SubConns can still callback with state updates.
    +	// To prevent pickers from returning these obsolete SubConns, this logic
    +	// is included to check if the current list of active SubConns includes
    +	// this SubConn.
    +	if !b.isActiveSCData(sd) {
    +		return
    +	}
    +	sd.effectiveState = state.ConnectivityState
    +	switch state.ConnectivityState {
    +	case connectivity.Ready:
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Ready,
    +			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
    +		})
    +	case connectivity.TransientFailure:
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.TransientFailure,
    +			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
    +		})
    +	case connectivity.Connecting:
    +		b.updateBalancerState(balancer.State{
    +			ConnectivityState: connectivity.Connecting,
    +			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    +		})
    +	default:
+		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
    +	}
    +}
    +
    +// updateBalancerState stores the state reported to the channel and calls
    +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
    +// updates to the channel.
    +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
    +	// In case of TransientFailures allow the picker to be updated to update
    +	// the connectivity error, in all other cases don't send duplicate state
    +	// updates.
    +	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
    +		return
    +	}
    +	b.forceUpdateConcludedStateLocked(newState)
    +}
    +
    +// forceUpdateConcludedStateLocked stores the state reported to the channel and
    +// calls ClientConn.UpdateState().
+// A separate function is defined to force-update the ClientConn state since
+// the channel doesn't assume that LB policies start in CONNECTING and instead
+// relies on the LB policy to send an initial CONNECTING update.
    +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
    +	b.state = newState.ConnectivityState
    +	b.cc.UpdateState(newState)
    +}
    +
    +type picker struct {
    +	result balancer.PickResult
    +	err    error
    +}
    +
    +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    +	return p.result, p.err
    +}
    +
    +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
    +// CONNECTING when Pick is called.
    +type idlePicker struct {
    +	exitIdle func()
    +}
    +
    +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    +	i.exitIdle()
    +	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
    +}
    +
    +// addressList manages sequentially iterating over addresses present in a list
    +// of endpoints. It provides a 1 dimensional view of the addresses present in
    +// the endpoints.
    +// This type is not safe for concurrent access.
    +type addressList struct {
    +	addresses []resolver.Address
    +	idx       int
    +}
    +
    +func (al *addressList) isValid() bool {
    +	return al.idx < len(al.addresses)
    +}
    +
    +func (al *addressList) size() int {
    +	return len(al.addresses)
    +}
    +
    +// increment moves to the next index in the address list.
    +// This method returns false if it went off the list, true otherwise.
    +func (al *addressList) increment() bool {
    +	if !al.isValid() {
    +		return false
    +	}
    +	al.idx++
    +	return al.idx < len(al.addresses)
    +}
    +
    +// currentAddress returns the current address pointed to in the addressList.
    +// If the list is in an invalid state, it returns an empty address instead.
    +func (al *addressList) currentAddress() resolver.Address {
    +	if !al.isValid() {
    +		return resolver.Address{}
    +	}
    +	return al.addresses[al.idx]
    +}
    +
    +func (al *addressList) reset() {
    +	al.idx = 0
    +}
    +
    +func (al *addressList) updateAddrs(addrs []resolver.Address) {
    +	al.addresses = addrs
    +	al.reset()
    +}
    +
+// seekTo moves the index to the first address equal to the needle and returns
+// true. It returns false, leaving the current index unchanged, if the needle
+// is not found.
    +func (al *addressList) seekTo(needle resolver.Address) bool {
    +	for ai, addr := range al.addresses {
    +		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
    +			continue
    +		}
    +		al.idx = ai
    +		return true
    +	}
    +	return false
    +}
    +
+// hasNext returns whether there is an address after the current one, i.e.
+// whether incrementing the addressList will keep it valid. If the list has
+// already moved past the end, it returns false.
    +func (al *addressList) hasNext() bool {
    +	if !al.isValid() {
    +		return false
    +	}
    +	return al.idx+1 < len(al.addresses)
    +}
    +
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
    +// equal. This is different from the Equal method on the resolver.Address type
    +// which considers all fields to determine equality. Here, we only consider
    +// fields that are meaningful to the SubConn.
    +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
    +	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
    +		a.Attributes.Equal(b.Attributes) &&
    +		a.Metadata == b.Metadata
    +}
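The address preprocessing added above — flatten, de-duplicate, then interleave IPv4 and IPv6 entries per RFC 8305 section 4 — can be illustrated outside of gRPC. The standalone sketch below mirrors deDupAddresses, addressFamily and interleaveAddresses using plain "host:port" strings and only the standard library; it is illustrative only, and the helper names and sample addresses are invented for this note rather than taken from the vendored code.

package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family classifies "host:port" strings the same way addressFamily does:
// anything that does not parse as an IP is lumped into an "unknown" family.
func family(addr string) string {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return "unknown"
	}
	ip, err := netip.ParseAddr(host)
	switch {
	case err != nil:
		return "unknown"
	case ip.Is4() || ip.Is4In6():
		return "v4"
	default:
		return "v6"
	}
}

// interleave de-duplicates addrs and then emits one address per family in a
// round-robin over the families, in the order each family first appeared.
func interleave(addrs []string) []string {
	seen := map[string]bool{}
	familyOrder := []string{}
	buckets := map[string][]string{}
	for _, a := range addrs {
		if seen[a] {
			continue // drop duplicates, as deDupAddresses does.
		}
		seen[a] = true
		f := family(a)
		if _, ok := buckets[f]; !ok {
			familyOrder = append(familyOrder, f)
		}
		buckets[f] = append(buckets[f], a)
	}
	out := make([]string, 0, len(seen))
	for i := 0; len(out) < len(seen); i = (i + 1) % len(familyOrder) {
		f := familyOrder[i]
		if b := buckets[f]; len(b) > 0 {
			out = append(out, b[0])
			buckets[f] = b[1:]
		}
	}
	return out
}

func main() {
	fmt.Println(interleave([]string{
		"10.0.0.1:443", "10.0.0.2:443", "[2001:db8::1]:443", "10.0.0.1:443",
	}))
	// Prints: [10.0.0.1:443 [2001:db8::1]:443 10.0.0.2:443]
}

The output alternates one address per family, starting with whichever family appears first in the input, which is the ordering the balancer then feeds to its happy-eyeballs pass.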
    diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
    index f7031ad225..80a42d2251 100644
    --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
    +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
    @@ -22,12 +22,12 @@
     package roundrobin
     
     import (
    +	rand "math/rand/v2"
     	"sync/atomic"
     
     	"google.golang.org/grpc/balancer"
     	"google.golang.org/grpc/balancer/base"
     	"google.golang.org/grpc/grpclog"
    -	"google.golang.org/grpc/internal/grpcrand"
     )
     
     // Name is the name of round_robin balancer.
    @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
     		// Start at a random index, as the same RR balancer rebuilds a new
     		// picker when SubConn states change, and we don't want to apply excess
     		// load to the first server in the list.
    -		next: uint32(grpcrand.Intn(len(scs))),
    +		next: uint32(rand.IntN(len(scs))),
     	}
     }
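The roundrobin change above only swaps the removed internal grpcrand helper for the standard math/rand/v2 package (available since Go 1.22). A minimal sketch of the same call follows; the startIndex helper is invented for this note, and the guard is there purely because the sketch runs standalone — rand.IntN panics when its argument is not positive, while the full roundrobin.go (not shown in this hunk) returns an error picker before reaching this point when no SubConn is ready.

package main

import (
	"fmt"
	rand "math/rand/v2"
)

// startIndex picks a random starting position among numReady ready SubConns,
// mirroring the `next: uint32(rand.IntN(len(scs)))` initialization above.
func startIndex(numReady int) int {
	if numReady <= 0 {
		return 0 // rand.IntN panics for n <= 0.
	}
	return rand.IntN(numReady)
}

func main() {
	fmt.Println(startIndex(5)) // prints a value in [0, 5).
}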
     
    diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go
    new file mode 100644
    index 0000000000..ea27c4fa76
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/balancer/subconn.go
    @@ -0,0 +1,134 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package balancer
    +
    +import (
    +	"google.golang.org/grpc/connectivity"
    +	"google.golang.org/grpc/internal"
    +	"google.golang.org/grpc/resolver"
    +)
    +
    +// A SubConn represents a single connection to a gRPC backend service.
    +//
    +// All SubConns start in IDLE, and will not try to connect. To trigger a
    +// connection attempt, Balancers must call Connect.
    +//
    +// If the connection attempt fails, the SubConn will transition to
    +// TRANSIENT_FAILURE for a backoff period, and then return to IDLE.  If the
    +// connection attempt succeeds, it will transition to READY.
    +//
    +// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
    +//
    +// If a connection re-enters IDLE, Balancers must call Connect again to trigger
    +// a new connection attempt.
    +//
    +// Each SubConn contains a list of addresses.  gRPC will try to connect to the
    +// addresses in sequence, and stop trying the remainder once the first
    +// connection is successful.  However, this behavior is deprecated.  SubConns
    +// should only use a single address.
    +//
    +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies.  Users should not need their own complete
    +// implementation of this interface -- they should always delegate to a SubConn
    +// returned by ClientConn.NewSubConn() by embedding it in their implementations.
    +// An embedded SubConn must never be nil, or runtime panics will occur.
    +type SubConn interface {
    +	// UpdateAddresses updates the addresses used in this SubConn.
    +	// gRPC checks if currently-connected address is still in the new list.
    +	// If it's in the list, the connection will be kept.
    +	// If it's not in the list, the connection will gracefully close, and
    +	// a new connection will be created.
    +	//
    +	// This will trigger a state transition for the SubConn.
    +	//
    +	// Deprecated: this method will be removed.  Create new SubConns for new
    +	// addresses instead.
    +	UpdateAddresses([]resolver.Address)
    +	// Connect starts the connecting for this SubConn.
    +	Connect()
    +	// GetOrBuildProducer returns a reference to the existing Producer for this
    +	// ProducerBuilder in this SubConn, or, if one does not currently exist,
    +	// creates a new one and returns it.  Returns a close function which may be
    +	// called when the Producer is no longer needed.  Otherwise the producer
    +	// will automatically be closed upon connection loss or subchannel close.
    +	// Should only be called on a SubConn in state Ready.  Otherwise the
    +	// producer will be unable to create streams.
    +	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
    +	// Shutdown shuts down the SubConn gracefully.  Any started RPCs will be
    +	// allowed to complete.  No future calls should be made on the SubConn.
    +	// One final state update will be delivered to the StateListener (or
    +	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
    +	// indicate the shutdown operation.  This may be delivered before
    +	// in-progress RPCs are complete and the actual connection is closed.
    +	Shutdown()
    +	// RegisterHealthListener registers a health listener that receives health
    +	// updates for a Ready SubConn. Only one health listener can be registered
    +	// at a time. A health listener should be registered each time the SubConn's
    +	// connectivity state changes to READY. Registering a health listener when
    +	// the connectivity state is not READY may result in undefined behaviour.
    +	// This method must not be called synchronously while handling an update
    +	// from a previously registered health listener.
    +	RegisterHealthListener(func(SubConnState))
    +	// EnforceSubConnEmbedding is included to force implementers to embed
    +	// another implementation of this interface, allowing gRPC to add methods
    +	// without breaking users.
    +	internal.EnforceSubConnEmbedding
    +}
    +
    +// A ProducerBuilder is a simple constructor for a Producer.  It is used by the
    +// SubConn to create producers when needed.
    +type ProducerBuilder interface {
    +	// Build creates a Producer.  The first parameter is always a
    +	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
    +	// associated SubConn), but is declared as `any` to avoid a dependency
    +	// cycle.  Build also returns a close function that will be called when all
    +	// references to the Producer have been given up for a SubConn, or when a
    +	// connectivity state change occurs on the SubConn.  The close function
    +	// should always block until all asynchronous cleanup work is completed.
    +	Build(grpcClientConnInterface any) (p Producer, close func())
    +}
    +
    +// SubConnState describes the state of a SubConn.
    +type SubConnState struct {
    +	// ConnectivityState is the connectivity state of the SubConn.
    +	ConnectivityState connectivity.State
    +	// ConnectionError is set if the ConnectivityState is TransientFailure,
    +	// describing the reason the SubConn failed.  Otherwise, it is nil.
    +	ConnectionError error
+	// connectedAddress contains the connected address when ConnectivityState is
    +	// Ready. Otherwise, it is indeterminate.
    +	connectedAddress resolver.Address
    +}
    +
    +// connectedAddress returns the connected address for a SubConnState. The
    +// address is only valid if the state is READY.
    +func connectedAddress(scs SubConnState) resolver.Address {
    +	return scs.connectedAddress
    +}
    +
    +// setConnectedAddress sets the connected address for a SubConnState.
    +func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
    +	scs.connectedAddress = addr
    +}
    +
    +// A Producer is a type shared among potentially many consumers.  It is
    +// associated with a SubConn, and an implementation will typically contain
    +// other methods to provide additional functionality, e.g. configuration or
    +// subscription registration.
    +type Producer any
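The NOTICE on the SubConn interface above says custom load balancing policies should never implement SubConn from scratch; instead they should embed a SubConn obtained from ClientConn.NewSubConn and override only the methods they care about, which also satisfies internal.EnforceSubConnEmbedding through method promotion. A minimal sketch of that pattern, assuming only the balancer package as vendored here (the loggingSubConn wrapper and wrap helper are hypothetical, not part of gRPC):

package lbexample

import (
	"log"

	"google.golang.org/grpc/balancer"
)

// loggingSubConn decorates a SubConn created by gRPC. Embedding the interface
// forwards every method (including the embedding-enforcement method) to the
// real SubConn, so only the overridden methods need to be written.
type loggingSubConn struct {
	balancer.SubConn // must be a non-nil SubConn from ClientConn.NewSubConn.
}

// Connect logs the connection attempt and then delegates to the real SubConn.
func (s *loggingSubConn) Connect() {
	log.Printf("Connect() called on %v", s.SubConn)
	s.SubConn.Connect()
}

// wrap would typically be called by an LB policy right after
// ClientConn.NewSubConn returns, before handing the SubConn to its picker.
func wrap(sc balancer.SubConn) balancer.SubConn {
	return &loggingSubConn{SubConn: sc}
}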
    diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
    index b5e30cff02..905817b5fc 100644
    --- a/vendor/google.golang.org/grpc/balancer_wrapper.go
    +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
    @@ -21,17 +21,21 @@ package grpc
     import (
     	"context"
     	"fmt"
    -	"strings"
     	"sync"
     
     	"google.golang.org/grpc/balancer"
    +	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/connectivity"
    +	"google.golang.org/grpc/internal"
     	"google.golang.org/grpc/internal/balancer/gracefulswitch"
     	"google.golang.org/grpc/internal/channelz"
     	"google.golang.org/grpc/internal/grpcsync"
     	"google.golang.org/grpc/resolver"
    +	"google.golang.org/grpc/status"
     )
     
    +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
    +
     // ccBalancerWrapper sits between the ClientConn and the Balancer.
     //
     // ccBalancerWrapper implements methods corresponding to the ones on the
    @@ -66,19 +70,21 @@ type ccBalancerWrapper struct {
     }
     
     // newCCBalancerWrapper creates a new balancer wrapper in idle state. The
    -// underlying balancer is not created until the switchTo() method is invoked.
    +// underlying balancer is not created until the updateClientConnState() method
    +// is invoked.
     func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
     	ctx, cancel := context.WithCancel(cc.ctx)
     	ccb := &ccBalancerWrapper{
     		cc: cc,
     		opts: balancer.BuildOptions{
    -			DialCreds:        cc.dopts.copts.TransportCredentials,
    -			CredsBundle:      cc.dopts.copts.CredsBundle,
    -			Dialer:           cc.dopts.copts.Dialer,
    -			Authority:        cc.authority,
    -			CustomUserAgent:  cc.dopts.copts.UserAgent,
    -			ChannelzParentID: cc.channelzID,
    -			Target:           cc.parsedTarget,
    +			DialCreds:       cc.dopts.copts.TransportCredentials,
    +			CredsBundle:     cc.dopts.copts.CredsBundle,
    +			Dialer:          cc.dopts.copts.Dialer,
    +			Authority:       cc.authority,
    +			CustomUserAgent: cc.dopts.copts.UserAgent,
    +			ChannelzParent:  cc.channelz,
    +			Target:          cc.parsedTarget,
    +			MetricsRecorder: cc.metricsRecorderList,
     		},
     		serializer:       grpcsync.NewCallbackSerializer(ctx),
     		serializerCancel: cancel,
    @@ -92,27 +98,38 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
     // it is safe to call into the balancer here.
     func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
     	errCh := make(chan error)
    -	ok := ccb.serializer.Schedule(func(ctx context.Context) {
    +	uccs := func(ctx context.Context) {
     		defer close(errCh)
     		if ctx.Err() != nil || ccb.balancer == nil {
     			return
     		}
    +		name := gracefulswitch.ChildName(ccs.BalancerConfig)
    +		if ccb.curBalancerName != name {
    +			ccb.curBalancerName = name
    +			channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
    +		}
     		err := ccb.balancer.UpdateClientConnState(*ccs)
     		if logger.V(2) && err != nil {
     			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
     		}
     		errCh <- err
    -	})
    -	if !ok {
    -		return nil
     	}
    +	onFailure := func() { close(errCh) }
    +
    +	// UpdateClientConnState can race with Close, and when the latter wins, the
    +	// serializer is closed, and the attempt to schedule the callback will fail.
    +	// It is acceptable to ignore this failure. But since we want to handle the
    +	// state update in a blocking fashion (when we successfully schedule the
    +	// callback), we have to use the ScheduleOr method and not the MaybeSchedule
    +	// method on the serializer.
    +	ccb.serializer.ScheduleOr(uccs, onFailure)
     	return <-errCh
     }
     
     // resolverError is invoked by grpc to push a resolver error to the underlying
     // balancer.  The call to the balancer is executed from the serializer.
     func (ccb *ccBalancerWrapper) resolverError(err error) {
    -	ccb.serializer.Schedule(func(ctx context.Context) {
    +	ccb.serializer.TrySchedule(func(ctx context.Context) {
     		if ctx.Err() != nil || ccb.balancer == nil {
     			return
     		}
    @@ -120,54 +137,6 @@ func (ccb *ccBalancerWrapper) resolverError(err error) {
     	})
     }
     
    -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
    -// LB policy identified by name.
    -//
    -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
    -// first good update from the name resolver, it determines the LB policy to use
    -// and invokes the switchTo() method. Upon receipt of every subsequent update
    -// from the name resolver, it invokes this method.
    -//
    -// the ccBalancerWrapper keeps track of the current LB policy name, and skips
    -// the graceful balancer switching process if the name does not change.
    -func (ccb *ccBalancerWrapper) switchTo(name string) {
    -	ccb.serializer.Schedule(func(ctx context.Context) {
    -		if ctx.Err() != nil || ccb.balancer == nil {
    -			return
    -		}
    -		// TODO: Other languages use case-sensitive balancer registries. We should
    -		// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
    -		if strings.EqualFold(ccb.curBalancerName, name) {
    -			return
    -		}
    -		ccb.buildLoadBalancingPolicy(name)
    -	})
    -}
    -
    -// buildLoadBalancingPolicy performs the following:
    -//   - retrieve a balancer builder for the given name. Use the default LB
    -//     policy, pick_first, if no LB policy with name is found in the registry.
    -//   - instruct the gracefulswitch balancer to switch to the above builder. This
    -//     will actually build the new balancer.
    -//   - update the `curBalancerName` field
    -//
    -// Must be called from a serializer callback.
    -func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
    -	builder := balancer.Get(name)
    -	if builder == nil {
    -		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
    -		builder = newPickfirstBuilder()
    -	} else {
    -		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
    -	}
    -
    -	if err := ccb.balancer.SwitchTo(builder); err != nil {
    -		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
    -		return
    -	}
    -	ccb.curBalancerName = builder.Name()
    -}
    -
     // close initiates async shutdown of the wrapper.  cc.mu must be held when
     // calling this function.  To determine the wrapper has finished shutting down,
     // the channel should block on ccb.serializer.Done() without cc.mu held.
    @@ -175,8 +144,8 @@ func (ccb *ccBalancerWrapper) close() {
     	ccb.mu.Lock()
     	ccb.closed = true
     	ccb.mu.Unlock()
    -	channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
    -	ccb.serializer.Schedule(func(context.Context) {
    +	channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
    +	ccb.serializer.TrySchedule(func(context.Context) {
     		if ccb.balancer == nil {
     			return
     		}
    @@ -188,7 +157,7 @@ func (ccb *ccBalancerWrapper) close() {
     
     // exitIdle invokes the balancer's exitIdle method in the serializer.
     func (ccb *ccBalancerWrapper) exitIdle() {
    -	ccb.serializer.Schedule(func(ctx context.Context) {
    +	ccb.serializer.TrySchedule(func(ctx context.Context) {
     		if ctx.Err() != nil || ccb.balancer == nil {
     			return
     		}
    @@ -212,7 +181,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
     	}
     	ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
     	if err != nil {
    -		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
    +		channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
     		return nil, err
     	}
     	acbw := &acBalancerWrapper{
    @@ -220,12 +189,13 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
     		ac:            ac,
     		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
     		stateListener: opts.StateListener,
    +		healthData:    newHealthData(connectivity.Idle),
     	}
     	ac.acbw = acbw
     	return acbw, nil
     }
     
    -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
    +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
     	// The graceful switch balancer will never call this.
     	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
     }
    @@ -241,6 +211,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
     func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
     	ccb.cc.mu.Lock()
     	defer ccb.cc.mu.Unlock()
    +	if ccb.cc.conns == nil {
    +		// The CC has been closed; ignore this update.
    +		return
    +	}
     
     	ccb.mu.Lock()
     	if ccb.closed {
    @@ -281,30 +255,75 @@ func (ccb *ccBalancerWrapper) Target() string {
     // acBalancerWrapper is a wrapper on top of ac for balancers.
     // It implements balancer.SubConn interface.
     type acBalancerWrapper struct {
    +	internal.EnforceSubConnEmbedding
     	ac            *addrConn          // read-only
     	ccb           *ccBalancerWrapper // read-only
     	stateListener func(balancer.SubConnState)
     
    -	mu        sync.Mutex
    -	producers map[balancer.ProducerBuilder]*refCountedProducer
    +	producersMu sync.Mutex
    +	producers   map[balancer.ProducerBuilder]*refCountedProducer
    +
    +	// Access to healthData is protected by healthMu.
    +	healthMu sync.Mutex
    +	// healthData is stored as a pointer to detect when the health listener is
    +	// dropped or updated. This is required as closures can't be compared for
    +	// equality.
    +	healthData *healthData
    +}
    +
    +// healthData holds data related to health state reporting.
    +type healthData struct {
    +	// connectivityState stores the most recent connectivity state delivered
    +	// to the LB policy. This is stored to avoid sending updates when the
    +	// SubConn has already exited connectivity state READY.
    +	connectivityState connectivity.State
    +}
    +
    +func newHealthData(s connectivity.State) *healthData {
    +	return &healthData{connectivityState: s}
     }
     
     // updateState is invoked by grpc to push a subConn state update to the
     // underlying balancer.
    -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
    -	acbw.ccb.serializer.Schedule(func(ctx context.Context) {
    +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
    +	acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
     		if ctx.Err() != nil || acbw.ccb.balancer == nil {
     			return
     		}
    +		// Invalidate all producers on any state change.
    +		acbw.closeProducers()
    +
     		// Even though it is optional for balancers, gracefulswitch ensures
     		// opts.StateListener is set, so this cannot ever be nil.
     		// TODO: delete this comment when UpdateSubConnState is removed.
    -		acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
    +		scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
    +		if s == connectivity.Ready {
    +			setConnectedAddress(&scs, curAddr)
    +		}
    +		// Invalidate the health listener by updating the healthData.
    +		acbw.healthMu.Lock()
    +		// A race may occur if a health listener is registered soon after the
    +		// connectivity state is set but before the stateListener is called.
    +		// Two cases may arise:
    +		// 1. The new state is not READY: RegisterHealthListener has checks to
    +		//    ensure no updates are sent when the connectivity state is not
    +		//    READY.
    +		// 2. The new state is READY: This means that the old state wasn't Ready.
    +		//    The RegisterHealthListener API mentions that a health listener
    +		//    must not be registered when a SubConn is not ready to avoid such
    +		//    races. When this happens, the LB policy would get health updates
    +		//    on the old listener. When the LB policy registers a new listener
    +		//    on receiving the connectivity update, the health updates will be
    +		//    sent to the new health listener.
    +		acbw.healthData = newHealthData(scs.ConnectivityState)
    +		acbw.healthMu.Unlock()
    +
    +		acbw.stateListener(scs)
     	})
     }
     
     func (acbw *acBalancerWrapper) String() string {
    -	return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
    +	return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
     }
     
     func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
    @@ -316,6 +335,7 @@ func (acbw *acBalancerWrapper) Connect() {
     }
     
     func (acbw *acBalancerWrapper) Shutdown() {
    +	acbw.closeProducers()
     	acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
     }
     
    @@ -323,9 +343,10 @@ func (acbw *acBalancerWrapper) Shutdown() {
     // ready, blocks until it is or ctx expires.  Returns an error when the context
     // expires or the addrConn is shut down.
     func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
    -	transport, err := acbw.ac.getTransport(ctx)
    -	if err != nil {
    -		return nil, err
    +	transport := acbw.ac.getReadyTransport()
    +	if transport == nil {
    +		return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
    +
     	}
     	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
     }
    @@ -350,15 +371,15 @@ type refCountedProducer struct {
     }
     
     func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
    -	acbw.mu.Lock()
    -	defer acbw.mu.Unlock()
    +	acbw.producersMu.Lock()
    +	defer acbw.producersMu.Unlock()
     
     	// Look up existing producer from this builder.
     	pData := acbw.producers[pb]
     	if pData == nil {
     		// Not found; create a new one and add it to the producers map.
    -		p, close := pb.Build(acbw)
    -		pData = &refCountedProducer{producer: p, close: close}
    +		p, closeFn := pb.Build(acbw)
    +		pData = &refCountedProducer{producer: p, close: closeFn}
     		acbw.producers[pb] = pData
     	}
     	// Account for this new reference.
    @@ -368,13 +389,64 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
     	// and delete the refCountedProducer from the map if the total reference
     	// count goes to zero.
     	unref := func() {
    -		acbw.mu.Lock()
    +		acbw.producersMu.Lock()
    +		// If closeProducers has already closed this producer instance, refs is
    +		// set to 0, so the check after decrementing will never pass, and the
    +		// producer will not be double-closed.
     		pData.refs--
     		if pData.refs == 0 {
     			defer pData.close() // Run outside the acbw mutex
     			delete(acbw.producers, pb)
     		}
    -		acbw.mu.Unlock()
    +		acbw.producersMu.Unlock()
     	}
     	return pData.producer, grpcsync.OnceFunc(unref)
     }
    +
    +func (acbw *acBalancerWrapper) closeProducers() {
    +	acbw.producersMu.Lock()
    +	defer acbw.producersMu.Unlock()
    +	for pb, pData := range acbw.producers {
    +		pData.refs = 0
    +		pData.close()
    +		delete(acbw.producers, pb)
    +	}
    +}
    +
    +// RegisterHealthListener accepts a health listener from the LB policy. It sends
    +// updates to the health listener as long as the SubConn's connectivity state
    +// doesn't change and a new health listener is not registered. To invalidate
    +// the currently registered health listener, acbw updates the healthData. If a
    +// nil listener is registered, the active health listener is dropped.
    +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
    +	acbw.healthMu.Lock()
    +	defer acbw.healthMu.Unlock()
    +	// listeners should not be registered when the connectivity state
    +	// isn't Ready. This may happen when the balancer registers a listener
    +	// after the connectivityState is updated, but before it is notified
    +	// of the update.
    +	if acbw.healthData.connectivityState != connectivity.Ready {
    +		return
    +	}
    +	// Replace the health data to stop sending updates to any previously
    +	// registered health listeners.
    +	hd := newHealthData(connectivity.Ready)
    +	acbw.healthData = hd
    +	if listener == nil {
    +		return
    +	}
    +
    +	acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
    +		if ctx.Err() != nil || acbw.ccb.balancer == nil {
    +			return
    +		}
    +		// Don't send updates if a new listener is registered.
    +		acbw.healthMu.Lock()
    +		defer acbw.healthMu.Unlock()
    +		curHD := acbw.healthData
    +		if curHD != hd {
    +			return
    +		}
    +		listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
    +	})
    +}
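The updateState and RegisterHealthListener changes above encode a narrow contract: health listeners are only valid while the SubConn is READY, any connectivity change invalidates the previously registered listener, and a registration attempted while the SubConn is not READY is silently dropped. A hedged sketch of an LB-policy helper that follows this contract is below; it assumes balancer.SubConn in this gRPC version exposes RegisterHealthListener (as the wrapper implements), and the helper and package names are made up.

package lbsketch

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// newHealthAwareSubConn creates a SubConn and re-registers a health listener
// every time the SubConn transitions to READY, matching the contract above.
func newHealthAwareSubConn(cc balancer.ClientConn, addrs []resolver.Address, onHealth func(balancer.SubConnState)) (balancer.SubConn, error) {
	// sc is declared before NewSubConn so the StateListener closure can refer
	// to it; the listener only fires after Connect, once sc is assigned.
	var sc balancer.SubConn
	var err error
	sc, err = cc.NewSubConn(addrs, balancer.NewSubConnOptions{
		StateListener: func(scs balancer.SubConnState) {
			if scs.ConnectivityState != connectivity.Ready {
				// The wrapper has already invalidated any previous health
				// listener; nothing to do until the next READY.
				return
			}
			// Register (or re-register) the health listener now that the
			// SubConn reports READY.
			sc.RegisterHealthListener(onHealth)
		},
	})
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}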
    diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
    index e9e97d4511..9e9d080699 100644
    --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
    +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
    @@ -18,8 +18,8 @@
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
    -// 	protoc-gen-go v1.31.0
    -// 	protoc        v4.22.0
    +// 	protoc-gen-go v1.35.1
    +// 	protoc        v5.27.1
     // source: grpc/binlog/v1/binarylog.proto
     
     package grpc_binarylog_v1
    @@ -274,11 +274,9 @@ type GrpcLogEntry struct {
     
     func (x *GrpcLogEntry) Reset() {
     	*x = GrpcLogEntry{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *GrpcLogEntry) String() string {
    @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {}
     
     func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -440,11 +438,9 @@ type ClientHeader struct {
     
     func (x *ClientHeader) Reset() {
     	*x = ClientHeader{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ClientHeader) String() string {
    @@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {}
     
     func (x *ClientHeader) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -509,11 +505,9 @@ type ServerHeader struct {
     
     func (x *ServerHeader) Reset() {
     	*x = ServerHeader{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ServerHeader) String() string {
    @@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {}
     
     func (x *ServerHeader) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -565,11 +559,9 @@ type Trailer struct {
     
     func (x *Trailer) Reset() {
     	*x = Trailer{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Trailer) String() string {
    @@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {}
     
     func (x *Trailer) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -638,11 +630,9 @@ type Message struct {
     
     func (x *Message) Reset() {
     	*x = Message{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Message) String() string {
    @@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {}
     
     func (x *Message) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -713,11 +703,9 @@ type Metadata struct {
     
     func (x *Metadata) Reset() {
     	*x = Metadata{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Metadata) String() string {
    @@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {}
     
     func (x *Metadata) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -762,11 +750,9 @@ type MetadataEntry struct {
     
     func (x *MetadataEntry) Reset() {
     	*x = MetadataEntry{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *MetadataEntry) String() string {
    @@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {}
     
     func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -820,11 +806,9 @@ type Address struct {
     
     func (x *Address) Reset() {
     	*x = Address{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Address) String() string {
    @@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {}
     
     func (x *Address) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1015,7 +999,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
     
     var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
     var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
    -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
    +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
     	(GrpcLogEntry_EventType)(0),   // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
     	(GrpcLogEntry_Logger)(0),      // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
     	(Address_Type)(0),             // 2: grpc.binarylog.v1.Address.Type
    @@ -1057,105 +1041,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
     	if File_grpc_binlog_v1_binarylog_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*GrpcLogEntry); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ClientHeader); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ServerHeader); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Trailer); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Message); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Metadata); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*MetadataEntry); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Address); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
    -	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
    +	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
     		(*GrpcLogEntry_ClientHeader)(nil),
     		(*GrpcLogEntry_ServerHeader)(nil),
     		(*GrpcLogEntry_Message)(nil),
    diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
    index f6e815e6bf..4f57b55434 100644
    --- a/vendor/google.golang.org/grpc/clientconn.go
    +++ b/vendor/google.golang.org/grpc/clientconn.go
    @@ -24,6 +24,7 @@ import (
     	"fmt"
     	"math"
     	"net/url"
    +	"slices"
     	"strings"
     	"sync"
     	"sync/atomic"
    @@ -31,14 +32,15 @@ import (
     
     	"google.golang.org/grpc/balancer"
     	"google.golang.org/grpc/balancer/base"
    +	"google.golang.org/grpc/balancer/pickfirst"
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/connectivity"
     	"google.golang.org/grpc/internal"
     	"google.golang.org/grpc/internal/channelz"
     	"google.golang.org/grpc/internal/grpcsync"
     	"google.golang.org/grpc/internal/idle"
    -	"google.golang.org/grpc/internal/pretty"
     	iresolver "google.golang.org/grpc/internal/resolver"
    +	"google.golang.org/grpc/internal/stats"
     	"google.golang.org/grpc/internal/transport"
     	"google.golang.org/grpc/keepalive"
     	"google.golang.org/grpc/resolver"
    @@ -67,12 +69,14 @@ var (
     	errConnDrain = errors.New("grpc: the connection is drained")
     	// errConnClosing indicates that the connection is closing.
     	errConnClosing = errors.New("grpc: the connection is closing")
    -	// errConnIdling indicates the the connection is being closed as the channel
    +	// errConnIdling indicates the connection is being closed as the channel
     	// is moving to an idle mode due to inactivity.
     	errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
     	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
     	// service config.
     	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
    +	// PickFirstBalancerName is the name of the pick_first balancer.
    +	PickFirstBalancerName = pickfirst.Name
     )
     
     // The following errors are returned from Dial and DialContext
    @@ -101,11 +105,6 @@ const (
     	defaultReadBufSize  = 32 * 1024
     )
     
    -// Dial creates a client connection to the given target.
    -func Dial(target string, opts ...DialOption) (*ClientConn, error) {
    -	return DialContext(context.Background(), target, opts...)
    -}
    -
     type defaultConfigSelector struct {
     	sc *ServiceConfig
     }
    @@ -117,13 +116,23 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
     	}, nil
     }
     
    -// newClient returns a new client in idle mode.
    -func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
    +// NewClient creates a new gRPC "channel" for the target URI provided.  No I/O
    +// is performed.  Use of the ClientConn for RPCs will automatically cause it to
    +// connect.  Connect may be used to manually create a connection, but for most
    +// users this is unnecessary.
    +//
    +// The target name syntax is defined in
    +// https://github.com/grpc/grpc/blob/master/doc/naming.md.  e.g. to use dns
    +// resolver, a "dns:///" prefix should be applied to the target.
    +//
    +// The DialOptions returned by WithBlock, WithTimeout,
    +// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
    +// function.
    +func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
     	cc := &ClientConn{
     		target: target,
     		conns:  make(map[*addrConn]struct{}),
     		dopts:  defaultDialOptions(),
    -		czData: new(channelzData),
     	}
     
     	cc.retryThrottler.Store((*retryThrottler)(nil))
    @@ -148,6 +157,16 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error)
     	for _, opt := range opts {
     		opt.apply(&cc.dopts)
     	}
    +
    +	// Determine the resolver to use.
    +	if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
    +		return nil, err
    +	}
    +
    +	for _, opt := range globalPerTargetDialOptions {
    +		opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
    +	}
    +
     	chainUnaryClientInterceptors(cc)
     	chainStreamClientInterceptors(cc)
     
    @@ -156,7 +175,7 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error)
     	}
     
     	if cc.dopts.defaultServiceConfigRawJSON != nil {
    -		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
    +		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
     		if scpr.Err != nil {
     			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
     		}
    @@ -164,66 +183,57 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error)
     	}
     	cc.mkp = cc.dopts.copts.KeepaliveParams
     
    -	// Register ClientConn with channelz.
    -	cc.channelzRegistration(target)
    -
    -	// TODO: Ideally it should be impossible to error from this function after
    -	// channelz registration.  This will require removing some channelz logs
    -	// from the following functions that can error.  Errors can be returned to
    -	// the user, and successful logs can be emitted here, after the checks have
    -	// passed and channelz is subsequently registered.
    -
    -	// Determine the resolver to use.
    -	if err := cc.parseTargetAndFindResolver(); err != nil {
    -		channelz.RemoveEntry(cc.channelzID)
    -		return nil, err
    -	}
    -	if err = cc.determineAuthority(); err != nil {
    -		channelz.RemoveEntry(cc.channelzID)
    +	if err = cc.initAuthority(); err != nil {
     		return nil, err
     	}
     
    -	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
    +	// Register ClientConn with channelz. Note that this is only done after
+	// the point where channel creation can no longer fail.
    +	cc.channelzRegistration(target)
    +	channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
    +	channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
    +
    +	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
     	cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
     
    +	cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
    +
     	cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
     	cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
    +
     	return cc, nil
     }
     
    -// DialContext creates a client connection to the given target. By default, it's
    -// a non-blocking dial (the function won't wait for connections to be
    -// established, and connecting happens in the background). To make it a blocking
    -// dial, use WithBlock() dial option.
    +// Dial calls DialContext(context.Background(), target, opts...).
     //
    -// In the non-blocking case, the ctx does not act against the connection. It
    -// only controls the setup steps.
    +// Deprecated: use NewClient instead.  Will be supported throughout 1.x.
    +func Dial(target string, opts ...DialOption) (*ClientConn, error) {
    +	return DialContext(context.Background(), target, opts...)
    +}
    +
    +// DialContext calls NewClient and then exits idle mode.  If WithBlock(true) is
    +// used, it calls Connect and WaitForStateChange until either the context
    +// expires or the state of the ClientConn is Ready.
     //
    -// In the blocking case, ctx can be used to cancel or expire the pending
    -// connection. Once this function returns, the cancellation and expiration of
    -// ctx will be noop. Users should call ClientConn.Close to terminate all the
    -// pending operations after this function returns.
    +// One subtle difference between NewClient and Dial and DialContext is that the
    +// former uses "dns" as the default name resolver, while the latter use
    +// "passthrough" for backward compatibility.  This distinction should not matter
    +// to most users, but could matter to legacy users that specify a custom dialer
    +// and expect it to receive the target string directly.
     //
    -// The target name syntax is defined in
    -// https://github.com/grpc/grpc/blob/master/doc/naming.md.
    -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
    +// Deprecated: use NewClient instead.  Will be supported throughout 1.x.
     func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
    -	cc, err := newClient(target, opts...)
    +	// At the end of this method, we kick the channel out of idle, rather than
    +	// waiting for the first rpc.
    +	opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
    +	cc, err := NewClient(target, opts...)
     	if err != nil {
     		return nil, err
     	}
     
     	// We start the channel off in idle mode, but kick it out of idle now,
    -	// instead of waiting for the first RPC. Other gRPC implementations do wait
    -	// for the first RPC to kick the channel out of idle. But doing so would be
    -	// a major behavior change for our users who are used to seeing the channel
    -	// active after Dial.
    -	//
    -	// Taking this approach of kicking it out of idle at the end of this method
    -	// allows us to share the code between channel creation and exiting idle
    -	// mode. This will also make it easy for us to switch to starting the
    -	// channel off in idle, i.e. by making newClient exported.
    -
    +	// instead of waiting for the first RPC.  This is the legacy behavior of
    +	// Dial.
     	defer func() {
     		if err != nil {
     			cc.Close()
    @@ -291,17 +301,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
     // addTraceEvent is a helper method to add a trace event on the channel. If the
     // channel is a nested one, the same event is also added on the parent channel.
     func (cc *ClientConn) addTraceEvent(msg string) {
    -	ted := &channelz.TraceEventDesc{
    +	ted := &channelz.TraceEvent{
     		Desc:     fmt.Sprintf("Channel %s", msg),
     		Severity: channelz.CtInfo,
     	}
    -	if cc.dopts.channelzParentID != nil {
    -		ted.Parent = &channelz.TraceEventDesc{
    -			Desc:     fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg),
    +	if cc.dopts.channelzParent != nil {
    +		ted.Parent = &channelz.TraceEvent{
    +			Desc:     fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
     			Severity: channelz.CtInfo,
     		}
     	}
    -	channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
    +	channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
     }
     
     type idler ClientConn
    @@ -418,14 +428,15 @@ func (cc *ClientConn) validateTransportCredentials() error {
     }
     
     // channelzRegistration registers the newly created ClientConn with channelz and
    -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
    -// A channelz trace event is emitted for ClientConn creation. If the newly
    -// created ClientConn is a nested one, i.e a valid parent ClientConn ID is
    -// specified via a dial option, the trace event is also added to the parent.
    +// stores the returned identifier in `cc.channelz`.  A channelz trace event is
    +// emitted for ClientConn creation. If the newly created ClientConn is a nested
+// one, i.e. a valid parent ClientConn ID is specified via a dial option, the
    +// trace event is also added to the parent.
     //
     // Doesn't grab cc.mu as this method is expected to be called only at Dial time.
     func (cc *ClientConn) channelzRegistration(target string) {
    -	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
    +	parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
    +	cc.channelz = channelz.RegisterChannel(parentChannel, target)
     	cc.addTraceEvent("created")
     }
     
    @@ -492,11 +503,11 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr
     }
     
     // newConnectivityStateManager creates an connectivityStateManager with
    -// the specified id.
    -func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager {
    +// the specified channel.
    +func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
     	return &connectivityStateManager{
    -		channelzID: id,
    -		pubSub:     grpcsync.NewPubSub(ctx),
    +		channelz: channel,
    +		pubSub:   grpcsync.NewPubSub(ctx),
     	}
     }
     
    @@ -510,7 +521,7 @@ type connectivityStateManager struct {
     	mu         sync.Mutex
     	state      connectivity.State
     	notifyChan chan struct{}
    -	channelzID *channelz.Identifier
    +	channelz   *channelz.Channel
     	pubSub     *grpcsync.PubSub
     }
     
    @@ -527,9 +538,10 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
     		return
     	}
     	csm.state = state
    +	csm.channelz.ChannelMetrics.State.Store(&state)
     	csm.pubSub.Publish(state)
     
    -	channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
    +	channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state)
     	if csm.notifyChan != nil {
     		// There are other goroutines waiting on this channel.
     		close(csm.notifyChan)
    @@ -583,20 +595,20 @@ type ClientConn struct {
     	cancel context.CancelFunc // Cancelled on close.
     
     	// The following are initialized at dial time, and are read-only after that.
    -	target          string               // User's dial target.
    -	parsedTarget    resolver.Target      // See parseTargetAndFindResolver().
    -	authority       string               // See determineAuthority().
    -	dopts           dialOptions          // Default and user specified dial options.
    -	channelzID      *channelz.Identifier // Channelz identifier for the channel.
    -	resolverBuilder resolver.Builder     // See parseTargetAndFindResolver().
    -	idlenessMgr     *idle.Manager
    +	target              string            // User's dial target.
    +	parsedTarget        resolver.Target   // See initParsedTargetAndResolverBuilder().
    +	authority           string            // See initAuthority().
    +	dopts               dialOptions       // Default and user specified dial options.
    +	channelz            *channelz.Channel // Channelz object.
    +	resolverBuilder     resolver.Builder  // See initParsedTargetAndResolverBuilder().
    +	idlenessMgr         *idle.Manager
    +	metricsRecorderList *stats.MetricsRecorderList
     
     	// The following provide their own synchronization, and therefore don't
     	// require cc.mu to be held to access them.
     	csMgr              *connectivityStateManager
     	pickerWrapper      *pickerWrapper
     	safeConfigSelector iresolver.SafeConfigSelector
    -	czData             *channelzData
     	retryThrottler     atomic.Value // Updated from service config.
     
     	// mu protects the following fields.
    @@ -620,11 +632,6 @@ type ClientConn struct {
     
     // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
     // ctx expires. A true value is returned in former case and false in latter.
    -//
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    -// later release.
     func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
     	ch := cc.csMgr.getNotifyChan()
     	if cc.csMgr.getState() != sourceState {
    @@ -639,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
     }
     
     // GetState returns the connectivity.State of ClientConn.
    -//
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
    -// release.
     func (cc *ClientConn) GetState() connectivity.State {
     	return cc.csMgr.getState()
     }
    @@ -690,7 +692,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
     var emptyServiceConfig *ServiceConfig
     
     func init() {
    -	cfg := parseServiceConfig("{}")
    +	cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
     	if cfg.Err != nil {
     		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
     	}
    @@ -707,15 +709,15 @@ func init() {
     	}
     }
     
    -func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
    +func (cc *ClientConn) maybeApplyDefaultServiceConfig() {
     	if cc.sc != nil {
    -		cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs)
    +		cc.applyServiceConfigAndBalancer(cc.sc, nil)
     		return
     	}
     	if cc.dopts.defaultServiceConfig != nil {
    -		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs)
    +		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig})
     	} else {
    -		cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs)
    +		cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig})
     	}
     }
     
    @@ -733,7 +735,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error)
     		// May need to apply the initial service config in case the resolver
     		// doesn't support service configs, or doesn't provide a service config
     		// with the new addresses.
    -		cc.maybeApplyDefaultServiceConfig(nil)
    +		cc.maybeApplyDefaultServiceConfig()
     
     		cc.balancerWrapper.resolverError(err)
     
    @@ -744,10 +746,10 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error)
     
     	var ret error
     	if cc.dopts.disableServiceConfig {
    -		channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
    -		cc.maybeApplyDefaultServiceConfig(s.Addresses)
    +		channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
    +		cc.maybeApplyDefaultServiceConfig()
     	} else if s.ServiceConfig == nil {
    -		cc.maybeApplyDefaultServiceConfig(s.Addresses)
    +		cc.maybeApplyDefaultServiceConfig()
     		// TODO: do we need to apply a failing LB policy if there is no
     		// default, per the error handling design?
     	} else {
    @@ -755,12 +757,12 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error)
     			configSelector := iresolver.GetConfigSelector(s)
     			if configSelector != nil {
     				if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
    -					channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector")
    +					channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector")
     				}
     			} else {
     				configSelector = &defaultConfigSelector{sc}
     			}
    -			cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
    +			cc.applyServiceConfigAndBalancer(sc, configSelector)
     		} else {
     			ret = balancer.ErrBadResolverState
     			if cc.sc == nil {
    @@ -773,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error)
     		}
     	}
     
    -	var balCfg serviceconfig.LoadBalancingConfig
    -	if cc.sc != nil && cc.sc.lbConfig != nil {
    -		balCfg = cc.sc.lbConfig.cfg
    -	}
    +	balCfg := cc.sc.lbConfig
     	bw := cc.balancerWrapper
     	cc.mu.Unlock()
     
    @@ -806,17 +805,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
     	cc.csMgr.updateState(connectivity.TransientFailure)
     }
     
    -// Makes a copy of the input addresses slice and clears out the balancer
    -// attributes field. Addresses are passed during subconn creation and address
    -// update operations. In both cases, we will clear the balancer attributes by
    -// calling this function, and therefore we will be able to use the Equal method
    -// provided by the resolver.Address type for comparison.
    -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
    +// Makes a copy of the input addresses slice. Addresses are passed during
    +// subconn creation and address update operations.
    +func copyAddresses(in []resolver.Address) []resolver.Address {
     	out := make([]resolver.Address, len(in))
    -	for i := range in {
    -		out[i] = in[i]
    -		out[i].BalancerAttributes = nil
    -	}
    +	copy(out, in)
     	return out
     }
     
    @@ -831,25 +824,22 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
     	ac := &addrConn{
     		state:        connectivity.Idle,
     		cc:           cc,
    -		addrs:        copyAddressesWithoutBalancerAttributes(addrs),
    +		addrs:        copyAddresses(addrs),
     		scopts:       opts,
     		dopts:        cc.dopts,
    -		czData:       new(channelzData),
    +		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
     		resetBackoff: make(chan struct{}),
    -		stateChan:    make(chan struct{}),
     	}
     	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
    +	// Start with our address set to the first address; this may be updated if
    +	// we connect to different addresses.
    +	ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
     
    -	var err error
    -	ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
    -	if err != nil {
    -		return nil, err
    -	}
    -	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
    +	channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
     		Desc:     "Subchannel created",
     		Severity: channelz.CtInfo,
    -		Parent: &channelz.TraceEventDesc{
    -			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
    +		Parent: &channelz.TraceEvent{
    +			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
     			Severity: channelz.CtInfo,
     		},
     	})
    @@ -872,38 +862,27 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
     	ac.tearDown(err)
     }
     
    -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
    -	return &channelz.ChannelInternalMetric{
    -		State:                    cc.GetState(),
    -		Target:                   cc.target,
    -		CallsStarted:             atomic.LoadInt64(&cc.czData.callsStarted),
    -		CallsSucceeded:           atomic.LoadInt64(&cc.czData.callsSucceeded),
    -		CallsFailed:              atomic.LoadInt64(&cc.czData.callsFailed),
    -		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
    -	}
    -}
    -
     // Target returns the target string of the ClientConn.
    -//
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    -// later release.
     func (cc *ClientConn) Target() string {
     	return cc.target
     }
     
    +// CanonicalTarget returns the canonical target string of the ClientConn.
    +func (cc *ClientConn) CanonicalTarget() string {
    +	return cc.parsedTarget.String()
    +}
    +
     func (cc *ClientConn) incrCallsStarted() {
    -	atomic.AddInt64(&cc.czData.callsStarted, 1)
    -	atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
    +	cc.channelz.ChannelMetrics.CallsStarted.Add(1)
    +	cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
     }
     
     func (cc *ClientConn) incrCallsSucceeded() {
    -	atomic.AddInt64(&cc.czData.callsSucceeded, 1)
    +	cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
     }
     
     func (cc *ClientConn) incrCallsFailed() {
    -	atomic.AddInt64(&cc.czData.callsFailed, 1)
    +	cc.channelz.ChannelMetrics.CallsFailed.Add(1)
     }
     
     // connect starts creating a transport.
    @@ -925,32 +904,37 @@ func (ac *addrConn) connect() error {
     		ac.mu.Unlock()
     		return nil
     	}
    -	ac.mu.Unlock()
     
    -	ac.resetTransport()
    +	ac.resetTransportAndUnlock()
     	return nil
     }
     
    -func equalAddresses(a, b []resolver.Address) bool {
    -	if len(a) != len(b) {
    -		return false
    -	}
    -	for i, v := range a {
    -		if !v.Equal(b[i]) {
    -			return false
    -		}
    -	}
    -	return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
    +// This is different from the Equal method on the resolver.Address type which
    +// considers all fields to determine equality. Here, we only consider fields
    +// that are meaningful to the subConn.
    +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
    +	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
    +		a.Attributes.Equal(b.Attributes) &&
    +		a.Metadata == b.Metadata
    +}
    +
    +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
    +	return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
     }
     
     // updateAddrs updates ac.addrs with the new addresses list and handles active
     // connections or connection attempts.
     func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
    -	ac.mu.Lock()
    -	channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))
    +	addrs = copyAddresses(addrs)
    +	limit := len(addrs)
    +	if limit > 5 {
    +		limit = 5
    +	}
    +	channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
     
    -	addrs = copyAddressesWithoutBalancerAttributes(addrs)
    -	if equalAddresses(ac.addrs, addrs) {
    +	ac.mu.Lock()
    +	if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
     		ac.mu.Unlock()
     		return
     	}
    @@ -969,7 +953,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
     		// Try to find the connected address.
     		for _, a := range addrs {
     			a.ServerName = ac.cc.getServerName(a)
    -			if a.Equal(ac.curAddr) {
    +			if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
     				// We are connected to a valid address, so do nothing but
     				// update the addresses.
     				ac.mu.Unlock()
    @@ -995,11 +979,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
     		ac.updateConnectivityState(connectivity.Idle, nil)
     	}
     
    -	ac.mu.Unlock()
    -
     	// Since we were connecting/connected, we should start a new connection
     	// attempt.
    -	go ac.resetTransport()
    +	go ac.resetTransportAndUnlock()
     }
     
     // getServerName determines the serverName to be used in the connection
    @@ -1067,7 +1049,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
     	})
     }
     
    -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
    +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
     	if sc == nil {
     		// should never reach here.
     		return
    @@ -1088,17 +1070,6 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
     	} else {
     		cc.retryThrottler.Store((*retryThrottler)(nil))
     	}
    -
    -	var newBalancerName string
    -	if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) {
    -		// No service config or no LB policy specified in config.
    -		newBalancerName = PickFirstBalancerName
    -	} else if cc.sc.lbConfig != nil {
    -		newBalancerName = cc.sc.lbConfig.name
    -	} else { // cc.sc.LB != nil
    -		newBalancerName = *cc.sc.LB
    -	}
    -	cc.balancerWrapper.switchTo(newBalancerName)
     }
     
     func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
    @@ -1166,15 +1137,20 @@ func (cc *ClientConn) Close() error {
     
     	<-cc.resolverWrapper.serializer.Done()
     	<-cc.balancerWrapper.serializer.Done()
    -
    +	var wg sync.WaitGroup
     	for ac := range conns {
    -		ac.tearDown(ErrClientConnClosing)
    +		wg.Add(1)
    +		go func(ac *addrConn) {
    +			defer wg.Done()
    +			ac.tearDown(ErrClientConnClosing)
    +		}(ac)
     	}
    +	wg.Wait()
     	cc.addTraceEvent("deleted")
     	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
     	// trace reference to the entity being deleted, and thus prevent it from being
     	// deleted right away.
    -	channelz.RemoveEntry(cc.channelzID)
    +	channelz.RemoveEntry(cc.channelz.ID)
     
     	return nil
     }
    @@ -1195,19 +1171,21 @@ type addrConn struct {
     	// is received, transport is closed, ac has been torn down).
     	transport transport.ClientTransport // The current transport.
     
    +	// This mutex is used on the RPC path, so its usage should be minimized as
    +	// much as possible.
    +	// TODO: Find a lock-free way to retrieve the transport and state from the
    +	// addrConn.
     	mu      sync.Mutex
     	curAddr resolver.Address   // The current address.
     	addrs   []resolver.Address // All addresses that the resolver resolved to.
     
     	// Use updateConnectivityState for updating addrConn's connectivity state.
    -	state     connectivity.State
    -	stateChan chan struct{} // closed and recreated on every state change.
    +	state connectivity.State
     
     	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
     	resetBackoff chan struct{}
     
    -	channelzID *channelz.Identifier
    -	czData     *channelzData
    +	channelz *channelz.SubChannel
     }
     
     // Note: this requires a lock on ac.mu.
    @@ -1215,16 +1193,14 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
     	if ac.state == s {
     		return
     	}
    -	// When changing states, reset the state change channel.
    -	close(ac.stateChan)
    -	ac.stateChan = make(chan struct{})
     	ac.state = s
    +	ac.channelz.ChannelMetrics.State.Store(&s)
     	if lastErr == nil {
    -		channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
    +		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
     	} else {
    -		channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
    +		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
     	}
    -	ac.acbw.updateState(s, lastErr)
    +	ac.acbw.updateState(s, ac.curAddr, lastErr)
     }
     
     // adjustParams updates parameters used to create transports upon
    @@ -1241,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
     	}
     }
     
    -func (ac *addrConn) resetTransport() {
    -	ac.mu.Lock()
    +// resetTransportAndUnlock unconditionally connects the addrConn.
    +//
    +// ac.mu must be held by the caller, and this function will guarantee it is released.
    +func (ac *addrConn) resetTransportAndUnlock() {
     	acCtx := ac.ctx
     	if acCtx.Err() != nil {
     		ac.mu.Unlock()
    @@ -1273,6 +1251,8 @@ func (ac *addrConn) resetTransport() {
     	ac.mu.Unlock()
     
     	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
    +		// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
    +		// to ensure one resolution request per pass instead of per subconn failure.
     		ac.cc.resolveNow(resolver.ResolveNowOptions{})
     		ac.mu.Lock()
     		if acCtx.Err() != nil {
    @@ -1314,12 +1294,13 @@ func (ac *addrConn) resetTransport() {
     	ac.mu.Unlock()
     }
     
    -// tryAllAddrs tries to creates a connection to the addresses, and stop when at
+// tryAllAddrs tries to create a connection to the addresses, stopping at
     // the first successful one. It returns an error if no address was successfully
     // connected, or updates ac appropriately with the new transport.
     func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
     	var firstConnErr error
     	for _, addr := range addrs {
    +		ac.channelz.ChannelMetrics.Target.Store(&addr.Addr)
     		if ctx.Err() != nil {
     			return errConnClosing
     		}
    @@ -1335,7 +1316,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c
     		}
     		ac.mu.Unlock()
     
    -		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
    +		channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr)
     
     		err := ac.createTransport(ctx, addr, copts, connectDeadline)
     		if err == nil {
    @@ -1388,16 +1369,16 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address,
     
     	connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
     	defer cancel()
    -	copts.ChannelzParentID = ac.channelzID
    +	copts.ChannelzParent = ac.channelz
     
    -	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
    +	newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose)
     	if err != nil {
     		if logger.V(2) {
     			logger.Infof("Creating new client transport to %q: %v", addr, err)
     		}
     		// newTr is either nil, or closed.
     		hcancel()
    -		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
    +		channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
     		return err
     	}
     
    @@ -1464,12 +1445,12 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
     	if !ac.scopts.HealthCheckEnabled {
     		return
     	}
    -	healthCheckFunc := ac.cc.dopts.healthCheckFunc
    +	healthCheckFunc := internal.HealthCheckFunc
     	if healthCheckFunc == nil {
     		// The health package is not imported to set health check function.
     		//
     		// TODO: add a link to the health check doc in the error message.
    -		channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
    +		channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.")
     		return
     	}
     
    @@ -1496,12 +1477,12 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
     	}
     	// Start the health checking stream.
     	go func() {
    -		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
    +		err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
     		if err != nil {
     			if status.Code(err) == codes.Unimplemented {
    -				channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
    +				channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
     			} else {
    -				channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
    +				channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err)
     			}
     		}
     	}()
    @@ -1525,29 +1506,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
     	return nil
     }
     
    -// getTransport waits until the addrconn is ready and returns the transport.
    -// If the context expires first, returns an appropriate status.  If the
    -// addrConn is stopped first, returns an Unavailable status error.
    -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
    -	for ctx.Err() == nil {
    -		ac.mu.Lock()
    -		t, state, sc := ac.transport, ac.state, ac.stateChan
    -		ac.mu.Unlock()
    -		if state == connectivity.Ready {
    -			return t, nil
    -		}
    -		if state == connectivity.Shutdown {
    -			return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
    -		}
    -
    -		select {
    -		case <-ctx.Done():
    -		case <-sc:
    -		}
    -	}
    -	return nil, status.FromContextError(ctx.Err()).Err()
    -}
    -
     // tearDown starts to tear down the addrConn.
     //
     // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
    @@ -1566,18 +1524,18 @@ func (ac *addrConn) tearDown(err error) {
     	ac.cancel()
     	ac.curAddr = resolver.Address{}
     
    -	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
    +	channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
     		Desc:     "Subchannel deleted",
     		Severity: channelz.CtInfo,
    -		Parent: &channelz.TraceEventDesc{
    -			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
    +		Parent: &channelz.TraceEvent{
    +			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID),
     			Severity: channelz.CtInfo,
     		},
     	})
     	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
     	// trace reference to the entity being deleted, and thus prevent it from
     	// being deleted right away.
    -	channelz.RemoveEntry(ac.channelzID)
    +	channelz.RemoveEntry(ac.channelz.ID)
     	ac.mu.Unlock()
     
     	// We have to release the lock before the call to GracefulClose/Close here
    @@ -1594,7 +1552,7 @@ func (ac *addrConn) tearDown(err error) {
     		} else {
     			// Hard close the transport when the channel is entering idle or is
     			// being shutdown. In the case where the channel is being shutdown,
    -			// closing of transports is also taken care of by cancelation of cc.ctx.
    +			// closing of transports is also taken care of by cancellation of cc.ctx.
     			// But in the case where the channel is entering idle, we need to
     			// explicitly close the transports here. Instead of distinguishing
     			// between these two cases, it is simpler to close the transport
    @@ -1604,39 +1562,6 @@ func (ac *addrConn) tearDown(err error) {
     	}
     }
     
    -func (ac *addrConn) getState() connectivity.State {
    -	ac.mu.Lock()
    -	defer ac.mu.Unlock()
    -	return ac.state
    -}
    -
    -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
    -	ac.mu.Lock()
    -	addr := ac.curAddr.Addr
    -	ac.mu.Unlock()
    -	return &channelz.ChannelInternalMetric{
    -		State:                    ac.getState(),
    -		Target:                   addr,
    -		CallsStarted:             atomic.LoadInt64(&ac.czData.callsStarted),
    -		CallsSucceeded:           atomic.LoadInt64(&ac.czData.callsSucceeded),
    -		CallsFailed:              atomic.LoadInt64(&ac.czData.callsFailed),
    -		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
    -	}
    -}
    -
    -func (ac *addrConn) incrCallsStarted() {
    -	atomic.AddInt64(&ac.czData.callsStarted, 1)
    -	atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
    -}
    -
    -func (ac *addrConn) incrCallsSucceeded() {
    -	atomic.AddInt64(&ac.czData.callsSucceeded, 1)
    -}
    -
    -func (ac *addrConn) incrCallsFailed() {
    -	atomic.AddInt64(&ac.czData.callsFailed, 1)
    -}
    -
     type retryThrottler struct {
     	max    float64
     	thresh float64
    @@ -1674,12 +1599,17 @@ func (rt *retryThrottler) successfulRPC() {
     	}
     }
     
    -type channelzChannel struct {
    -	cc *ClientConn
    +func (ac *addrConn) incrCallsStarted() {
    +	ac.channelz.ChannelMetrics.CallsStarted.Add(1)
    +	ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
    +}
    +
    +func (ac *addrConn) incrCallsSucceeded() {
    +	ac.channelz.ChannelMetrics.CallsSucceeded.Add(1)
     }
     
    -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
    -	return c.cc.channelzMetric()
    +func (ac *addrConn) incrCallsFailed() {
    +	ac.channelz.ChannelMetrics.CallsFailed.Add(1)
     }
     
     // ErrClientConnTimeout indicates that the ClientConn cannot establish the
    @@ -1713,22 +1643,19 @@ func (cc *ClientConn) connectionError() error {
     	return cc.lastConnectionError
     }
     
    -// parseTargetAndFindResolver parses the user's dial target and stores the
    -// parsed target in `cc.parsedTarget`.
    +// initParsedTargetAndResolverBuilder parses the user's dial target and stores
    +// the parsed target in `cc.parsedTarget`.
     //
     // The resolver to use is determined based on the scheme in the parsed target
     // and the same is stored in `cc.resolverBuilder`.
     //
     // Doesn't grab cc.mu as this method is expected to be called only at Dial time.
    -func (cc *ClientConn) parseTargetAndFindResolver() error {
    -	channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
    +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
    +	logger.Infof("original dial target is: %q", cc.target)
     
     	var rb resolver.Builder
     	parsedTarget, err := parseTarget(cc.target)
    -	if err != nil {
    -		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
    -	} else {
    -		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget)
    +	if err == nil {
     		rb = cc.getResolver(parsedTarget.URL.Scheme)
     		if rb != nil {
     			cc.parsedTarget = parsedTarget
    @@ -1740,17 +1667,19 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
     	// We are here because the user's dial target did not contain a scheme or
     	// specified an unregistered scheme. We should fallback to the default
     	// scheme, except when a custom dialer is specified in which case, we should
    -	// always use passthrough scheme.
    -	defScheme := resolver.GetDefaultScheme()
    -	channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme)
    +	// always use passthrough scheme. For either case, we need to respect any overridden
    +	// global defaults set by the user.
    +	defScheme := cc.dopts.defaultScheme
    +	if internal.UserSetDefaultScheme {
    +		defScheme = resolver.GetDefaultScheme()
    +	}
    +
     	canonicalTarget := defScheme + ":///" + cc.target
     
     	parsedTarget, err = parseTarget(canonicalTarget)
     	if err != nil {
    -		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
     		return err
     	}
    -	channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
     	rb = cc.getResolver(parsedTarget.URL.Scheme)
     	if rb == nil {
     		return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
    @@ -1772,6 +1701,8 @@ func parseTarget(target string) (resolver.Target, error) {
     	return resolver.Target{URL: *u}, nil
     }
     
    +// encodeAuthority escapes the authority string based on valid chars defined in
    +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.
     func encodeAuthority(authority string) string {
     	const upperhex = "0123456789ABCDEF"
     
    @@ -1788,7 +1719,7 @@ func encodeAuthority(authority string) string {
     			return false
     		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
     			return false
    -		case ':', '[', ']', '@': // Authority related delimeters
    +		case ':', '[', ']', '@': // Authority related delimiters
     			return false
     		}
     		// Everything else must be escaped.
    @@ -1838,7 +1769,7 @@ func encodeAuthority(authority string) string {
     // credentials do not match the authority configured through the dial option.
     //
     // Doesn't grab cc.mu as this method is expected to be called only at Dial time.
    -func (cc *ClientConn) determineAuthority() error {
    +func (cc *ClientConn) initAuthority() error {
     	dopts := cc.dopts
     	// Historically, we had two options for users to specify the serverName or
     	// authority for a channel. One was through the transport credentials
    @@ -1871,6 +1802,5 @@ func (cc *ClientConn) determineAuthority() error {
     	} else {
     		cc.authority = encodeAuthority(endpoint)
     	}
    -	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
     	return nil
     }
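
Editor's note on the clientconn.go hunks above: initParsedTargetAndResolverBuilder (formerly parseTargetAndFindResolver) now falls back to the channel's configured default scheme instead of the global one, unless the user explicitly overrode the global default. Below is a minimal standalone sketch of that fallback logic under stated assumptions — the scheme registry and default are illustrative stand-ins built on net/url only, not the gRPC code path:

	package main

	import (
		"fmt"
		"net/url"
	)

	// registered stands in for the resolver registry; only these schemes are
	// treated as resolvable. Illustrative assumption, not resolver.Get.
	var registered = map[string]bool{"dns": true, "passthrough": true, "unix": true}

	// canonicalize mirrors the fallback: if the target has no registered
	// scheme, the whole target string is re-wrapped under defScheme.
	func canonicalize(target, defScheme string) (string, error) {
		if u, err := url.Parse(target); err == nil && registered[u.Scheme] {
			return target, nil
		}
		canonical := defScheme + ":///" + target
		if _, err := url.Parse(canonical); err != nil {
			return "", err
		}
		return canonical, nil
	}

	func main() {
		for _, t := range []string{"dns:///example.com:443", "localhost:50051"} {
			c, _ := canonicalize(t, "dns")
			fmt.Printf("%-26s -> %s\n", t, c)
		}
	}
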
    diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
    index 411e3dfd47..959c2f99d4 100644
    --- a/vendor/google.golang.org/grpc/codec.go
    +++ b/vendor/google.golang.org/grpc/codec.go
    @@ -21,18 +21,73 @@ package grpc
     import (
     	"google.golang.org/grpc/encoding"
     	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
    +	"google.golang.org/grpc/mem"
     )
     
    -// baseCodec contains the functionality of both Codec and encoding.Codec, but
    -// omits the name/string, which vary between the two and are not needed for
    -// anything besides the registry in the encoding package.
    +// baseCodec captures the new encoding.CodecV2 interface without the Name
    +// function, allowing it to be implemented by older Codec and encoding.Codec
    +// implementations. The omitted Name function is only needed for the register in
    +// the encoding package and is not part of the core functionality.
     type baseCodec interface {
    -	Marshal(v any) ([]byte, error)
    -	Unmarshal(data []byte, v any) error
    +	Marshal(v any) (mem.BufferSlice, error)
    +	Unmarshal(data mem.BufferSlice, v any) error
    +}
    +
    +// getCodec returns an encoding.CodecV2 for the codec of the given name (if
    +// registered). Initially checks the V2 registry with encoding.GetCodecV2 and
    +// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry
    +// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge
    +// to turn it into an encoding.CodecV2. Returns nil otherwise.
    +func getCodec(name string) encoding.CodecV2 {
    +	if codecV1 := encoding.GetCodec(name); codecV1 != nil {
    +		return newCodecV1Bridge(codecV1)
    +	}
    +
    +	return encoding.GetCodecV2(name)
    +}
    +
    +func newCodecV0Bridge(c Codec) baseCodec {
    +	return codecV0Bridge{codec: c}
    +}
    +
    +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
    +	return codecV1Bridge{
    +		codecV0Bridge: codecV0Bridge{codec: c},
    +		name:          c.Name(),
    +	}
    +}
    +
    +var _ baseCodec = codecV0Bridge{}
    +
    +type codecV0Bridge struct {
    +	codec interface {
    +		Marshal(v any) ([]byte, error)
    +		Unmarshal(data []byte, v any) error
    +	}
    +}
    +
    +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
    +	data, err := c.codec.Marshal(v)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return mem.BufferSlice{mem.SliceBuffer(data)}, nil
    +}
    +
    +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
    +	return c.codec.Unmarshal(data.Materialize(), v)
     }
     
    -var _ baseCodec = Codec(nil)
    -var _ baseCodec = encoding.Codec(nil)
    +var _ encoding.CodecV2 = codecV1Bridge{}
    +
    +type codecV1Bridge struct {
    +	codecV0Bridge
    +	name string
    +}
    +
    +func (c codecV1Bridge) Name() string {
    +	return c.name
    +}
     
     // Codec defines the interface gRPC uses to encode and decode messages.
     // Note that implementations of this interface must be thread safe;
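
Editor's note on the codec.go hunk above: the new baseCodec works in terms of mem.BufferSlice rather than a single []byte, and codecV0Bridge/codecV1Bridge adapt the older interfaces to it. The self-contained sketch below shows the same bridging pattern with stand-in types — ByteCodec, BufferSlice, SliceCodec, and jsonCodec are illustrative assumptions, not the grpc/mem or encoding types:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// ByteCodec is the old-style shape: one contiguous []byte per message.
	type ByteCodec interface {
		Marshal(v any) ([]byte, error)
		Unmarshal(data []byte, v any) error
	}

	// BufferSlice stands in for mem.BufferSlice: a message split across buffers.
	type BufferSlice [][]byte

	// Materialize flattens the buffers back into one contiguous []byte.
	func (s BufferSlice) Materialize() []byte {
		var out []byte
		for _, b := range s {
			out = append(out, b...)
		}
		return out
	}

	// SliceCodec is the new-style shape the bridge targets.
	type SliceCodec interface {
		Marshal(v any) (BufferSlice, error)
		Unmarshal(data BufferSlice, v any) error
	}

	// bridge adapts a ByteCodec to SliceCodec, mirroring codecV0Bridge above.
	type bridge struct{ c ByteCodec }

	func (b bridge) Marshal(v any) (BufferSlice, error) {
		data, err := b.c.Marshal(v)
		if err != nil {
			return nil, err
		}
		return BufferSlice{data}, nil
	}

	func (b bridge) Unmarshal(data BufferSlice, v any) error {
		return b.c.Unmarshal(data.Materialize(), v)
	}

	// jsonCodec is a trivial ByteCodec used only for the demo.
	type jsonCodec struct{}

	func (jsonCodec) Marshal(v any) ([]byte, error)   { return json.Marshal(v) }
	func (jsonCodec) Unmarshal(d []byte, v any) error { return json.Unmarshal(d, v) }

	func main() {
		var sc SliceCodec = bridge{c: jsonCodec{}}
		buf, _ := sc.Marshal(map[string]int{"a": 1})
		var out map[string]int
		_ = sc.Unmarshal(buf, &out)
		fmt.Println(out) // map[a:1]
	}
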
    diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
    deleted file mode 100644
    index 4cdc6ba7c0..0000000000
    --- a/vendor/google.golang.org/grpc/codegen.sh
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -#!/usr/bin/env bash
    -
    -# This script serves as an example to demonstrate how to generate the gRPC-Go
    -# interface and the related messages from .proto file.
    -#
    -# It assumes the installation of i) Google proto buffer compiler at
    -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
    -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
    -# not, please install them first.
    -#
    -# We recommend running this script at $GOPATH/src.
    -#
    -# If this is not what you need, feel free to make your own scripts. Again, this
    -# script is for demonstration purpose.
    -#
    -proto=$1
    -protoc --go_out=plugins=grpc:. $proto
    diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
    index 08476ad1fe..0b42c302b2 100644
    --- a/vendor/google.golang.org/grpc/codes/codes.go
    +++ b/vendor/google.golang.org/grpc/codes/codes.go
    @@ -235,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
     
     	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
     		if ci >= _maxCode {
    -			return fmt.Errorf("invalid code: %q", ci)
    +			return fmt.Errorf("invalid code: %d", ci)
     		}
     
     		*c = Code(ci)
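
Editor's note on the codes.go change above: the %q verb renders an integer as a quoted rune literal, so an out-of-range code was printed as a character rather than a number; %d prints the value itself. A quick standalone illustration (the value 200 is just an example):

	package main

	import "fmt"

	func main() {
		ci := uint64(200)
		// %q treats the integer as a code point and quotes it.
		fmt.Println(fmt.Errorf("invalid code: %q", ci)) // invalid code: 'È'
		// %d prints the numeric value, which is what the error should report.
		fmt.Println(fmt.Errorf("invalid code: %d", ci)) // invalid code: 200
	}
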
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
    index 43726e877b..7e4bfee888 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
    @@ -49,7 +49,7 @@ func (k KeySizeError) Error() string {
     
     // newRekeyAEAD creates a new instance of aes128gcm with rekeying.
     // The key argument should be 44 bytes, the first 32 bytes are used as a key
    -// for HKDF-expand and the remainining 12 bytes are used as a random mask for
    +// for HKDF-expand and the remaining 12 bytes are used as a random mask for
     // the counter.
     func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
     	k := len(key)
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
    index 6a9035ea25..b5bbb5497a 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
    @@ -51,7 +51,7 @@ type aes128gcmRekey struct {
     
     // NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
     // for ALTS record. The key argument should be 44 bytes, the first 32 bytes
    -// are used as a key for HKDF-expand and the remainining 12 bytes are used
    +// are used as a key for HKDF-expand and the remaining 12 bytes are used
     // as a random mask for the counter.
     func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
     	inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
    index 0d64fb37a1..f1ea7bb208 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
    @@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) {
     	}
     	return n, nil
     }
    -
    -func min(a, b int) int {
    -	if a < b {
    -		return a
    -	}
    -	return b
    -}
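
Editor's note on the record.go change above: the package-local min helper is dropped, presumably because the module now targets a Go version with the predeclared generic min builtin (Go 1.21+), which covers the same use:

	package main

	import "fmt"

	func main() {
		// The builtin min works for any ordered type, so a local int-only
		// helper is redundant.
		fmt.Println(min(3, 7), min(2.5, 1.0), min("b", "a")) // 3 1 a
	}
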
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
    index 6c867dd850..50721f690a 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
    @@ -128,7 +128,7 @@ type altsHandshaker struct {
     // NewClientHandshaker creates a core.Handshaker that performs a client-side
     // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
     // service in the metadata server.
    -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
    +func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
     	return &altsHandshaker{
     		stream:     nil,
     		conn:       c,
    @@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
     // NewServerHandshaker creates a core.Handshaker that performs a server-side
     // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
     // service in the metadata server.
    -func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
    +func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
     	return &altsHandshaker{
     		stream:     nil,
     		conn:       c,
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
    index e1cdafb980..fbfde5d047 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
    @@ -34,8 +34,6 @@ var (
     	// to a corresponding connection to a hypervisor handshaker service
     	// instance.
     	hsConnMap = make(map[string]*grpc.ClientConn)
    -	// hsDialer will be reassigned in tests.
    -	hsDialer = grpc.Dial
     )
     
     // Dial dials the handshake service in the hypervisor. If a connection has
    @@ -49,8 +47,10 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) {
     	if !ok {
     		// Create a new connection to the handshaker service. Note that
     		// this connection stays open until the application is closed.
    +		// Disable the service config to avoid unnecessary TXT record lookups that
    +		// cause timeouts with some versions of systemd-resolved.
     		var err error
    -		hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
    +		hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDisableServiceConfig())
     		if err != nil {
     			return nil, err
     		}
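
Editor's note on the service.go change above: the test-only hsDialer indirection is gone and the connection to the handshaker service is now dialed directly with the service config lookup disabled, which avoids the DNS TXT query that can time out under some systemd-resolved versions. A hedged sketch of the same dial options in isolation — the address is illustrative; the real one is supplied by the caller of service.Dial:

	package main

	import (
		"log"

		"google.golang.org/grpc"
		"google.golang.org/grpc/credentials/insecure"
	)

	func main() {
		conn, err := grpc.Dial("metadata.google.internal:8080",
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			// Skip the DNS TXT lookup for a service config.
			grpc.WithDisableServiceConfig(),
		)
		if err != nil {
			log.Fatalf("dial handshaker service: %v", err)
		}
		defer conn.Close()
	}
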
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
    index c7cf1810a1..83d23f65aa 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
    @@ -17,8 +17,8 @@
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
    -// 	protoc-gen-go v1.31.0
    -// 	protoc        v4.22.0
    +// 	protoc-gen-go v1.35.1
    +// 	protoc        v5.27.1
     // source: grpc/gcp/altscontext.proto
     
     package grpc_gcp
    @@ -60,11 +60,9 @@ type AltsContext struct {
     
     func (x *AltsContext) Reset() {
     	*x = AltsContext{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *AltsContext) String() string {
    @@ -75,7 +73,7 @@ func (*AltsContext) ProtoMessage() {}
     
     func (x *AltsContext) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -201,7 +199,7 @@ func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte {
     }
     
     var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
    -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{
    +var file_grpc_gcp_altscontext_proto_goTypes = []any{
     	(*AltsContext)(nil),         // 0: grpc.gcp.AltsContext
     	nil,                         // 1: grpc.gcp.AltsContext.PeerAttributesEntry
     	(SecurityLevel)(0),          // 2: grpc.gcp.SecurityLevel
    @@ -224,20 +222,6 @@ func file_grpc_gcp_altscontext_proto_init() {
     		return
     	}
     	file_grpc_gcp_transport_security_common_proto_init()
    -	if !protoimpl.UnsafeEnabled {
    -		file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*AltsContext); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
    index 00407de755..915b36df82 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
    @@ -17,8 +17,8 @@
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
    -// 	protoc-gen-go v1.31.0
    -// 	protoc        v4.22.0
    +// 	protoc-gen-go v1.35.1
    +// 	protoc        v5.27.1
     // source: grpc/gcp/handshaker.proto
     
     package grpc_gcp
    @@ -154,11 +154,9 @@ type Endpoint struct {
     
     func (x *Endpoint) Reset() {
     	*x = Endpoint{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Endpoint) String() string {
    @@ -169,7 +167,7 @@ func (*Endpoint) ProtoMessage() {}
     
     func (x *Endpoint) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -221,11 +219,9 @@ type Identity struct {
     
     func (x *Identity) Reset() {
     	*x = Identity{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Identity) String() string {
    @@ -236,7 +232,7 @@ func (*Identity) ProtoMessage() {}
     
     func (x *Identity) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -331,15 +327,18 @@ type StartClientHandshakeReq struct {
     	RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"`
     	// (Optional) Maximum frame size supported by the client.
     	MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
    +	// (Optional) An access token created by the caller only intended for use in
    +	// ALTS connections. The access token that should be used to authenticate to
    +	// the peer. The access token MUST be strongly bound to the ALTS credentials
    +	// used to establish the connection that the token is sent over.
    +	AccessToken string `protobuf:"bytes,11,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"`
     }
     
     func (x *StartClientHandshakeReq) Reset() {
     	*x = StartClientHandshakeReq{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *StartClientHandshakeReq) String() string {
    @@ -350,7 +349,7 @@ func (*StartClientHandshakeReq) ProtoMessage() {}
     
     func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -435,6 +434,13 @@ func (x *StartClientHandshakeReq) GetMaxFrameSize() uint32 {
     	return 0
     }
     
    +func (x *StartClientHandshakeReq) GetAccessToken() string {
    +	if x != nil {
    +		return x.AccessToken
    +	}
    +	return ""
    +}
    +
     type ServerHandshakeParameters struct {
     	state         protoimpl.MessageState
     	sizeCache     protoimpl.SizeCache
    @@ -446,15 +452,18 @@ type ServerHandshakeParameters struct {
     	// (Optional) A list of local identities supported by the server, if
     	// specified. Otherwise, the handshaker chooses a default local identity.
     	LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"`
    +	// A token created by the caller only intended for use in
    +	// ALTS connections. The token should be used to authenticate to
    +	// the peer. The token MUST be strongly bound to the ALTS credentials
    +	// used to establish the connection that the token is sent over.
    +	Token *string `protobuf:"bytes,3,opt,name=token,proto3,oneof" json:"token,omitempty"`
     }
     
     func (x *ServerHandshakeParameters) Reset() {
     	*x = ServerHandshakeParameters{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ServerHandshakeParameters) String() string {
    @@ -465,7 +474,7 @@ func (*ServerHandshakeParameters) ProtoMessage() {}
     
     func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -494,6 +503,13 @@ func (x *ServerHandshakeParameters) GetLocalIdentities() []*Identity {
     	return nil
     }
     
    +func (x *ServerHandshakeParameters) GetToken() string {
    +	if x != nil && x.Token != nil {
    +		return *x.Token
    +	}
    +	return ""
    +}
    +
     type StartServerHandshakeReq struct {
     	state         protoimpl.MessageState
     	sizeCache     protoimpl.SizeCache
    @@ -509,7 +525,7 @@ type StartServerHandshakeReq struct {
     	// to handshake_parameters is the integer value of HandshakeProtocol enum.
     	HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
     	// Bytes in out_frames returned from the peer's HandshakerResp. It is possible
    -	// that the peer's out_frames are split into multiple HandshakReq messages.
    +	// that the peer's out_frames are split into multiple HandshakeReq messages.
     	InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
     	// (Optional) Local endpoint information of the connection to the client,
     	// such as local IP address, port number, and network protocol.
    @@ -525,11 +541,9 @@ type StartServerHandshakeReq struct {
     
     func (x *StartServerHandshakeReq) Reset() {
     	*x = StartServerHandshakeReq{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *StartServerHandshakeReq) String() string {
    @@ -540,7 +554,7 @@ func (*StartServerHandshakeReq) ProtoMessage() {}
     
     func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -621,11 +635,9 @@ type NextHandshakeMessageReq struct {
     
     func (x *NextHandshakeMessageReq) Reset() {
     	*x = NextHandshakeMessageReq{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *NextHandshakeMessageReq) String() string {
    @@ -636,7 +648,7 @@ func (*NextHandshakeMessageReq) ProtoMessage() {}
     
     func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -680,11 +692,9 @@ type HandshakerReq struct {
     
     func (x *HandshakerReq) Reset() {
     	*x = HandshakerReq{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *HandshakerReq) String() string {
    @@ -695,7 +705,7 @@ func (*HandshakerReq) ProtoMessage() {}
     
     func (x *HandshakerReq) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -792,11 +802,9 @@ type HandshakerResult struct {
     
     func (x *HandshakerResult) Reset() {
     	*x = HandshakerResult{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *HandshakerResult) String() string {
    @@ -807,7 +815,7 @@ func (*HandshakerResult) ProtoMessage() {}
     
     func (x *HandshakerResult) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -891,11 +899,9 @@ type HandshakerStatus struct {
     
     func (x *HandshakerStatus) Reset() {
     	*x = HandshakerStatus{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *HandshakerStatus) String() string {
    @@ -906,7 +912,7 @@ func (*HandshakerStatus) ProtoMessage() {}
     
     func (x *HandshakerStatus) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -959,11 +965,9 @@ type HandshakerResp struct {
     
     func (x *HandshakerResp) Reset() {
     	*x = HandshakerResp{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *HandshakerResp) String() string {
    @@ -974,7 +978,7 @@ func (*HandshakerResp) ProtoMessage() {}
     
     func (x *HandshakerResp) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1047,7 +1051,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
     	0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
     	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10,
     	0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
    -	0x22, 0xd3, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
    +	0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
     	0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b,
     	0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
     	0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
    @@ -1084,135 +1088,140 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
     	0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
     	0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69,
     	0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61,
    -	0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65,
    -	0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
    -	0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f,
    -	0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12,
    -	0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
    -	0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63,
    -	0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c,
    -	0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa5,
    -	0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61,
    -	0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70,
    -	0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
    -	0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69,
    -	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12,
    -	0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72,
    -	0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e,
    -	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65,
    -	0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71,
    -	0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
    -	0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73,
    -	0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19,
    -	0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
    -	0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63,
    -	0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
    -	0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64,
    -	0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70,
    -	0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65,
    -	0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
    -	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
    -	0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
    -	0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
    -	0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
    -	0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65,
    -	0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69,
    -	0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65,
    -	0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78,
    -	0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e,
    +	0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
    +	0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01,
    +	0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf,
    +	0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
    +	0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10,
    +	0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73,
    +	0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72,
    +	0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
    +	0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
    +	0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65,
    +	0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
    +	0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
    +	0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f,
    +	0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
    +	0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
    +	0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15,
    +	0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70,
    +	0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
    +	0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70,
    +	0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
    +	0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
    +	0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52,
    +	0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61,
    +	0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e,
     	0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
    -	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
    -	0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
    -	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63,
    -	0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
    -	0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c,
    -	0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61,
    -	0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65,
    -	0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20,
    -	0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12,
    -	0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f,
    -	0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
    -	0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48,
    -	0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c,
    -	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01,
    -	0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74,
    -	0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
    -	0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
    -	0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73,
    -	0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70,
    -	0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65,
    -	0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52,
    -	0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04,
    -	0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70,
    -	0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68,
    -	0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52,
    -	0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65,
    -	0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
    -	0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69,
    -	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
    -	0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65,
    -	0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    -	0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18,
    -	0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37,
    -	0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18,
    -	0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70,
    -	0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49,
    -	0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
    -	0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74,
    -	0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
    -	0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e,
    -	0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b,
    -	0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49,
    -	0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
    -	0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63,
    +	0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01,
    +	0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c,
    +	0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45,
    +	0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e,
    +	0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
    +	0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
    +	0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f,
    +	0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f,
    +	0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
    +	0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63,
     	0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
    -	0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70,
    -	0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78,
    -	0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
    -	0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22,
    -	0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61,
    -	0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
    -	0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69,
    -	0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
    -	0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72,
    -	0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d,
    -	0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61,
    -	0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e,
    -	0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74,
    -	0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65,
    -	0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70,
    -	0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72,
    -	0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32,
    -	0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
    -	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68,
    -	0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
    -	0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53,
    -	0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e,
    -	0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54,
    -	0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45,
    -	0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
    -	0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f,
    -	0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
    -	0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03,
    -	0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
    -	0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f,
    -	0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63,
    -	0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52,
    -	0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61,
    -	0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01,
    -	0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c,
    -	0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e,
    -	0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
    -	0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
    -	0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
    -	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62,
    -	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    +	0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72,
    +	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61,
    +	0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d,
    +	0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48,
    +	0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
    +	0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c,
    +	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
    +	0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68,
    +	0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76,
    +	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74,
    +	0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
    +	0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18,
    +	0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c,
    +	0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63,
    +	0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77,
    +	0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a,
    +	0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46,
    +	0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
    +	0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73,
    +	0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e,
    +	0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
    +	0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
    +	0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72,
    +	0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48,
    +	0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37,
    +	0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
    +	0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64,
    +	0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48,
    +	0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f,
    +	0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
    +	0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70,
    +	0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
    +	0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61,
    +	0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f,
    +	0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18,
    +	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f,
    +	0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74,
    +	0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61,
    +	0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
    +	0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
    +	0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65,
    +	0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63,
    +	0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
    +	0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65,
    +	0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
    +	0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61,
    +	0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
    +	0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e,
    +	0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72,
    +	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72,
    +	0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
    +	0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72,
    +	0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d,
    +	0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20,
    +	0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a,
    +	0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53,
    +	0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20,
    +	0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74,
    +	0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61,
    +	0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
    +	0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72,
    +	0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46,
    +	0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63,
    +	0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62,
    +	0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06,
    +	0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
    +	0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
    +	0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
    +	0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
    +	0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64,
    +	0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74,
    +	0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
    +	0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e,
    +	0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f,
    +	0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a,
    +	0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02,
    +	0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    +	0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50,
    +	0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
    +	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07,
    +	0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73,
    +	0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b,
    +	0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72,
    +	0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
    +	0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
    +	0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00,
    +	0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
    +	0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48,
    +	0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
    +	0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
    +	0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
    +	0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
    +	0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63,
    +	0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
     }
     
     var (
    @@ -1229,7 +1238,7 @@ func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte {
     
     var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
     var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
    -var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{
    +var file_grpc_gcp_handshaker_proto_goTypes = []any{
     	(HandshakeProtocol)(0),            // 0: grpc.gcp.HandshakeProtocol
     	(NetworkProtocol)(0),              // 1: grpc.gcp.NetworkProtocol
     	(*Endpoint)(nil),                  // 2: grpc.gcp.Endpoint
    @@ -1284,133 +1293,12 @@ func file_grpc_gcp_handshaker_proto_init() {
     		return
     	}
     	file_grpc_gcp_transport_security_common_proto_init()
    -	if !protoimpl.UnsafeEnabled {
    -		file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Endpoint); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Identity); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*StartClientHandshakeReq); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ServerHandshakeParameters); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*StartServerHandshakeReq); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*NextHandshakeMessageReq); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*HandshakerReq); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*HandshakerResult); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*HandshakerStatus); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*HandshakerResp); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
    -	file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{
    +	file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{
     		(*Identity_ServiceAccount)(nil),
     		(*Identity_Hostname)(nil),
     	}
    -	file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{
    +	file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{}
    +	file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{
     		(*HandshakerReq_ClientStart)(nil),
     		(*HandshakerReq_ServerStart)(nil),
     		(*HandshakerReq_Next)(nil),
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
    index 39ecccf878..34443b1d2d 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
    @@ -17,8 +17,8 @@
     
     // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
     // versions:
    -// - protoc-gen-go-grpc v1.3.0
    -// - protoc             v4.22.0
    +// - protoc-gen-go-grpc v1.5.1
    +// - protoc             v5.27.1
     // source: grpc/gcp/handshaker.proto
     
     package grpc_gcp
    @@ -32,8 +32,8 @@ import (
     
     // This is a compile-time assertion to ensure that this generated file
     // is compatible with the grpc package it is being compiled against.
    -// Requires gRPC-Go v1.32.0 or later.
    -const _ = grpc.SupportPackageIsVersion7
    +// Requires gRPC-Go v1.64.0 or later.
    +const _ = grpc.SupportPackageIsVersion9
     
     const (
     	HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake"
    @@ -49,7 +49,7 @@ type HandshakerServiceClient interface {
     	// messages with next. Each time client sends a request, the handshaker
     	// service expects to respond. Client does not have to wait for service's
     	// response before sending next request.
    -	DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error)
    +	DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error)
     }
     
     type handshakerServiceClient struct {
    @@ -60,40 +60,22 @@ func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceCl
     	return &handshakerServiceClient{cc}
     }
     
    -func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) {
    -	stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...)
    +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error) {
    +	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
    +	stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, cOpts...)
     	if err != nil {
     		return nil, err
     	}
    -	x := &handshakerServiceDoHandshakeClient{stream}
    +	x := &grpc.GenericClientStream[HandshakerReq, HandshakerResp]{ClientStream: stream}
     	return x, nil
     }
     
    -type HandshakerService_DoHandshakeClient interface {
    -	Send(*HandshakerReq) error
    -	Recv() (*HandshakerResp, error)
    -	grpc.ClientStream
    -}
    -
    -type handshakerServiceDoHandshakeClient struct {
    -	grpc.ClientStream
    -}
    -
    -func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error {
    -	return x.ClientStream.SendMsg(m)
    -}
    -
    -func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) {
    -	m := new(HandshakerResp)
    -	if err := x.ClientStream.RecvMsg(m); err != nil {
    -		return nil, err
    -	}
    -	return m, nil
    -}
    +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
    +type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerReq, HandshakerResp]
     
     // HandshakerServiceServer is the server API for HandshakerService service.
     // All implementations must embed UnimplementedHandshakerServiceServer
    -// for forward compatibility
    +// for forward compatibility.
     type HandshakerServiceServer interface {
     	// Handshaker service accepts a stream of handshaker request, returning a
     	// stream of handshaker response. Client is expected to send exactly one
    @@ -101,18 +83,22 @@ type HandshakerServiceServer interface {
     	// messages with next. Each time client sends a request, the handshaker
     	// service expects to respond. Client does not have to wait for service's
     	// response before sending next request.
    -	DoHandshake(HandshakerService_DoHandshakeServer) error
    +	DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error
     	mustEmbedUnimplementedHandshakerServiceServer()
     }
     
    -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations.
    -type UnimplementedHandshakerServiceServer struct {
    -}
    +// UnimplementedHandshakerServiceServer must be embedded to have
    +// forward compatible implementations.
    +//
    +// NOTE: this should be embedded by value instead of pointer to avoid a nil
    +// pointer dereference when methods are called.
    +type UnimplementedHandshakerServiceServer struct{}
     
    -func (UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error {
    +func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error {
     	return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented")
     }
     func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {}
    +func (UnimplementedHandshakerServiceServer) testEmbeddedByValue()                           {}
     
     // UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service.
     // Use of this interface is not recommended, as added methods to HandshakerServiceServer will
    @@ -122,34 +108,22 @@ type UnsafeHandshakerServiceServer interface {
     }
     
     func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) {
    +	// If the following call panics, it indicates UnimplementedHandshakerServiceServer was
    +	// embedded by pointer and is nil.  This will cause panics if an
    +	// unimplemented method is ever invoked, so we test this at initialization
    +	// time to prevent it from happening at runtime later due to I/O.
    +	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
    +		t.testEmbeddedByValue()
    +	}
     	s.RegisterService(&HandshakerService_ServiceDesc, srv)
     }
     
     func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error {
    -	return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream})
    -}
    -
    -type HandshakerService_DoHandshakeServer interface {
    -	Send(*HandshakerResp) error
    -	Recv() (*HandshakerReq, error)
    -	grpc.ServerStream
    -}
    -
    -type handshakerServiceDoHandshakeServer struct {
    -	grpc.ServerStream
    +	return srv.(HandshakerServiceServer).DoHandshake(&grpc.GenericServerStream[HandshakerReq, HandshakerResp]{ServerStream: stream})
     }
     
    -func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error {
    -	return x.ServerStream.SendMsg(m)
    -}
    -
    -func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) {
    -	m := new(HandshakerReq)
    -	if err := x.ServerStream.RecvMsg(m); err != nil {
    -		return nil, err
    -	}
    -	return m, nil
    -}
    +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
    +type HandshakerService_DoHandshakeServer = grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]
     
     // HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service.
     // It's only intended for direct use with grpc.RegisterService,
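For reference, a minimal sketch of the Recv/Send surface exposed by the generic stream interfaces that the aliases above now point at. EchoReq and EchoResp are hypothetical stand-ins for HandshakerReq/HandshakerResp, which live in an internal package and cannot be imported directly.

    package example

    import (
    	"io"

    	"google.golang.org/grpc"
    )

    // Hypothetical message types standing in for the generated proto messages.
    type EchoReq struct{ Msg string }
    type EchoResp struct{ Msg string }

    // echoLoop shows how handler code uses the generic
    // grpc.BidiStreamingServer interface behind the type alias.
    func echoLoop(stream grpc.BidiStreamingServer[EchoReq, EchoResp]) error {
    	for {
    		req, err := stream.Recv()
    		if err == io.EOF {
    			return nil // client finished sending
    		}
    		if err != nil {
    			return err
    		}
    		if err := stream.Send(&EchoResp{Msg: req.Msg}); err != nil {
    			return err
    		}
    	}
    }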
    diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
    index 69f0947582..e9676db4b5 100644
    --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
    +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
    @@ -17,8 +17,8 @@
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
     // versions:
    -// 	protoc-gen-go v1.31.0
    -// 	protoc        v4.22.0
    +// 	protoc-gen-go v1.35.1
    +// 	protoc        v5.27.1
     // source: grpc/gcp/transport_security_common.proto
     
     package grpc_gcp
    @@ -102,11 +102,9 @@ type RpcProtocolVersions struct {
     
     func (x *RpcProtocolVersions) Reset() {
     	*x = RpcProtocolVersions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *RpcProtocolVersions) String() string {
    @@ -117,7 +115,7 @@ func (*RpcProtocolVersions) ProtoMessage() {}
     
     func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -158,11 +156,9 @@ type RpcProtocolVersions_Version struct {
     
     func (x *RpcProtocolVersions_Version) Reset() {
     	*x = RpcProtocolVersions_Version{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *RpcProtocolVersions_Version) String() string {
    @@ -173,7 +169,7 @@ func (*RpcProtocolVersions_Version) ProtoMessage() {}
     
     func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message {
     	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -253,7 +249,7 @@ func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte {
     
     var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
     var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
    -var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{
    +var file_grpc_gcp_transport_security_common_proto_goTypes = []any{
     	(SecurityLevel)(0),                  // 0: grpc.gcp.SecurityLevel
     	(*RpcProtocolVersions)(nil),         // 1: grpc.gcp.RpcProtocolVersions
     	(*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version
    @@ -273,32 +269,6 @@ func file_grpc_gcp_transport_security_common_proto_init() {
     	if File_grpc_gcp_transport_security_common_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*RpcProtocolVersions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*RpcProtocolVersions_Version); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
    diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
    index 5feac3aa0e..665e790bb0 100644
    --- a/vendor/google.golang.org/grpc/credentials/credentials.go
    +++ b/vendor/google.golang.org/grpc/credentials/credentials.go
    @@ -28,9 +28,9 @@ import (
     	"fmt"
     	"net"
     
    -	"github.com/golang/protobuf/proto"
     	"google.golang.org/grpc/attributes"
     	icredentials "google.golang.org/grpc/internal/credentials"
    +	"google.golang.org/protobuf/proto"
     )
     
     // PerRPCCredentials defines the common interface for the credentials which need to
    @@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
     }
     
     // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
    -// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
    +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
     // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
     //
     // This API is experimental.
    diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go
    index fbdf7dc299..5a9c9461f0 100644
    --- a/vendor/google.golang.org/grpc/credentials/google/google.go
    +++ b/vendor/google.golang.org/grpc/credentials/google/google.go
    @@ -22,7 +22,6 @@ package google
     import (
     	"context"
     	"fmt"
    -	"time"
     
     	"google.golang.org/grpc/credentials"
     	"google.golang.org/grpc/credentials/alts"
    @@ -31,7 +30,7 @@ import (
     	"google.golang.org/grpc/internal"
     )
     
    -const tokenRequestTimeout = 30 * time.Second
    +const defaultCloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
     
     var logger = grpclog.Component("credentials")
     
    @@ -39,6 +38,9 @@ var logger = grpclog.Component("credentials")
     type DefaultCredentialsOptions struct {
     	// PerRPCCreds is a per RPC credentials that is passed to a bundle.
     	PerRPCCreds credentials.PerRPCCredentials
+	// ALTSPerRPCCreds is a per RPC credentials that, if specified, will
+	// supersede PerRPCCreds above for, and only for, ALTS connections.
    +	ALTSPerRPCCreds credentials.PerRPCCredentials
     }
     
     // NewDefaultCredentialsWithOptions returns a credentials bundle that is
    @@ -47,14 +49,21 @@ type DefaultCredentialsOptions struct {
     // This API is experimental.
     func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle {
     	if opts.PerRPCCreds == nil {
    -		ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout)
    -		defer cancel()
     		var err error
    -		opts.PerRPCCreds, err = newADC(ctx)
    +		// If the ADC ends up being Compute Engine Credentials, this context
    +		// won't be used. Otherwise, the context dictates all the subsequent
    +		// token requests via HTTP. So we cannot have any deadline or timeout.
    +		opts.PerRPCCreds, err = newADC(context.TODO())
     		if err != nil {
     			logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err)
     		}
     	}
    +	if opts.ALTSPerRPCCreds != nil {
    +		opts.PerRPCCreds = &dualPerRPCCreds{
    +			perRPCCreds:     opts.PerRPCCreds,
    +			altsPerRPCCreds: opts.ALTSPerRPCCreds,
    +		}
    +	}
     	c := &creds{opts: opts}
     	bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
     	if err != nil {
    @@ -113,7 +122,7 @@ var (
     		return alts.NewClientCreds(alts.DefaultClientOptions())
     	}
     	newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) {
    -		return oauth.NewApplicationDefault(ctx)
    +		return oauth.NewApplicationDefault(ctx, defaultCloudPlatformScope)
     	}
     )
     
    @@ -143,3 +152,27 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) {
     
     	return newCreds, nil
     }
    +
    +// dualPerRPCCreds implements credentials.PerRPCCredentials by embedding the
+// fallback PerRPCCredentials and the ALTS one. It picks one of them based on
    +// the channel type.
    +type dualPerRPCCreds struct {
    +	perRPCCreds     credentials.PerRPCCredentials
    +	altsPerRPCCreds credentials.PerRPCCredentials
    +}
    +
    +func (d *dualPerRPCCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
    +	ri, ok := credentials.RequestInfoFromContext(ctx)
    +	if !ok {
    +		return nil, fmt.Errorf("request info not found from context")
    +	}
    +	if authType := ri.AuthInfo.AuthType(); authType == "alts" {
    +		return d.altsPerRPCCreds.GetRequestMetadata(ctx, uri...)
    +	}
    +	// This ensures backward compatibility even if authType is not "tls".
    +	return d.perRPCCreds.GetRequestMetadata(ctx, uri...)
    +}
    +
    +func (d *dualPerRPCCreds) RequireTransportSecurity() bool {
    +	return d.altsPerRPCCreds.RequireTransportSecurity() || d.perRPCCreds.RequireTransportSecurity()
    +}
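For reference, a rough sketch of how the new ALTSPerRPCCreds option added above could be wired up. tokenCreds and altsTokenCreds are assumed to be existing credentials.PerRPCCredentials implementations; the bundle routes ALTS connections to the latter and everything else to the former, as dualPerRPCCreds describes.

    package example

    import (
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials"
    	"google.golang.org/grpc/credentials/google"
    )

    // newConn builds a channel whose per-RPC credentials differ between ALTS
    // and non-ALTS connections.
    func newConn(tokenCreds, altsTokenCreds credentials.PerRPCCredentials) (*grpc.ClientConn, error) {
    	bundle := google.NewDefaultCredentialsWithOptions(google.DefaultCredentialsOptions{
    		PerRPCCreds:     tokenCreds,
    		ALTSPerRPCCreds: altsTokenCreds,
    	})
    	return grpc.NewClient("dns:///example.googleapis.com:443",
    		grpc.WithCredentialsBundle(bundle))
    }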
    diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go
    index 2c5c8b9eee..cccb22271e 100644
    --- a/vendor/google.golang.org/grpc/credentials/google/xds.go
    +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go
    @@ -25,7 +25,7 @@ import (
     	"strings"
     
     	"google.golang.org/grpc/credentials"
    -	"google.golang.org/grpc/internal"
    +	"google.golang.org/grpc/internal/xds"
     )
     
     const cfeClusterNamePrefix = "google_cfe_"
    @@ -63,7 +63,7 @@ func clusterName(ctx context.Context) string {
     	if chi.Attributes == nil {
     		return ""
     	}
    -	cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes)
    +	cluster, _ := xds.GetXDSHandshakeClusterName(chi.Attributes)
     	return cluster
     }
     
    diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
    index 82bee1443b..4c805c6446 100644
    --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
    +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
    @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials {
     // NoSecurity.
     type insecureTC struct{}
     
    -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
    +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
     	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
     }
     
    diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
    index d475cbc089..328b838ed1 100644
    --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
    +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
    @@ -38,7 +38,7 @@ type TokenSource struct {
     }
     
     // GetRequestMetadata gets the request metadata as a map from a TokenSource.
    -func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
    +func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
     	token, err := ts.Token()
     	if err != nil {
     		return nil, err
    @@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials {
     	return oauthAccess{token: *token}
     }
     
    -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
    +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
     	ri, _ := credentials.RequestInfoFromContext(ctx)
     	if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
     		return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err)
    @@ -156,7 +156,7 @@ type serviceAccount struct {
     	t      *oauth2.Token
     }
     
    -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
    +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
     	s.mu.Lock()
     	defer s.mu.Unlock()
     	if !s.t.Valid() {
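For reference, a sketch of attaching an OAuth2 token source as per-RPC credentials; the URI parameters blanked out above are not used by these implementations. A real client would normally use a refreshing token source rather than a static token.

    package example

    import (
    	"crypto/tls"

    	"golang.org/x/oauth2"
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials"
    	"google.golang.org/grpc/credentials/oauth"
    )

    // dialWithToken sends "authorization: Bearer <token>" metadata on every RPC.
    // TokenSource requires transport security, so TLS credentials are supplied.
    func dialWithToken(target, token string) (*grpc.ClientConn, error) {
    	perRPC := oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})}
    	return grpc.NewClient(target,
    		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
    		grpc.WithPerRPCCredentials(perRPC),
    	)
    }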
    diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
    index 5dafd34edf..e163a473df 100644
    --- a/vendor/google.golang.org/grpc/credentials/tls.go
    +++ b/vendor/google.golang.org/grpc/credentials/tls.go
    @@ -27,9 +27,13 @@ import (
     	"net/url"
     	"os"
     
    +	"google.golang.org/grpc/grpclog"
     	credinternal "google.golang.org/grpc/internal/credentials"
    +	"google.golang.org/grpc/internal/envconfig"
     )
     
    +var logger = grpclog.Component("credentials")
    +
     // TLSInfo contains the auth information for a TLS authenticated connection.
     // It implements the AuthInfo interface.
     type TLSInfo struct {
    @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
     		conn.Close()
     		return nil, nil, ctx.Err()
     	}
    +
    +	// The negotiated protocol can be either of the following:
    +	// 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
    +	//    it is the only protocol advertised by the client during the handshake.
    +	//    The tls library ensures that the server chooses a protocol advertised
    +	//    by the client.
    +	// 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
    +	//    for using HTTP/2 over TLS. We can terminate the connection immediately.
    +	np := conn.ConnectionState().NegotiatedProtocol
    +	if np == "" {
    +		if envconfig.EnforceALPNEnabled {
    +			conn.Close()
    +			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
    +		}
    +		logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
    +	}
     	tlsInfo := TLSInfo{
     		State: conn.ConnectionState(),
     		CommonAuthInfo: CommonAuthInfo{
    @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
     		conn.Close()
     		return nil, nil, err
     	}
    +	cs := conn.ConnectionState()
    +	// The negotiated application protocol can be empty only if the client doesn't
    +	// support ALPN. In such cases, we can close the connection since ALPN is required
    +	// for using HTTP/2 over TLS.
    +	if cs.NegotiatedProtocol == "" {
    +		if envconfig.EnforceALPNEnabled {
    +			conn.Close()
    +			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
    +		} else if logger.V(2) {
    +			logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
    +		}
    +	}
     	tlsInfo := TLSInfo{
    -		State: conn.ConnectionState(),
    +		State: cs,
     		CommonAuthInfo: CommonAuthInfo{
     			SecurityLevel: PrivacyAndIntegrity,
     		},
    @@ -168,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{
     
     // NewTLS uses c to construct a TransportCredentials based on TLS.
     func NewTLS(c *tls.Config) TransportCredentials {
    -	tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
    -	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
    +	config := applyDefaults(c)
    +	if config.GetConfigForClient != nil {
    +		oldFn := config.GetConfigForClient
    +		config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
    +			cfgForClient, err := oldFn(hello)
    +			if err != nil || cfgForClient == nil {
    +				return cfgForClient, err
    +			}
    +			return applyDefaults(cfgForClient), nil
    +		}
    +	}
    +	return &tlsCreds{config: config}
    +}
    +
    +func applyDefaults(c *tls.Config) *tls.Config {
    +	config := credinternal.CloneTLSConfig(c)
    +	config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
     	// If the user did not configure a MinVersion and did not configure a
     	// MaxVersion < 1.2, use MinVersion=1.2, which is required by
     	// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
    -	if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
    -		tc.config.MinVersion = tls.VersionTLS12
    +	if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
    +		config.MinVersion = tls.VersionTLS12
     	}
     	// If the user did not configure CipherSuites, use all "secure" cipher
     	// suites reported by the TLS package, but remove some explicitly forbidden
     	// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
    -	if tc.config.CipherSuites == nil {
    +	if config.CipherSuites == nil {
     		for _, cs := range tls.CipherSuites() {
     			if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
    -				tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
    +				config.CipherSuites = append(config.CipherSuites, cs.ID)
     			}
     		}
     	}
    -	return tc
    +	return config
     }
     
     // NewClientTLSFromCert constructs TLS credentials from the provided root
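For reference, a sketch of using NewTLS with a plain tls.Config: the applyDefaults path above appends h2 to NextProtos, raises MinVersion to TLS 1.2, and filters HTTP/2-forbidden cipher suites, so the caller does not set those explicitly. Whether a missing ALPN value is fatal is governed by the envconfig.EnforceALPNEnabled flag referenced in the handshake changes. The helper name and file layout below are illustrative.

    package example

    import (
    	"crypto/tls"
    	"crypto/x509"
    	"fmt"
    	"os"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials"
    )

    // dialTLS builds transport credentials from a CA bundle; NewTLS clones the
    // config and applies the HTTP/2-friendly defaults described above.
    func dialTLS(target, caFile, serverName string) (*grpc.ClientConn, error) {
    	pem, err := os.ReadFile(caFile)
    	if err != nil {
    		return nil, err
    	}
    	pool := x509.NewCertPool()
    	if !pool.AppendCertsFromPEM(pem) {
    		return nil, fmt.Errorf("no certificates parsed from %s", caFile)
    	}
    	creds := credentials.NewTLS(&tls.Config{RootCAs: pool, ServerName: serverName})
    	return grpc.NewClient(target, grpc.WithTransportCredentials(creds))
    }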
    diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
    index ba24261804..7494ae591f 100644
    --- a/vendor/google.golang.org/grpc/dialoptions.go
    +++ b/vendor/google.golang.org/grpc/dialoptions.go
    @@ -21,6 +21,7 @@ package grpc
     import (
     	"context"
     	"net"
    +	"net/url"
     	"time"
     
     	"google.golang.org/grpc/backoff"
    @@ -32,10 +33,16 @@ import (
     	"google.golang.org/grpc/internal/binarylog"
     	"google.golang.org/grpc/internal/transport"
     	"google.golang.org/grpc/keepalive"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/resolver"
     	"google.golang.org/grpc/stats"
     )
     
    +const (
    +	// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
    +	defaultMaxCallAttempts = 5
    +)
    +
     func init() {
     	internal.AddGlobalDialOptions = func(opt ...DialOption) {
     		globalDialOptions = append(globalDialOptions, opt...)
    @@ -43,10 +50,18 @@ func init() {
     	internal.ClearGlobalDialOptions = func() {
     		globalDialOptions = nil
     	}
    +	internal.AddGlobalPerTargetDialOptions = func(opt any) {
    +		if ptdo, ok := opt.(perTargetDialOption); ok {
    +			globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
    +		}
    +	}
    +	internal.ClearGlobalPerTargetDialOptions = func() {
    +		globalPerTargetDialOptions = nil
    +	}
     	internal.WithBinaryLogger = withBinaryLogger
     	internal.JoinDialOptions = newJoinDialOption
     	internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
    -	internal.WithRecvBufferPool = withRecvBufferPool
    +	internal.WithBufferPool = withBufferPool
     }
     
     // dialOptions configure a Dial call. dialOptions are set by the DialOption
    @@ -68,17 +83,17 @@ type dialOptions struct {
     	binaryLogger                binarylog.Logger
     	copts                       transport.ConnectOptions
     	callOptions                 []CallOption
    -	channelzParentID            *channelz.Identifier
    +	channelzParent              channelz.Identifier
     	disableServiceConfig        bool
     	disableRetry                bool
     	disableHealthCheck          bool
    -	healthCheckFunc             internal.HealthChecker
     	minConnectTimeout           func() time.Duration
     	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
     	defaultServiceConfigRawJSON *string
     	resolvers                   []resolver.Builder
     	idleTimeout                 time.Duration
    -	recvBufferPool              SharedBufferPool
    +	defaultScheme               string
    +	maxCallAttempts             int
     }
     
     // DialOption configures how we set up the connection.
    @@ -88,6 +103,19 @@ type DialOption interface {
     
     var globalDialOptions []DialOption
     
    +// perTargetDialOption takes a parsed target and returns a dial option to apply.
    +//
+// This gets called after NewClient() parses the target, and allows per-target
+// configuration to be set through a returned DialOption. The DialOption will not
+// take effect if it specifies a resolver builder, as that DialOption is factored
+// in while parsing the target.
    +type perTargetDialOption interface {
    +	// DialOption returns a Dial Option to apply.
    +	DialOptionForTarget(parsedTarget url.URL) DialOption
    +}
    +
    +var globalPerTargetDialOptions []perTargetDialOption
    +
     // EmptyDialOption does not alter the dial configuration. It can be embedded in
     // another structure to build custom dial options.
     //
    @@ -154,9 +182,7 @@ func WithSharedWriteBuffer(val bool) DialOption {
     }
     
     // WithWriteBufferSize determines how much data can be batched before doing a
    -// write on the wire. The corresponding memory allocation for this buffer will
    -// be twice the size to keep syscalls low. The default value for this buffer is
    -// 32KB.
    +// write on the wire. The default value for this buffer is 32KB.
     //
     // Zero or negative values will disable the write buffer such that each write
     // will be on underlying connection. Note: A Send call may not directly
    @@ -301,6 +327,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
     //
     // Use of this feature is not recommended.  For more information, please see:
     // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
    +//
    +// Deprecated: this DialOption is not supported by NewClient.
    +// Will be supported throughout 1.x.
     func WithBlock() DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
     		o.block = true
    @@ -315,10 +344,8 @@ func WithBlock() DialOption {
     // Use of this feature is not recommended.  For more information, please see:
     // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
     //
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    -// later release.
    +// Deprecated: this DialOption is not supported by NewClient.
    +// Will be supported throughout 1.x.
     func WithReturnConnectionError() DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
     		o.block = true
    @@ -388,8 +415,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
     // WithTimeout returns a DialOption that configures a timeout for dialing a
     // ClientConn initially. This is valid if and only if WithBlock() is present.
     //
    -// Deprecated: use DialContext instead of Dial and context.WithTimeout
    -// instead.  Will be supported throughout 1.x.
    +// Deprecated: this DialOption is not supported by NewClient.
    +// Will be supported throughout 1.x.
     func WithTimeout(d time.Duration) DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
     		o.timeout = d
    @@ -408,7 +435,7 @@ func WithTimeout(d time.Duration) DialOption {
     // option to true from the Control field. For a concrete example of how to do
     // this, see internal.NetDialerWithTCPKeepalive().
     //
    -// For more information, please see [issue 23459] in the Go github repo.
    +// For more information, please see [issue 23459] in the Go GitHub repo.
     //
     // [issue 23459]: https://github.com/golang/go/issues/23459
     func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
    @@ -417,10 +444,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
     	})
     }
     
    -func init() {
    -	internal.WithHealthCheckFunc = withHealthCheckFunc
    -}
    -
     // WithDialer returns a DialOption that specifies a function to use for dialing
     // network addresses. If FailOnNonTempDialError() is set to true, and an error
     // is returned by f, gRPC checks the error's Temporary() method to decide if it
    @@ -471,9 +494,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption {
     // Use of this feature is not recommended.  For more information, please see:
     // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
     //
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    +// Deprecated: this DialOption is not supported by NewClient.
    +// This API may be changed or removed in a
     // later release.
     func FailOnNonTempDialError(f bool) DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
    @@ -491,6 +513,8 @@ func WithUserAgent(s string) DialOption {
     
     // WithKeepaliveParams returns a DialOption that specifies keepalive parameters
     // for the client transport.
    +//
    +// Keepalive is disabled by default.
     func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
     	if kp.Time < internal.KeepaliveMinPingTime {
     		logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
    @@ -555,9 +579,9 @@ func WithAuthority(a string) DialOption {
     //
     // Notice: This API is EXPERIMENTAL and may be changed or removed in a
     // later release.
    -func WithChannelzParentID(id *channelz.Identifier) DialOption {
    +func WithChannelzParentID(c channelz.Identifier) DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
    -		o.channelzParentID = id
    +		o.channelzParent = c
     	})
     }
     
    @@ -602,12 +626,22 @@ func WithDisableRetry() DialOption {
     	})
     }
     
    +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
    +// (uncompressed) size of header list that the client is prepared to accept.
    +type MaxHeaderListSizeDialOption struct {
    +	MaxHeaderListSize uint32
    +}
    +
    +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
    +	do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
    +}
    +
     // WithMaxHeaderListSize returns a DialOption that specifies the maximum
     // (uncompressed) size of header list that the client is prepared to accept.
     func WithMaxHeaderListSize(s uint32) DialOption {
    -	return newFuncDialOption(func(o *dialOptions) {
    -		o.copts.MaxHeaderListSize = &s
    -	})
    +	return MaxHeaderListSizeDialOption{
    +		MaxHeaderListSize: s,
    +	}
     }
     
     // WithDisableHealthCheck disables the LB channel health checking for all
    @@ -623,16 +657,6 @@ func WithDisableHealthCheck() DialOption {
     	})
     }
     
    -// withHealthCheckFunc replaces the default health check function with the
    -// provided one. It makes tests easier to change the health check function.
    -//
    -// For testing purpose only.
    -func withHealthCheckFunc(f internal.HealthChecker) DialOption {
    -	return newFuncDialOption(func(o *dialOptions) {
    -		o.healthCheckFunc = f
    -	})
    -}
    -
     func defaultDialOptions() dialOptions {
     	return dialOptions{
     		copts: transport.ConnectOptions{
    @@ -640,15 +664,16 @@ func defaultDialOptions() dialOptions {
     			WriteBufferSize: defaultWriteBufSize,
     			UseProxy:        true,
     			UserAgent:       grpcUA,
    +			BufferPool:      mem.DefaultBufferPool(),
     		},
     		bs:              internalbackoff.DefaultExponential,
    -		healthCheckFunc: internal.HealthCheckFunc,
     		idleTimeout:     30 * time.Minute,
    -		recvBufferPool:  nopBufferPool{},
    +		defaultScheme:   "dns",
    +		maxCallAttempts: defaultMaxCallAttempts,
     	}
     }
     
    -// withGetMinConnectDeadline specifies the function that clientconn uses to
    +// withMinConnectDeadline specifies the function that clientconn uses to
     // get minConnectDeadline. This can be used to make connection attempts happen
     // faster/slower.
     //
    @@ -659,6 +684,14 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
     	})
     }
     
    +// withDefaultScheme is used to allow Dial to use "passthrough" as the default
    +// name resolver, while NewClient uses "dns" otherwise.
    +func withDefaultScheme(s string) DialOption {
    +	return newFuncDialOption(func(o *dialOptions) {
    +		o.defaultScheme = s
    +	})
    +}
    +
     // WithResolvers allows a list of resolver implementations to be registered
     // locally with the ClientConn without needing to be globally registered via
     // resolver.Register.  They will be matched against the scheme used for the
    @@ -694,25 +727,25 @@ func WithIdleTimeout(d time.Duration) DialOption {
     	})
     }
     
    -// WithRecvBufferPool returns a DialOption that configures the ClientConn
    -// to use the provided shared buffer pool for parsing incoming messages. Depending
    -// on the application's workload, this could result in reduced memory allocation.
    +// WithMaxCallAttempts returns a DialOption that configures the maximum number
    +// of attempts per call (including retries and hedging) using the channel.
    +// Service owners may specify a higher value for these parameters, but higher
    +// values will be treated as equal to the maximum value by the client
    +// implementation. This mitigates security concerns related to the service
    +// config being transferred to the client via DNS.
     //
    -// If you are unsure about how to implement a memory pool but want to utilize one,
    -// begin with grpc.NewSharedBufferPool.
    -//
    -// Note: The shared buffer pool feature will not be active if any of the following
    -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
    -// cases, the shared buffer pool will be ignored.
    -//
    -// Deprecated: use experimental.WithRecvBufferPool instead.  Will be deleted in
    -// v1.60.0 or later.
    -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
    -	return withRecvBufferPool(bufferPool)
    +// A value of 5 will be used if this dial option is not set or n < 2.
    +func WithMaxCallAttempts(n int) DialOption {
    +	return newFuncDialOption(func(o *dialOptions) {
    +		if n < 2 {
    +			n = defaultMaxCallAttempts
    +		}
    +		o.maxCallAttempts = n
    +	})
     }
     
    -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
    +func withBufferPool(bufferPool mem.BufferPool) DialOption {
     	return newFuncDialOption(func(o *dialOptions) {
    -		o.recvBufferPool = bufferPool
    +		o.copts.BufferPool = bufferPool
     	})
     }
    diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
    index 0022859ad7..e7b532b6f8 100644
    --- a/vendor/google.golang.org/grpc/doc.go
    +++ b/vendor/google.golang.org/grpc/doc.go
    @@ -16,7 +16,7 @@
      *
      */
     
    -//go:generate ./regenerate.sh
    +//go:generate ./scripts/regenerate.sh
     
     /*
     Package grpc implements an RPC system called gRPC.
    diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
    index 5ebf88d714..11d0ae142c 100644
    --- a/vendor/google.golang.org/grpc/encoding/encoding.go
    +++ b/vendor/google.golang.org/grpc/encoding/encoding.go
    @@ -94,7 +94,7 @@ type Codec interface {
     	Name() string
     }
     
    -var registeredCodecs = make(map[string]Codec)
    +var registeredCodecs = make(map[string]any)
     
     // RegisterCodec registers the provided Codec for use with all gRPC clients and
     // servers.
    @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) {
     //
     // The content-subtype is expected to be lowercase.
     func GetCodec(contentSubtype string) Codec {
    -	return registeredCodecs[contentSubtype]
    +	c, _ := registeredCodecs[contentSubtype].(Codec)
    +	return c
     }
    diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
    new file mode 100644
    index 0000000000..074c5e234a
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
    @@ -0,0 +1,81 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package encoding
    +
    +import (
    +	"strings"
    +
    +	"google.golang.org/grpc/mem"
    +)
    +
    +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
    +// that implementations of this interface must be thread safe; a CodecV2's
    +// methods can be called from concurrent goroutines.
    +type CodecV2 interface {
    +	// Marshal returns the wire format of v. The buffers in the returned
    +	// [mem.BufferSlice] must have at least one reference each, which will be freed
    +	// by gRPC when they are no longer needed.
    +	Marshal(v any) (out mem.BufferSlice, err error)
    +	// Unmarshal parses the wire format into v. Note that data will be freed as soon
    +	// as this function returns. If the codec wishes to guarantee access to the data
    +	// after this function, it must take its own reference that it frees when it is
    +	// no longer needed.
    +	Unmarshal(data mem.BufferSlice, v any) error
    +	// Name returns the name of the Codec implementation. The returned string
    +	// will be used as part of content type in transmission.  The result must be
    +	// static; the result cannot change between calls.
    +	Name() string
    +}
    +
    +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
    +// servers.
    +//
    +// The CodecV2 will be stored and looked up by result of its Name() method, which
    +// should match the content-subtype of the encoding handled by the CodecV2.  This
    +// is case-insensitive, and is stored and looked up as lowercase.  If the
    +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
    +// Content-Type on
    +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
    +// more details.
    +//
    +// If both a Codec and CodecV2 are registered with the same name, the CodecV2
    +// will be used.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe.  If multiple Codecs are
    +// registered with the same name, the one registered last will take effect.
    +func RegisterCodecV2(codec CodecV2) {
    +	if codec == nil {
    +		panic("cannot register a nil CodecV2")
    +	}
    +	if codec.Name() == "" {
    +		panic("cannot register CodecV2 with empty string result for Name()")
    +	}
    +	contentSubtype := strings.ToLower(codec.Name())
    +	registeredCodecs[contentSubtype] = codec
    +}
    +
    +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
    +// registered for the content-subtype.
    +//
    +// The content-subtype is expected to be lowercase.
    +func GetCodecV2(contentSubtype string) CodecV2 {
    +	c, _ := registeredCodecs[contentSubtype].(CodecV2)
    +	return c
    +}
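For reference, a minimal sketch of a custom CodecV2 built on encoding/json, using the same mem helpers the proto codec below relies on (SliceBuffer, MaterializeToBuffer, DefaultBufferPool). Per the registration rules above, it is registered from an init function and looked up by its lowercase Name.

    package jsoncodec

    import (
    	"encoding/json"

    	"google.golang.org/grpc/encoding"
    	"google.golang.org/grpc/mem"
    )

    // codec is a toy CodecV2 that (de)serializes messages as JSON.
    type codec struct{}

    func init() { encoding.RegisterCodecV2(codec{}) }

    func (codec) Marshal(v any) (mem.BufferSlice, error) {
    	b, err := json.Marshal(v)
    	if err != nil {
    		return nil, err
    	}
    	// Small payloads can be wrapped directly; pooling (as the proto codec
    	// below does) is an optimization, not a requirement of the interface.
    	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
    }

    func (codec) Unmarshal(data mem.BufferSlice, v any) error {
    	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
    	defer buf.Free()
    	return json.Unmarshal(buf.ReadOnlyData(), v)
    }

    func (codec) Name() string { return "json" }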
    diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
    index 0ee3d3bae9..ceec319dd2 100644
    --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
    +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
    @@ -1,6 +1,6 @@
     /*
      *
    - * Copyright 2018 gRPC authors.
    + * Copyright 2024 gRPC authors.
      *
      * Licensed under the Apache License, Version 2.0 (the "License");
      * you may not use this file except in compliance with the License.
    @@ -23,36 +23,74 @@ package proto
     import (
     	"fmt"
     
    -	"github.com/golang/protobuf/proto"
     	"google.golang.org/grpc/encoding"
    +	"google.golang.org/grpc/mem"
    +	"google.golang.org/protobuf/proto"
    +	"google.golang.org/protobuf/protoadapt"
     )
     
     // Name is the name registered for the proto compressor.
     const Name = "proto"
     
     func init() {
    -	encoding.RegisterCodec(codec{})
    +	encoding.RegisterCodecV2(&codecV2{})
     }
     
    -// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
    -type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec
    +// gRPC.
    +type codecV2 struct{}
     
    -func (codec) Marshal(v any) ([]byte, error) {
    -	vv, ok := v.(proto.Message)
    -	if !ok {
    -		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
    +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
    +	vv := messageV2Of(v)
    +	if vv == nil {
    +		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
     	}
    -	return proto.Marshal(vv)
    +
    +	size := proto.Size(vv)
    +	if mem.IsBelowBufferPoolingThreshold(size) {
    +		buf, err := proto.Marshal(vv)
    +		if err != nil {
    +			return nil, err
    +		}
    +		data = append(data, mem.SliceBuffer(buf))
    +	} else {
    +		pool := mem.DefaultBufferPool()
    +		buf := pool.Get(size)
    +		if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
    +			pool.Put(buf)
    +			return nil, err
    +		}
    +		data = append(data, mem.NewBuffer(buf, pool))
    +	}
    +
    +	return data, nil
     }
     
    -func (codec) Unmarshal(data []byte, v any) error {
    -	vv, ok := v.(proto.Message)
    -	if !ok {
    +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
    +	vv := messageV2Of(v)
    +	if vv == nil {
     		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
     	}
    -	return proto.Unmarshal(data, vv)
    +
    +	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
    +	defer buf.Free()
    +	// TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
    +	//  really possible without a major overhaul of the proto package, but the
    +	//  vtprotobuf library may be able to support this.
    +	return proto.Unmarshal(buf.ReadOnlyData(), vv)
    +}
    +
    +func messageV2Of(v any) proto.Message {
    +	switch v := v.(type) {
    +	case protoadapt.MessageV1:
    +		return protoadapt.MessageV2Of(v)
    +	case protoadapt.MessageV2:
    +		return v
    +	}
    +
    +	return nil
     }
     
    -func (codec) Name() string {
    +func (c *codecV2) Name() string {
     	return Name
     }
    diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
    new file mode 100644
    index 0000000000..ad75313a18
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
    @@ -0,0 +1,270 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package stats
    +
    +import (
    +	"maps"
    +
    +	"google.golang.org/grpc/grpclog"
    +	"google.golang.org/grpc/internal"
    +	"google.golang.org/grpc/stats"
    +)
    +
    +func init() {
    +	internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
    +}
    +
    +var logger = grpclog.Component("metrics-registry")
    +
    +// DefaultMetrics are the default metrics registered through global metrics
    +// registry. This is written to at initialization time only, and is read only
    +// after initialization.
    +var DefaultMetrics = stats.NewMetricSet()
    +
    +// MetricDescriptor is the data for a registered metric.
    +type MetricDescriptor struct {
    +	// The name of this metric. This name must be unique across the whole binary
    +	// (including any per call metrics). See
    +	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
    +	// for metric naming conventions.
    +	Name string
    +	// The description of this metric.
    +	Description string
    +	// The unit (e.g. entries, seconds) of this metric.
    +	Unit string
+	// The required label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler.
+	Labels []string
+	// The optional label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler if configured.
    +	OptionalLabels []string
    +	// Whether this metric is on by default.
    +	Default bool
    +	// The type of metric. This is set by the metric registry, and not intended
    +	// to be set by a component registering a metric.
    +	Type MetricType
    +	// Bounds are the bounds of this metric. This only applies to histogram
    +	// metrics. If unset or set with length 0, stats handlers will fall back to
    +	// default bounds.
    +	Bounds []float64
    +}
    +
    +// MetricType is the type of metric.
    +type MetricType int
    +
    +// Type of metric supported by this instrument registry.
    +const (
    +	MetricTypeIntCount MetricType = iota
    +	MetricTypeFloatCount
    +	MetricTypeIntHisto
    +	MetricTypeFloatHisto
    +	MetricTypeIntGauge
    +)
    +
+// Int64CountHandle is a typed handle for an int count metric. This handle
    +// is passed at the recording point in order to know which metric to record
    +// on.
    +type Int64CountHandle MetricDescriptor
    +
    +// Descriptor returns the int64 count handle typecast to a pointer to a
    +// MetricDescriptor.
    +func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
    +	return (*MetricDescriptor)(h)
    +}
    +
    +// Record records the int64 count value on the metrics recorder provided.
    +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
    +	recorder.RecordInt64Count(h, incr, labels...)
    +}
    +
    +// Float64CountHandle is a typed handle for a float count metric. This handle is
    +// passed at the recording point in order to know which metric to record on.
    +type Float64CountHandle MetricDescriptor
    +
    +// Descriptor returns the float64 count handle typecast to a pointer to a
    +// MetricDescriptor.
    +func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
    +	return (*MetricDescriptor)(h)
    +}
    +
    +// Record records the float64 count value on the metrics recorder provided.
    +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
    +	recorder.RecordFloat64Count(h, incr, labels...)
    +}
    +
    +// Int64HistoHandle is a typed handle for an int histogram metric. This handle
    +// is passed at the recording point in order to know which metric to record on.
    +type Int64HistoHandle MetricDescriptor
    +
    +// Descriptor returns the int64 histo handle typecast to a pointer to a
    +// MetricDescriptor.
    +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
    +	return (*MetricDescriptor)(h)
    +}
    +
    +// Record records the int64 histo value on the metrics recorder provided.
    +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
    +	recorder.RecordInt64Histo(h, incr, labels...)
    +}
    +
    +// Float64HistoHandle is a typed handle for a float histogram metric. This
    +// handle is passed at the recording point in order to know which metric to
    +// record on.
    +type Float64HistoHandle MetricDescriptor
    +
    +// Descriptor returns the float64 histo handle typecast to a pointer to a
    +// MetricDescriptor.
    +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
    +	return (*MetricDescriptor)(h)
    +}
    +
    +// Record records the float64 histo value on the metrics recorder provided.
    +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
    +	recorder.RecordFloat64Histo(h, incr, labels...)
    +}
    +
    +// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
    +// passed at the recording point in order to know which metric to record on.
    +type Int64GaugeHandle MetricDescriptor
    +
    +// Descriptor returns the int64 gauge handle typecast to a pointer to a
    +// MetricDescriptor.
    +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
    +	return (*MetricDescriptor)(h)
    +}
    +
+// Record records the int64 gauge value on the metrics recorder provided.
    +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
    +	recorder.RecordInt64Gauge(h, incr, labels...)
    +}
    +
    +// registeredMetrics are the registered metric descriptor names.
    +var registeredMetrics = make(map[string]bool)
    +
    +// metricsRegistry contains all of the registered metrics.
    +//
    +// This is written to only at init time, and read only after that.
    +var metricsRegistry = make(map[string]*MetricDescriptor)
    +
    +// DescriptorForMetric returns the MetricDescriptor from the global registry.
    +//
    +// Returns nil if MetricDescriptor not present.
    +func DescriptorForMetric(metricName string) *MetricDescriptor {
    +	return metricsRegistry[metricName]
    +}
    +
    +func registerMetric(metricName string, def bool) {
    +	if registeredMetrics[metricName] {
    +		logger.Fatalf("metric %v already registered", metricName)
    +	}
    +	registeredMetrics[metricName] = true
    +	if def {
    +		DefaultMetrics = DefaultMetrics.Add(metricName)
    +	}
    +}
    +
    +// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe. If multiple metrics are
    +// registered with the same name, this function will panic.
    +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
    +	registerMetric(descriptor.Name, descriptor.Default)
    +	descriptor.Type = MetricTypeIntCount
    +	descPtr := &descriptor
    +	metricsRegistry[descriptor.Name] = descPtr
    +	return (*Int64CountHandle)(descPtr)
    +}
    +
    +// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe. If multiple metrics are
    +// registered with the same name, this function will panic.
    +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
    +	registerMetric(descriptor.Name, descriptor.Default)
    +	descriptor.Type = MetricTypeFloatCount
    +	descPtr := &descriptor
    +	metricsRegistry[descriptor.Name] = descPtr
    +	return (*Float64CountHandle)(descPtr)
    +}
    +
    +// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe. If multiple metrics are
    +// registered with the same name, this function will panic.
    +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
    +	registerMetric(descriptor.Name, descriptor.Default)
    +	descriptor.Type = MetricTypeIntHisto
    +	descPtr := &descriptor
    +	metricsRegistry[descriptor.Name] = descPtr
    +	return (*Int64HistoHandle)(descPtr)
    +}
    +
    +// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe. If multiple metrics are
    +// registered with the same name, this function will panic.
    +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
    +	registerMetric(descriptor.Name, descriptor.Default)
    +	descriptor.Type = MetricTypeFloatHisto
    +	descPtr := &descriptor
    +	metricsRegistry[descriptor.Name] = descPtr
    +	return (*Float64HistoHandle)(descPtr)
    +}
    +
    +// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
    +//
    +// NOTE: this function must only be called during initialization time (i.e. in
    +// an init() function), and is not thread-safe. If multiple metrics are
    +// registered with the same name, this function will panic.
    +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
    +	registerMetric(descriptor.Name, descriptor.Default)
    +	descriptor.Type = MetricTypeIntGauge
    +	descPtr := &descriptor
    +	metricsRegistry[descriptor.Name] = descPtr
    +	return (*Int64GaugeHandle)(descPtr)
    +}
    +
    +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
    +// registry. Returns a cleanup function that sets the metrics registry to its
    +// original state.
    +func snapshotMetricsRegistryForTesting() func() {
    +	oldDefaultMetrics := DefaultMetrics
    +	oldRegisteredMetrics := registeredMetrics
    +	oldMetricsRegistry := metricsRegistry
    +
    +	registeredMetrics = make(map[string]bool)
    +	metricsRegistry = make(map[string]*MetricDescriptor)
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
    +
    +	return func() {
    +		DefaultMetrics = oldDefaultMetrics
    +		registeredMetrics = oldRegisteredMetrics
    +		metricsRegistry = oldMetricsRegistry
    +	}
    +}
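For context (not part of the vendored patch), a minimal sketch of how a gRPC component could use the registry added above. The metric name, unit, and label are hypothetical, and recorder is any implementation of the MetricsRecorder interface defined in metrics.go below.

package example

import estats "google.golang.org/grpc/experimental/stats"

// Registration must happen at package-initialization time, per the NOTE on
// RegisterInt64Count; the descriptor values below are illustrative only.
var exampleCalls = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.calls", // hypothetical metric name
	Description: "Number of example calls attempted.",
	Unit:        "call",
	Labels:      []string{"grpc.method"},
	Default:     false,
})

// recordCall records one call against whatever MetricsRecorder the stats
// plugin supplies at the recording point.
func recordCall(recorder estats.MetricsRecorder, method string) {
	exampleCalls.Record(recorder, 1, method)
}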
    diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
    new file mode 100644
    index 0000000000..ee1423605a
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
    @@ -0,0 +1,54 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
+// Package stats contains experimental metrics/stats APIs.
    +package stats
    +
    +import "google.golang.org/grpc/stats"
    +
    +// MetricsRecorder records on metrics derived from metric registry.
    +type MetricsRecorder interface {
    +	// RecordInt64Count records the measurement alongside labels on the int
    +	// count associated with the provided handle.
    +	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
    +	// RecordFloat64Count records the measurement alongside labels on the float
    +	// count associated with the provided handle.
    +	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
    +	// RecordInt64Histo records the measurement alongside labels on the int
    +	// histo associated with the provided handle.
    +	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
    +	// RecordFloat64Histo records the measurement alongside labels on the float
    +	// histo associated with the provided handle.
    +	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
    +	// RecordInt64Gauge records the measurement alongside labels on the int
    +	// gauge associated with the provided handle.
    +	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
    +}
    +
    +// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
    +// Metrics will be deleted in a future release.
    +type Metrics = stats.MetricSet
    +
    +// Metric was replaced by direct usage of strings.
    +type Metric = string
    +
    +// NewMetrics is an experimental legacy alias of the now-stable
    +// stats.NewMetricSet.  NewMetrics will be deleted in a future release.
    +func NewMetrics(metrics ...Metric) *Metrics {
    +	return stats.NewMetricSet(metrics...)
    +}
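As a usage note (again not from the patch), any type with these five Record methods satisfies MetricsRecorder; a sketch of a hypothetical no-op recorder that discards every measurement:

package example

import estats "google.golang.org/grpc/experimental/stats"

// noopRecorder drops all measurements; useful as a stand-in where no stats
// plugin is configured.
type noopRecorder struct{}

func (noopRecorder) RecordInt64Count(*estats.Int64CountHandle, int64, ...string)       {}
func (noopRecorder) RecordFloat64Count(*estats.Float64CountHandle, float64, ...string) {}
func (noopRecorder) RecordInt64Histo(*estats.Int64HistoHandle, int64, ...string)       {}
func (noopRecorder) RecordFloat64Histo(*estats.Float64HistoHandle, float64, ...string) {}
func (noopRecorder) RecordInt64Gauge(*estats.Int64GaugeHandle, int64, ...string)       {}

// Compile-time check that the interface is satisfied.
var _ estats.MetricsRecorder = noopRecorder{}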
    diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
    index ac73c9ced2..f1ae080dcb 100644
    --- a/vendor/google.golang.org/grpc/grpclog/component.go
    +++ b/vendor/google.golang.org/grpc/grpclog/component.go
    @@ -20,8 +20,6 @@ package grpclog
     
     import (
     	"fmt"
    -
    -	"google.golang.org/grpc/internal/grpclog"
     )
     
     // componentData records the settings for a component.
    @@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
     
     func (c *componentData) InfoDepth(depth int, args ...any) {
     	args = append([]any{"[" + string(c.name) + "]"}, args...)
    -	grpclog.InfoDepth(depth+1, args...)
    +	InfoDepth(depth+1, args...)
     }
     
     func (c *componentData) WarningDepth(depth int, args ...any) {
     	args = append([]any{"[" + string(c.name) + "]"}, args...)
    -	grpclog.WarningDepth(depth+1, args...)
    +	WarningDepth(depth+1, args...)
     }
     
     func (c *componentData) ErrorDepth(depth int, args ...any) {
     	args = append([]any{"[" + string(c.name) + "]"}, args...)
    -	grpclog.ErrorDepth(depth+1, args...)
    +	ErrorDepth(depth+1, args...)
     }
     
     func (c *componentData) FatalDepth(depth int, args ...any) {
     	args = append([]any{"[" + string(c.name) + "]"}, args...)
    -	grpclog.FatalDepth(depth+1, args...)
    +	FatalDepth(depth+1, args...)
     }
     
     func (c *componentData) Info(args ...any) {
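For reference, component loggers are obtained with grpclog.Component (as metricregistry.go does above); after this change their depth methods forward to the exported package-level helpers rather than the internal grpclog package. A small, assumed usage sketch:

package example

import "google.golang.org/grpc/grpclog"

// "example" is a hypothetical component name; output lines are prefixed with it.
var logger = grpclog.Component("example")

func warnOnce() {
	logger.Warning("something looks off")
}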
    diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
    index 16928c9cb9..db320105e6 100644
    --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
    +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
    @@ -18,18 +18,15 @@
     
     // Package grpclog defines logging for grpc.
     //
    -// All logs in transport and grpclb packages only go to verbose level 2.
    -// All logs in other packages in grpc are logged in spite of the verbosity level.
    -//
    -// In the default logger,
    -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
    -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
    -package grpclog // import "google.golang.org/grpc/grpclog"
    +// In the default logger, severity level can be set by environment variable
    +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
    +// GRPC_GO_LOG_VERBOSITY_LEVEL.
    +package grpclog
     
     import (
     	"os"
     
    -	"google.golang.org/grpc/internal/grpclog"
    +	"google.golang.org/grpc/grpclog/internal"
     )
     
     func init() {
    @@ -38,58 +35,58 @@ func init() {
     
     // V reports whether verbosity level l is at least the requested verbose level.
     func V(l int) bool {
    -	return grpclog.Logger.V(l)
    +	return internal.LoggerV2Impl.V(l)
     }
     
     // Info logs to the INFO log.
     func Info(args ...any) {
    -	grpclog.Logger.Info(args...)
    +	internal.LoggerV2Impl.Info(args...)
     }
     
     // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
     func Infof(format string, args ...any) {
    -	grpclog.Logger.Infof(format, args...)
    +	internal.LoggerV2Impl.Infof(format, args...)
     }
     
     // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
     func Infoln(args ...any) {
    -	grpclog.Logger.Infoln(args...)
    +	internal.LoggerV2Impl.Infoln(args...)
     }
     
     // Warning logs to the WARNING log.
     func Warning(args ...any) {
    -	grpclog.Logger.Warning(args...)
    +	internal.LoggerV2Impl.Warning(args...)
     }
     
     // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
     func Warningf(format string, args ...any) {
    -	grpclog.Logger.Warningf(format, args...)
    +	internal.LoggerV2Impl.Warningf(format, args...)
     }
     
     // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
     func Warningln(args ...any) {
    -	grpclog.Logger.Warningln(args...)
    +	internal.LoggerV2Impl.Warningln(args...)
     }
     
     // Error logs to the ERROR log.
     func Error(args ...any) {
    -	grpclog.Logger.Error(args...)
    +	internal.LoggerV2Impl.Error(args...)
     }
     
     // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
     func Errorf(format string, args ...any) {
    -	grpclog.Logger.Errorf(format, args...)
    +	internal.LoggerV2Impl.Errorf(format, args...)
     }
     
     // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
     func Errorln(args ...any) {
    -	grpclog.Logger.Errorln(args...)
    +	internal.LoggerV2Impl.Errorln(args...)
     }
     
     // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
     // It calls os.Exit() with exit code 1.
     func Fatal(args ...any) {
    -	grpclog.Logger.Fatal(args...)
    +	internal.LoggerV2Impl.Fatal(args...)
     	// Make sure fatal logs will exit.
     	os.Exit(1)
     }
    @@ -97,15 +94,15 @@ func Fatal(args ...any) {
     // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
     // It calls os.Exit() with exit code 1.
     func Fatalf(format string, args ...any) {
    -	grpclog.Logger.Fatalf(format, args...)
    +	internal.LoggerV2Impl.Fatalf(format, args...)
     	// Make sure fatal logs will exit.
     	os.Exit(1)
     }
     
     // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
    -// It calle os.Exit()) with exit code 1.
    +// It calls os.Exit() with exit code 1.
     func Fatalln(args ...any) {
    -	grpclog.Logger.Fatalln(args...)
    +	internal.LoggerV2Impl.Fatalln(args...)
     	// Make sure fatal logs will exit.
     	os.Exit(1)
     }
    @@ -114,19 +111,76 @@ func Fatalln(args ...any) {
     //
     // Deprecated: use Info.
     func Print(args ...any) {
    -	grpclog.Logger.Info(args...)
    +	internal.LoggerV2Impl.Info(args...)
     }
     
     // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
     //
     // Deprecated: use Infof.
     func Printf(format string, args ...any) {
    -	grpclog.Logger.Infof(format, args...)
    +	internal.LoggerV2Impl.Infof(format, args...)
     }
     
     // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
     //
     // Deprecated: use Infoln.
     func Println(args ...any) {
    -	grpclog.Logger.Infoln(args...)
    +	internal.LoggerV2Impl.Infoln(args...)
    +}
    +
    +// InfoDepth logs to the INFO log at the specified depth.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +func InfoDepth(depth int, args ...any) {
    +	if internal.DepthLoggerV2Impl != nil {
    +		internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
    +	} else {
    +		internal.LoggerV2Impl.Infoln(args...)
    +	}
    +}
    +
    +// WarningDepth logs to the WARNING log at the specified depth.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +func WarningDepth(depth int, args ...any) {
    +	if internal.DepthLoggerV2Impl != nil {
    +		internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
    +	} else {
    +		internal.LoggerV2Impl.Warningln(args...)
    +	}
    +}
    +
    +// ErrorDepth logs to the ERROR log at the specified depth.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +func ErrorDepth(depth int, args ...any) {
    +	if internal.DepthLoggerV2Impl != nil {
    +		internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
    +	} else {
    +		internal.LoggerV2Impl.Errorln(args...)
    +	}
    +}
    +
    +// FatalDepth logs to the FATAL log at the specified depth.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +func FatalDepth(depth int, args ...any) {
    +	if internal.DepthLoggerV2Impl != nil {
    +		internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
    +	} else {
    +		internal.LoggerV2Impl.Fatalln(args...)
    +	}
    +	os.Exit(1)
     }
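The newly exported depth helpers are aimed at logging wrappers that want the caller's frame attributed correctly; a brief sketch, assuming the default logger is installed:

package example

import "google.golang.org/grpc/grpclog"

// logf is a hypothetical wrapper; depth 1 attributes the log line to logf's
// caller rather than to logf itself when a DepthLoggerV2 is in use.
func logf(args ...any) {
	grpclog.InfoDepth(1, args...)
}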
    diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
    new file mode 100644
    index 0000000000..59c03bc14c
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
    @@ -0,0 +1,26 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package internal contains functionality internal to the grpclog package.
    +package internal
    +
    +// LoggerV2Impl is the logger used for the non-depth log functions.
    +var LoggerV2Impl LoggerV2
    +
    +// DepthLoggerV2Impl is the logger used for the depth log functions.
    +var DepthLoggerV2Impl DepthLoggerV2
    diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
    new file mode 100644
    index 0000000000..e524fdd40b
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
    @@ -0,0 +1,87 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package internal
    +
    +// Logger mimics golang's standard Logger as an interface.
    +//
    +// Deprecated: use LoggerV2.
    +type Logger interface {
    +	Fatal(args ...any)
    +	Fatalf(format string, args ...any)
    +	Fatalln(args ...any)
    +	Print(args ...any)
    +	Printf(format string, args ...any)
    +	Println(args ...any)
    +}
    +
    +// LoggerWrapper wraps Logger into a LoggerV2.
    +type LoggerWrapper struct {
    +	Logger
    +}
    +
    +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
    +func (l *LoggerWrapper) Info(args ...any) {
    +	l.Logger.Print(args...)
    +}
    +
    +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
    +func (l *LoggerWrapper) Infoln(args ...any) {
    +	l.Logger.Println(args...)
    +}
    +
    +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
    +func (l *LoggerWrapper) Infof(format string, args ...any) {
    +	l.Logger.Printf(format, args...)
    +}
    +
    +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
    +func (l *LoggerWrapper) Warning(args ...any) {
    +	l.Logger.Print(args...)
    +}
    +
    +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
    +func (l *LoggerWrapper) Warningln(args ...any) {
    +	l.Logger.Println(args...)
    +}
    +
    +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
    +func (l *LoggerWrapper) Warningf(format string, args ...any) {
    +	l.Logger.Printf(format, args...)
    +}
    +
    +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
    +func (l *LoggerWrapper) Error(args ...any) {
    +	l.Logger.Print(args...)
    +}
    +
    +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
    +func (l *LoggerWrapper) Errorln(args ...any) {
    +	l.Logger.Println(args...)
    +}
    +
    +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
    +func (l *LoggerWrapper) Errorf(format string, args ...any) {
    +	l.Logger.Printf(format, args...)
    +}
    +
    +// V reports whether verbosity level l is at least the requested verbose level.
    +func (*LoggerWrapper) V(int) bool {
+	// Returns true for all verbose levels.
    +	return true
    +}
    diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
    new file mode 100644
    index 0000000000..ed90060c3c
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
    @@ -0,0 +1,267 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package internal
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"io"
    +	"log"
    +	"os"
    +)
    +
    +// LoggerV2 does underlying logging work for grpclog.
    +type LoggerV2 interface {
    +	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
    +	Info(args ...any)
    +	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
    +	Infoln(args ...any)
    +	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
    +	Infof(format string, args ...any)
    +	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
    +	Warning(args ...any)
    +	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
    +	Warningln(args ...any)
    +	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
    +	Warningf(format string, args ...any)
    +	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
    +	Error(args ...any)
    +	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
    +	Errorln(args ...any)
    +	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
    +	Errorf(format string, args ...any)
    +	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
    +	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    +	// Implementations may also call os.Exit() with a non-zero exit code.
    +	Fatal(args ...any)
    +	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
    +	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    +	// Implementations may also call os.Exit() with a non-zero exit code.
    +	Fatalln(args ...any)
    +	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
    +	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    +	// Implementations may also call os.Exit() with a non-zero exit code.
    +	Fatalf(format string, args ...any)
    +	// V reports whether verbosity level l is at least the requested verbose level.
    +	V(l int) bool
    +}
    +
    +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
    +// DepthLoggerV2, the below functions will be called with the appropriate stack
    +// depth set for trivial functions the logger may ignore.
    +//
    +// # Experimental
    +//
    +// Notice: This type is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +type DepthLoggerV2 interface {
    +	LoggerV2
    +	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
    +	InfoDepth(depth int, args ...any)
    +	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
    +	WarningDepth(depth int, args ...any)
    +	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
    +	ErrorDepth(depth int, args ...any)
    +	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
    +	FatalDepth(depth int, args ...any)
    +}
    +
    +const (
    +	// infoLog indicates Info severity.
    +	infoLog int = iota
    +	// warningLog indicates Warning severity.
    +	warningLog
    +	// errorLog indicates Error severity.
    +	errorLog
    +	// fatalLog indicates Fatal severity.
    +	fatalLog
    +)
    +
    +// severityName contains the string representation of each severity.
    +var severityName = []string{
    +	infoLog:    "INFO",
    +	warningLog: "WARNING",
    +	errorLog:   "ERROR",
    +	fatalLog:   "FATAL",
    +}
    +
    +// sprintf is fmt.Sprintf.
    +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
    +var sprintf = fmt.Sprintf
    +
    +// sprint is fmt.Sprint.
    +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
    +var sprint = fmt.Sprint
    +
    +// sprintln is fmt.Sprintln.
    +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
    +var sprintln = fmt.Sprintln
    +
    +// exit is os.Exit.
    +// This var exists to make it possible to test functions calling os.Exit.
    +var exit = os.Exit
    +
    +// loggerT is the default logger used by grpclog.
    +type loggerT struct {
    +	m          []*log.Logger
    +	v          int
    +	jsonFormat bool
    +}
    +
    +func (g *loggerT) output(severity int, s string) {
    +	sevStr := severityName[severity]
    +	if !g.jsonFormat {
    +		g.m[severity].Output(2, sevStr+": "+s)
    +		return
    +	}
    +	// TODO: we can also include the logging component, but that needs more
    +	// (API) changes.
    +	b, _ := json.Marshal(map[string]string{
    +		"severity": sevStr,
    +		"message":  s,
    +	})
    +	g.m[severity].Output(2, string(b))
    +}
    +
    +func (g *loggerT) printf(severity int, format string, args ...any) {
    +	// Note the discard check is duplicated in each print func, rather than in
    +	// output, to avoid the expensive Sprint calls.
    +	// De-duplicating this by moving to output would be a significant performance regression!
    +	if lg := g.m[severity]; lg.Writer() == io.Discard {
    +		return
    +	}
    +	g.output(severity, sprintf(format, args...))
    +}
    +
    +func (g *loggerT) print(severity int, v ...any) {
    +	if lg := g.m[severity]; lg.Writer() == io.Discard {
    +		return
    +	}
    +	g.output(severity, sprint(v...))
    +}
    +
    +func (g *loggerT) println(severity int, v ...any) {
    +	if lg := g.m[severity]; lg.Writer() == io.Discard {
    +		return
    +	}
    +	g.output(severity, sprintln(v...))
    +}
    +
    +func (g *loggerT) Info(args ...any) {
    +	g.print(infoLog, args...)
    +}
    +
    +func (g *loggerT) Infoln(args ...any) {
    +	g.println(infoLog, args...)
    +}
    +
    +func (g *loggerT) Infof(format string, args ...any) {
    +	g.printf(infoLog, format, args...)
    +}
    +
    +func (g *loggerT) Warning(args ...any) {
    +	g.print(warningLog, args...)
    +}
    +
    +func (g *loggerT) Warningln(args ...any) {
    +	g.println(warningLog, args...)
    +}
    +
    +func (g *loggerT) Warningf(format string, args ...any) {
    +	g.printf(warningLog, format, args...)
    +}
    +
    +func (g *loggerT) Error(args ...any) {
    +	g.print(errorLog, args...)
    +}
    +
    +func (g *loggerT) Errorln(args ...any) {
    +	g.println(errorLog, args...)
    +}
    +
    +func (g *loggerT) Errorf(format string, args ...any) {
    +	g.printf(errorLog, format, args...)
    +}
    +
    +func (g *loggerT) Fatal(args ...any) {
    +	g.print(fatalLog, args...)
    +	exit(1)
    +}
    +
    +func (g *loggerT) Fatalln(args ...any) {
    +	g.println(fatalLog, args...)
    +	exit(1)
    +}
    +
    +func (g *loggerT) Fatalf(format string, args ...any) {
    +	g.printf(fatalLog, format, args...)
    +	exit(1)
    +}
    +
    +func (g *loggerT) V(l int) bool {
    +	return l <= g.v
    +}
    +
    +// LoggerV2Config configures the LoggerV2 implementation.
    +type LoggerV2Config struct {
    +	// Verbosity sets the verbosity level of the logger.
    +	Verbosity int
    +	// FormatJSON controls whether the logger should output logs in JSON format.
    +	FormatJSON bool
    +}
    +
    +// combineLoggers returns a combined logger for both higher & lower severity logs,
    +// or only one if the other is io.Discard.
    +//
    +// This uses io.Discard instead of io.MultiWriter when all loggers
    +// are set to io.Discard. Both this package and the standard log package have
    +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of
    +// this writing).
    +func combineLoggers(lower, higher io.Writer) io.Writer {
    +	if lower == io.Discard {
    +		return higher
    +	}
    +	if higher == io.Discard {
    +		return lower
    +	}
    +	return io.MultiWriter(lower, higher)
    +}
    +
    +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
    +// The infoW, warningW, and errorW writers are used to write log messages of
    +// different severity levels.
    +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
    +	flag := log.LstdFlags
    +	if c.FormatJSON {
    +		flag = 0
    +	}
    +
    +	warningW = combineLoggers(infoW, warningW)
    +	errorW = combineLoggers(errorW, warningW)
    +
    +	fatalW := errorW
    +
    +	m := []*log.Logger{
    +		log.New(infoW, "", flag),
    +		log.New(warningW, "", flag),
    +		log.New(errorW, "", flag),
    +		log.New(fatalW, "", flag),
    +	}
    +	return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
    +}
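Passing io.Discard is the case combineLoggers optimizes for: a discarded severity is skipped before any formatting work happens. A sketch using the public constructor, which (per the later hunks in this patch) delegates to internal.NewLoggerV2:

package example

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

// Silence info and warning output entirely; only errors reach stderr.
func init() {
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr))
}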
    diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
    index b1674d8267..4b20358570 100644
    --- a/vendor/google.golang.org/grpc/grpclog/logger.go
    +++ b/vendor/google.golang.org/grpc/grpclog/logger.go
    @@ -18,70 +18,17 @@
     
     package grpclog
     
    -import "google.golang.org/grpc/internal/grpclog"
    +import "google.golang.org/grpc/grpclog/internal"
     
     // Logger mimics golang's standard Logger as an interface.
     //
     // Deprecated: use LoggerV2.
    -type Logger interface {
    -	Fatal(args ...any)
    -	Fatalf(format string, args ...any)
    -	Fatalln(args ...any)
    -	Print(args ...any)
    -	Printf(format string, args ...any)
    -	Println(args ...any)
    -}
    +type Logger internal.Logger
     
     // SetLogger sets the logger that is used in grpc. Call only from
     // init() functions.
     //
     // Deprecated: use SetLoggerV2.
     func SetLogger(l Logger) {
    -	grpclog.Logger = &loggerWrapper{Logger: l}
    -}
    -
    -// loggerWrapper wraps Logger into a LoggerV2.
    -type loggerWrapper struct {
    -	Logger
    -}
    -
    -func (g *loggerWrapper) Info(args ...any) {
    -	g.Logger.Print(args...)
    -}
    -
    -func (g *loggerWrapper) Infoln(args ...any) {
    -	g.Logger.Println(args...)
    -}
    -
    -func (g *loggerWrapper) Infof(format string, args ...any) {
    -	g.Logger.Printf(format, args...)
    -}
    -
    -func (g *loggerWrapper) Warning(args ...any) {
    -	g.Logger.Print(args...)
    -}
    -
    -func (g *loggerWrapper) Warningln(args ...any) {
    -	g.Logger.Println(args...)
    -}
    -
    -func (g *loggerWrapper) Warningf(format string, args ...any) {
    -	g.Logger.Printf(format, args...)
    -}
    -
    -func (g *loggerWrapper) Error(args ...any) {
    -	g.Logger.Print(args...)
    -}
    -
    -func (g *loggerWrapper) Errorln(args ...any) {
    -	g.Logger.Println(args...)
    -}
    -
    -func (g *loggerWrapper) Errorf(format string, args ...any) {
    -	g.Logger.Printf(format, args...)
    -}
    -
    -func (g *loggerWrapper) V(l int) bool {
    -	// Returns true for all verbose level.
    -	return true
    +	internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
     }
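Because the standard library's *log.Logger has the Print/Fatal method set of the deprecated Logger interface, the legacy entry point still works; it is now wrapped by internal.LoggerWrapper. An assumed example:

package example

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

// Deprecated path, kept for existing callers: the *log.Logger is wrapped
// into a LoggerV2 behind the scenes. Prefer SetLoggerV2 in new code.
func init() {
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}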
    diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
    index ecfd36d713..892dc13d16 100644
    --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
    +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
    @@ -19,52 +19,16 @@
     package grpclog
     
     import (
    -	"encoding/json"
    -	"fmt"
     	"io"
    -	"log"
     	"os"
     	"strconv"
     	"strings"
     
    -	"google.golang.org/grpc/internal/grpclog"
    +	"google.golang.org/grpc/grpclog/internal"
     )
     
     // LoggerV2 does underlying logging work for grpclog.
    -type LoggerV2 interface {
    -	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
    -	Info(args ...any)
    -	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
    -	Infoln(args ...any)
    -	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
    -	Infof(format string, args ...any)
    -	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
    -	Warning(args ...any)
    -	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
    -	Warningln(args ...any)
    -	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
    -	Warningf(format string, args ...any)
    -	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
    -	Error(args ...any)
    -	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
    -	Errorln(args ...any)
    -	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
    -	Errorf(format string, args ...any)
    -	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
    -	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    -	// Implementations may also call os.Exit() with a non-zero exit code.
    -	Fatal(args ...any)
    -	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
    -	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    -	// Implementations may also call os.Exit() with a non-zero exit code.
    -	Fatalln(args ...any)
    -	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
    -	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    -	// Implementations may also call os.Exit() with a non-zero exit code.
    -	Fatalf(format string, args ...any)
    -	// V reports whether verbosity level l is at least the requested verbose level.
    -	V(l int) bool
    -}
    +type LoggerV2 internal.LoggerV2
     
     // SetLoggerV2 sets logger that is used in grpc to a V2 logger.
     // Not mutex-protected, should be called before any gRPC functions.
    @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) {
     	if _, ok := l.(*componentData); ok {
     		panic("cannot use component logger as grpclog logger")
     	}
    -	grpclog.Logger = l
    -	grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
    -}
    -
    -const (
    -	// infoLog indicates Info severity.
    -	infoLog int = iota
    -	// warningLog indicates Warning severity.
    -	warningLog
    -	// errorLog indicates Error severity.
    -	errorLog
    -	// fatalLog indicates Fatal severity.
    -	fatalLog
    -)
    -
    -// severityName contains the string representation of each severity.
    -var severityName = []string{
    -	infoLog:    "INFO",
    -	warningLog: "WARNING",
    -	errorLog:   "ERROR",
    -	fatalLog:   "FATAL",
    -}
    -
    -// loggerT is the default logger used by grpclog.
    -type loggerT struct {
    -	m          []*log.Logger
    -	v          int
    -	jsonFormat bool
    +	internal.LoggerV2Impl = l
    +	internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
     }
     
     // NewLoggerV2 creates a loggerV2 with the provided writers.
    @@ -108,32 +46,13 @@ type loggerT struct {
     // Warning logs will be written to warningW and infoW.
     // Info logs will be written to infoW.
     func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
    -	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
    +	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
     }
     
     // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
     // verbosity level.
     func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
    -	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
    -}
    -
    -type loggerV2Config struct {
    -	verbose    int
    -	jsonFormat bool
    -}
    -
    -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
    -	var m []*log.Logger
    -	flag := log.LstdFlags
    -	if c.jsonFormat {
    -		flag = 0
    -	}
    -	m = append(m, log.New(infoW, "", flag))
    -	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
    -	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
    -	m = append(m, log.New(ew, "", flag))
    -	m = append(m, log.New(ew, "", flag))
    -	return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
    +	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
     }
     
     // newLoggerV2 creates a loggerV2 to be used as default logger.
    @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 {
     
     	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
     
    -	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
    -		verbose:    v,
    -		jsonFormat: jsonFormat,
    -	})
    -}
    -
    -func (g *loggerT) output(severity int, s string) {
    -	sevStr := severityName[severity]
    -	if !g.jsonFormat {
    -		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
    -		return
    -	}
    -	// TODO: we can also include the logging component, but that needs more
    -	// (API) changes.
    -	b, _ := json.Marshal(map[string]string{
    -		"severity": sevStr,
    -		"message":  s,
    +	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
    +		Verbosity:  v,
    +		FormatJSON: jsonFormat,
     	})
    -	g.m[severity].Output(2, string(b))
    -}
    -
    -func (g *loggerT) Info(args ...any) {
    -	g.output(infoLog, fmt.Sprint(args...))
    -}
    -
    -func (g *loggerT) Infoln(args ...any) {
    -	g.output(infoLog, fmt.Sprintln(args...))
    -}
    -
    -func (g *loggerT) Infof(format string, args ...any) {
    -	g.output(infoLog, fmt.Sprintf(format, args...))
    -}
    -
    -func (g *loggerT) Warning(args ...any) {
    -	g.output(warningLog, fmt.Sprint(args...))
    -}
    -
    -func (g *loggerT) Warningln(args ...any) {
    -	g.output(warningLog, fmt.Sprintln(args...))
    -}
    -
    -func (g *loggerT) Warningf(format string, args ...any) {
    -	g.output(warningLog, fmt.Sprintf(format, args...))
    -}
    -
    -func (g *loggerT) Error(args ...any) {
    -	g.output(errorLog, fmt.Sprint(args...))
    -}
    -
    -func (g *loggerT) Errorln(args ...any) {
    -	g.output(errorLog, fmt.Sprintln(args...))
    -}
    -
    -func (g *loggerT) Errorf(format string, args ...any) {
    -	g.output(errorLog, fmt.Sprintf(format, args...))
    -}
    -
    -func (g *loggerT) Fatal(args ...any) {
    -	g.output(fatalLog, fmt.Sprint(args...))
    -	os.Exit(1)
    -}
    -
    -func (g *loggerT) Fatalln(args ...any) {
    -	g.output(fatalLog, fmt.Sprintln(args...))
    -	os.Exit(1)
    -}
    -
    -func (g *loggerT) Fatalf(format string, args ...any) {
    -	g.output(fatalLog, fmt.Sprintf(format, args...))
    -	os.Exit(1)
    -}
    -
    -func (g *loggerT) V(l int) bool {
    -	return l <= g.v
     }
     
     // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
    @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool {
     //
     // Notice: This type is EXPERIMENTAL and may be changed or removed in a
     // later release.
    -type DepthLoggerV2 interface {
    -	LoggerV2
    -	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	InfoDepth(depth int, args ...any)
    -	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	WarningDepth(depth int, args ...any)
    -	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	ErrorDepth(depth int, args ...any)
    -	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	FatalDepth(depth int, args ...any)
    -}
    +type DepthLoggerV2 internal.DepthLoggerV2
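A short sketch of the verbosity path that this refactor keeps intact: V-guarded logging with an explicit level, equivalent to setting GRPC_GO_LOG_VERBOSITY_LEVEL for the default logger.

package example

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))
}

func verbose() {
	if grpclog.V(2) { // only true when the configured verbosity is >= 2
		grpclog.Infoln("details that lower verbosity levels skip")
	}
}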
    diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
    index fed1c011a3..b6ae7f2585 100644
    --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
    +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
    @@ -25,10 +25,10 @@ package backoff
     import (
     	"context"
     	"errors"
    +	rand "math/rand/v2"
     	"time"
     
     	grpcbackoff "google.golang.org/grpc/backoff"
    -	"google.golang.org/grpc/internal/grpcrand"
     )
     
     // Strategy defines the methodology for backing off after a grpc connection
    @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
     	}
     	// Randomize backoff delays so that if a cluster of requests start at
     	// the same time, they won't operate in lockstep.
    -	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
    +	backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
     	if backoff < 0 {
     		return 0
     	}
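The only behavioral surface in this hunk is the jitter term; a standalone sketch of the same formula using math/rand/v2, whose top-level Float64 is concurrency-safe and automatically seeded (presumably why the internal grpcrand shim could be dropped):

package example

import (
	rand "math/rand/v2"
	"time"
)

// jitter spreads base by ±fraction and clamps at zero, mirroring the
// expression used in Exponential.Backoff above.
func jitter(base time.Duration, fraction float64) time.Duration {
	d := float64(base) * (1 + fraction*(rand.Float64()*2-1))
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}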
    diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
    new file mode 100644
    index 0000000000..85540f86a7
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
    @@ -0,0 +1,84 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package gracefulswitch
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +
    +	"google.golang.org/grpc/balancer"
    +	"google.golang.org/grpc/serviceconfig"
    +)
    +
    +type lbConfig struct {
    +	serviceconfig.LoadBalancingConfig
    +
    +	childBuilder balancer.Builder
    +	childConfig  serviceconfig.LoadBalancingConfig
    +}
    +
    +// ChildName returns the name of the child balancer of the gracefulswitch
    +// Balancer.
    +func ChildName(l serviceconfig.LoadBalancingConfig) string {
    +	return l.(*lbConfig).childBuilder.Name()
    +}
    +
    +// ParseConfig parses a child config list and returns a LB config for the
    +// gracefulswitch Balancer.
    +//
    +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
    +// names + configs as the format of the "loadBalancingConfig" field in
    +// ServiceConfig.  It returns a type that should be passed to
    +// UpdateClientConnState in the BalancerConfig field.
    +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    +	var lbCfg []map[string]json.RawMessage
    +	if err := json.Unmarshal(cfg, &lbCfg); err != nil {
    +		return nil, err
    +	}
    +	for i, e := range lbCfg {
    +		if len(e) != 1 {
    +			return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i)
    +		}
    +
    +		var name string
    +		var jsonCfg json.RawMessage
    +		for name, jsonCfg = range e {
    +		}
    +
    +		builder := balancer.Get(name)
    +		if builder == nil {
    +			// Skip unregistered balancer names.
    +			continue
    +		}
    +
    +		parser, ok := builder.(balancer.ConfigParser)
    +		if !ok {
    +			// This is a valid child with no config.
    +			return &lbConfig{childBuilder: builder}, nil
    +		}
    +
    +		cfg, err := parser.ParseConfig(jsonCfg)
    +		if err != nil {
    +			return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
    +		}
    +		return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
    +	}
    +
    +	return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg))
    +}
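For illustration only (the package is internal to the grpc module, so this compiles only from within it): ParseConfig expects the same shape as the service-config "loadBalancingConfig" field, a JSON array of single-entry objects. "round_robin" is used here purely as a well-known registered policy.

package example

import (
	"encoding/json"
	"fmt"

	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
)

func parseExample() {
	cfg := json.RawMessage(`[{"round_robin": {}}]`)
	lbCfg, err := gracefulswitch.ParseConfig(cfg)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// The parsed value is what UpdateClientConnState expects in BalancerConfig.
	fmt.Println("selected child policy:", gracefulswitch.ChildName(lbCfg))
}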
    diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
    index 3c594e6e4e..73bb4c4ee9 100644
    --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
    +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
    @@ -94,14 +94,23 @@ func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
     // process is not complete when this method returns. This method must be called
     // synchronously alongside the rest of the balancer.Balancer methods this
     // Graceful Switch Balancer implements.
    +//
    +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState
    +// to cause the Balancer to automatically change to the new child when necessary.
     func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
    +	_, err := gsb.switchTo(builder)
    +	return err
    +}
    +
    +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) {
     	gsb.mu.Lock()
     	if gsb.closed {
     		gsb.mu.Unlock()
    -		return errBalancerClosed
    +		return nil, errBalancerClosed
     	}
     	bw := &balancerWrapper{
    -		gsb: gsb,
    +		builder: builder,
    +		gsb:     gsb,
     		lastState: balancer.State{
     			ConnectivityState: connectivity.Connecting,
     			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
    @@ -129,7 +138,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
     			gsb.balancerCurrent = nil
     		}
     		gsb.mu.Unlock()
    -		return balancer.ErrBadResolverState
    +		return nil, balancer.ErrBadResolverState
     	}
     
     	// This write doesn't need to take gsb.mu because this field never gets read
    @@ -138,7 +147,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
     	// bw.Balancer field will never be forwarded to until this SwitchTo()
     	// function returns.
     	bw.Balancer = newBalancer
    -	return nil
    +	return bw, nil
     }
     
     // Returns nil if the graceful switch balancer is closed.
    @@ -152,12 +161,32 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper {
     }
     
     // UpdateClientConnState forwards the update to the latest balancer created.
    +//
    +// If the state's BalancerConfig is the config returned by a call to
    +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo
    +// the balancer indicated by the config before forwarding its config to it, if
    +// necessary.
     func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
     	// The resolver data is only relevant to the most recent LB Policy.
     	balToUpdate := gsb.latestBalancer()
    +	gsbCfg, ok := state.BalancerConfig.(*lbConfig)
    +	if ok {
    +		// Switch to the child in the config unless it is already active.
    +		if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() {
    +			var err error
    +			balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder)
    +			if err != nil {
    +				return fmt.Errorf("could not switch to new child balancer: %w", err)
    +			}
    +		}
    +		// Unwrap the child balancer's config.
    +		state.BalancerConfig = gsbCfg.childConfig
    +	}
    +
     	if balToUpdate == nil {
     		return errBalancerClosed
     	}
    +
     	// Perform this call without gsb.mu to prevent deadlocks if the child calls
     	// back into the channel. The latest balancer can never be closed during a
     	// call from the channel, even without gsb.mu held.
    @@ -169,6 +198,10 @@ func (gsb *Balancer) ResolverError(err error) {
     	// The resolver data is only relevant to the most recent LB Policy.
     	balToUpdate := gsb.latestBalancer()
     	if balToUpdate == nil {
    +		gsb.cc.UpdateState(balancer.State{
    +			ConnectivityState: connectivity.TransientFailure,
    +			Picker:            base.NewErrPicker(err),
    +		})
     		return
     	}
     	// Perform this call without gsb.mu to prevent deadlocks if the child calls
    @@ -261,7 +294,8 @@ func (gsb *Balancer) Close() {
     // graceful switch logic.
     type balancerWrapper struct {
     	balancer.Balancer
    -	gsb *Balancer
    +	gsb     *Balancer
    +	builder balancer.Builder
     
     	lastState balancer.State
     	subconns  map[balancer.SubConn]bool // subconns created by this balancer
    diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
    index 0f31274a3c..9669328914 100644
    --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
    +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
    @@ -25,11 +25,12 @@ import (
     	"sync/atomic"
     	"time"
     
    -	"github.com/golang/protobuf/proto"
    -	"github.com/golang/protobuf/ptypes"
     	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/status"
    +	"google.golang.org/protobuf/proto"
    +	"google.golang.org/protobuf/types/known/durationpb"
    +	"google.golang.org/protobuf/types/known/timestamppb"
     )
     
     type callIDGenerator struct {
    @@ -64,7 +65,7 @@ type TruncatingMethodLogger struct {
     	callID          uint64
     	idWithinCallGen *callIDGenerator
     
    -	sink Sink // TODO(blog): make this plugable.
    +	sink Sink // TODO(blog): make this pluggable.
     }
     
     // NewTruncatingMethodLogger returns a new truncating method logger.
    @@ -79,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
     		callID:          idGen.next(),
     		idWithinCallGen: &callIDGenerator{},
     
    -		sink: DefaultSink, // TODO(blog): make it plugable.
    +		sink: DefaultSink, // TODO(blog): make it pluggable.
     	}
     }
     
    @@ -88,7 +89,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
     // in TruncatingMethodLogger as possible.
     func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
     	m := c.toProto()
    -	timestamp, _ := ptypes.TimestampProto(time.Now())
    +	timestamp := timestamppb.Now()
     	m.Timestamp = timestamp
     	m.CallId = ml.callID
     	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
    @@ -105,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
     }
     
     // Log creates a proto binary log entry, and logs it to the sink.
    -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
    +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
     	ml.sink.Write(ml.Build(c))
     }
     
    @@ -178,7 +179,7 @@ func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
     		Authority:  c.Authority,
     	}
     	if c.Timeout > 0 {
    -		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
    +		clientHeader.Timeout = durationpb.New(c.Timeout)
     	}
     	ret := &binlogpb.GrpcLogEntry{
     		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
    @@ -396,7 +397,7 @@ func metadataKeyOmit(key string) bool {
     	switch key {
     	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
     		return true
    -	case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
    +	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
     		return false
     	}
     	return strings.HasPrefix(key, "grpc-")
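The protobuf changes above swap the deprecated ptypes helpers for the well-known-type packages; shown in isolation, the equivalents are:

package example

import (
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// timestamppb.Now replaces ptypes.TimestampProto(time.Now()) and cannot fail;
// durationpb.New replaces ptypes.DurationProto for a time.Duration.
func convert(timeout time.Duration) (*timestamppb.Timestamp, *durationpb.Duration) {
	return timestamppb.Now(), durationpb.New(timeout)
}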
    diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
    index 264de387c2..9ea598b14c 100644
    --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
    +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
    @@ -25,8 +25,8 @@ import (
     	"sync"
     	"time"
     
    -	"github.com/golang/protobuf/proto"
     	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
    +	"google.golang.org/protobuf/proto"
     )
     
     var (
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go
    new file mode 100644
    index 0000000000..3ec662799a
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go
    @@ -0,0 +1,270 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"fmt"
    +	"sync/atomic"
    +
    +	"google.golang.org/grpc/connectivity"
    +)
    +
    +// Channel represents a channel within channelz, which includes metrics and
    +// internal channelz data, such as channelz id, child list, etc.
    +type Channel struct {
    +	Entity
    +	// ID is the channelz id of this channel.
    +	ID int64
    +	// RefName is the human readable reference string of this channel.
    +	RefName string
    +
    +	closeCalled bool
    +	nestedChans map[int64]string
    +	subChans    map[int64]string
    +	Parent      *Channel
    +	trace       *ChannelTrace
    +	// traceRefCount is the number of trace events that reference this channel.
    +	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
    +	traceRefCount int32
    +
    +	// ChannelMetrics holds connectivity state, target and call metrics for the
    +	// channel within channelz.
    +	ChannelMetrics ChannelMetrics
    +}
    +
+// channelzIdentifier is implemented so that Channel satisfies the Identifier
+// interface, which is used for nesting.
+func (c *Channel) channelzIdentifier() {}
    +
    +// String returns a string representation of the Channel, including its parent
    +// entity and ID.
    +func (c *Channel) String() string {
    +	if c.Parent == nil {
    +		return fmt.Sprintf("Channel #%d", c.ID)
    +	}
    +	return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
    +}
    +
    +func (c *Channel) id() int64 {
    +	return c.ID
    +}
    +
    +// SubChans returns a copy of the map of sub-channels associated with the
    +// Channel.
    +func (c *Channel) SubChans() map[int64]string {
    +	db.mu.RLock()
    +	defer db.mu.RUnlock()
    +	return copyMap(c.subChans)
    +}
    +
    +// NestedChans returns a copy of the map of nested channels associated with the
    +// Channel.
    +func (c *Channel) NestedChans() map[int64]string {
    +	db.mu.RLock()
    +	defer db.mu.RUnlock()
    +	return copyMap(c.nestedChans)
    +}
    +
    +// Trace returns a copy of the Channel's trace data.
    +func (c *Channel) Trace() *ChannelTrace {
    +	db.mu.RLock()
    +	defer db.mu.RUnlock()
    +	return c.trace.copy()
    +}
    +
    +// ChannelMetrics holds connectivity state, target and call metrics for the
    +// channel within channelz.
    +type ChannelMetrics struct {
    +	// The current connectivity state of the channel.
    +	State atomic.Pointer[connectivity.State]
+	// The target this channel originally tried to connect to.  May be absent.
    +	Target atomic.Pointer[string]
    +	// The number of calls started on the channel.
    +	CallsStarted atomic.Int64
    +	// The number of calls that have completed with an OK status.
    +	CallsSucceeded atomic.Int64
+	// The number of calls that have completed with a non-OK status.
    +	CallsFailed atomic.Int64
    +	// The last time a call was started on the channel.
    +	LastCallStartedTimestamp atomic.Int64
    +}
    +
    +// CopyFrom copies the metrics in o to c.  For testing only.
    +func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
    +	c.State.Store(o.State.Load())
    +	c.Target.Store(o.Target.Load())
    +	c.CallsStarted.Store(o.CallsStarted.Load())
    +	c.CallsSucceeded.Store(o.CallsSucceeded.Load())
    +	c.CallsFailed.Store(o.CallsFailed.Load())
    +	c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
    +}
    +
    +// Equal returns true iff the metrics of c are the same as the metrics of o.
    +// For testing only.
    +func (c *ChannelMetrics) Equal(o any) bool {
    +	oc, ok := o.(*ChannelMetrics)
    +	if !ok {
    +		return false
    +	}
    +	if (c.State.Load() == nil) != (oc.State.Load() == nil) {
    +		return false
    +	}
    +	if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
    +		return false
    +	}
    +	if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
    +		return false
    +	}
    +	if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
    +		return false
    +	}
    +	return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
    +		c.CallsFailed.Load() == oc.CallsFailed.Load() &&
    +		c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
    +		c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
    +}
    +
    +func strFromPointer(s *string) string {
    +	if s == nil {
    +		return ""
    +	}
    +	return *s
    +}
    +
    +// String returns a string representation of the ChannelMetrics, including its
    +// state, target, and call metrics.
    +func (c *ChannelMetrics) String() string {
    +	return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
    +		c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
    +	)
    +}
    +
    +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
    +// specified initial values for testing purposes.
    +func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
    +	c := &ChannelMetrics{}
    +	c.State.Store(&state)
    +	c.Target.Store(&target)
    +	c.CallsStarted.Store(started)
    +	c.CallsSucceeded.Store(succeeded)
    +	c.CallsFailed.Store(failed)
    +	c.LastCallStartedTimestamp.Store(timestamp)
    +	return c
    +}
    +
    +func (c *Channel) addChild(id int64, e entry) {
    +	switch v := e.(type) {
    +	case *SubChannel:
    +		c.subChans[id] = v.RefName
    +	case *Channel:
    +		c.nestedChans[id] = v.RefName
    +	default:
    +		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
    +	}
    +}
    +
    +func (c *Channel) deleteChild(id int64) {
    +	delete(c.subChans, id)
    +	delete(c.nestedChans, id)
    +	c.deleteSelfIfReady()
    +}
    +
    +func (c *Channel) triggerDelete() {
    +	c.closeCalled = true
    +	c.deleteSelfIfReady()
    +}
    +
    +func (c *Channel) getParentID() int64 {
    +	if c.Parent == nil {
    +		return -1
    +	}
    +	return c.Parent.ID
    +}
    +
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
+// corresponding grpc object must have been invoked, and the channel must not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
    +func (c *Channel) deleteSelfFromTree() (deleted bool) {
    +	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
    +		return false
    +	}
    +	// not top channel
    +	if c.Parent != nil {
    +		c.Parent.deleteChild(c.ID)
    +	}
    +	return true
    +}
    +
    +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
    +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
    +// channel, and its memory will be garbage collected.
    +//
+// The trace reference count of the channel must be 0 in order for it to be deleted from the
+// map. The channel tracing gRFC specifies that as long as some other trace holds a reference
+// to an entity, the trace of the referenced entity must not be deleted. In order to release
+// the resource allocated by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool indicating whether the channel can be safely deleted from the map.
    +func (c *Channel) deleteSelfFromMap() (delete bool) {
    +	return c.getTraceRefCount() == 0
    +}
    +
    +// deleteSelfIfReady tries to delete the channel itself from the channelz database.
    +// The delete process includes two steps:
    +//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
    +//     parent's child list.
    +//  2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
    +//     will return entry not found error.
    +func (c *Channel) deleteSelfIfReady() {
    +	if !c.deleteSelfFromTree() {
    +		return
    +	}
    +	if !c.deleteSelfFromMap() {
    +		return
    +	}
    +	db.deleteEntry(c.ID)
    +	c.trace.clear()
    +}
    +
    +func (c *Channel) getChannelTrace() *ChannelTrace {
    +	return c.trace
    +}
    +
    +func (c *Channel) incrTraceRefCount() {
    +	atomic.AddInt32(&c.traceRefCount, 1)
    +}
    +
    +func (c *Channel) decrTraceRefCount() {
    +	atomic.AddInt32(&c.traceRefCount, -1)
    +}
    +
    +func (c *Channel) getTraceRefCount() int {
    +	i := atomic.LoadInt32(&c.traceRefCount)
    +	return int(i)
    +}
    +
    +func (c *Channel) getRefName() string {
    +	return c.RefName
    +}
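
The counters in ChannelMetrics above are plain atomic fields, so hot-path updates never
take the channelz lock and readers get a per-field snapshot. A hedged, standalone sketch
of that pattern (the callMetrics type below is invented for illustration and merely
mirrors the ChannelMetrics style; it is not part of the vendored file):

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	// callMetrics mirrors the atomic-field style of channelz.ChannelMetrics.
	type callMetrics struct {
		CallsStarted             atomic.Int64
		CallsSucceeded           atomic.Int64
		LastCallStartedTimestamp atomic.Int64
		Target                   atomic.Pointer[string]
	}

	func main() {
		var m callMetrics
		target := "dns:///example.org:443"
		m.Target.Store(&target)

		// Writers update each metric without holding any mutex.
		m.CallsStarted.Add(1)
		m.LastCallStartedTimestamp.Store(time.Now().UnixNano())
		m.CallsSucceeded.Add(1)

		// Readers load each field independently.
		fmt.Println(*m.Target.Load(), m.CallsStarted.Load(), m.CallsSucceeded.Load())
	}
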
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
    new file mode 100644
    index 0000000000..64c791953d
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
    @@ -0,0 +1,395 @@
    +/*
    + *
    + * Copyright 2018 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"fmt"
    +	"sort"
    +	"sync"
    +	"time"
    +)
    +
    +// entry represents a node in the channelz database.
    +type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child whose channelz id is id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if
+	// the child list is not empty, deletion from the database is put on hold
+	// until the last child has been deleted.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before,
+	// and whether the child list is now empty. If both conditions are met, it
+	// deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
+	getParentID() int64
    +	Entity
    +}
    +
    +// channelMap is the storage data structure for channelz.
    +//
+// Methods of channelMap can be divided into two categories with respect to
+// locking:
+//
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called while the global lock is held.
+//
+// A method of the second type must always be called from within a method of the first type.
    +type channelMap struct {
    +	mu               sync.RWMutex
    +	topLevelChannels map[int64]struct{}
    +	channels         map[int64]*Channel
    +	subChannels      map[int64]*SubChannel
    +	sockets          map[int64]*Socket
    +	servers          map[int64]*Server
    +}
    +
    +func newChannelMap() *channelMap {
    +	return &channelMap{
    +		topLevelChannels: make(map[int64]struct{}),
    +		channels:         make(map[int64]*Channel),
    +		subChannels:      make(map[int64]*SubChannel),
    +		sockets:          make(map[int64]*Socket),
    +		servers:          make(map[int64]*Server),
    +	}
    +}
    +
    +func (c *channelMap) addServer(id int64, s *Server) {
    +	c.mu.Lock()
    +	defer c.mu.Unlock()
    +	s.cm = c
    +	c.servers[id] = s
    +}
    +
    +func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
    +	c.mu.Lock()
    +	defer c.mu.Unlock()
    +	cn.trace.cm = c
    +	c.channels[id] = cn
    +	if isTopChannel {
    +		c.topLevelChannels[id] = struct{}{}
    +	} else if p := c.channels[pid]; p != nil {
    +		p.addChild(id, cn)
    +	} else {
    +		logger.Infof("channel %d references invalid parent ID %d", id, pid)
    +	}
    +}
    +
    +func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
    +	c.mu.Lock()
    +	defer c.mu.Unlock()
    +	sc.trace.cm = c
    +	c.subChannels[id] = sc
    +	if p := c.channels[pid]; p != nil {
    +		p.addChild(id, sc)
    +	} else {
    +		logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
    +	}
    +}
    +
    +func (c *channelMap) addSocket(s *Socket) {
    +	c.mu.Lock()
    +	defer c.mu.Unlock()
    +	s.cm = c
    +	c.sockets[s.ID] = s
    +	if s.Parent == nil {
    +		logger.Infof("normal socket %d has no parent", s.ID)
    +	}
    +	s.Parent.(entry).addChild(s.ID, s)
    +}
    +
+// removeEntry triggers the removal of an entry, which may not actually delete
+// the entry if it has to wait for the deletion of its children, or until no
+// other entity's channel trace references it.  This may lead to a chain of
+// entry deletions. For example, deleting the last socket of a gracefully
+// shutting down server will also lead to the server being deleted.
    +func (c *channelMap) removeEntry(id int64) {
    +	c.mu.Lock()
    +	defer c.mu.Unlock()
    +	c.findEntry(id).triggerDelete()
    +}
    +
    +// tracedChannel represents tracing operations which are present on both
    +// channels and subChannels.
    +type tracedChannel interface {
    +	getChannelTrace() *ChannelTrace
    +	incrTraceRefCount()
    +	decrTraceRefCount()
    +	getRefName() string
    +}
    +
    +// c.mu must be held by the caller
    +func (c *channelMap) decrTraceRefCount(id int64) {
    +	e := c.findEntry(id)
    +	if v, ok := e.(tracedChannel); ok {
    +		v.decrTraceRefCount()
    +		e.deleteSelfIfReady()
    +	}
    +}
    +
    +// c.mu must be held by the caller.
    +func (c *channelMap) findEntry(id int64) entry {
    +	if v, ok := c.channels[id]; ok {
    +		return v
    +	}
    +	if v, ok := c.subChannels[id]; ok {
    +		return v
    +	}
    +	if v, ok := c.servers[id]; ok {
    +		return v
    +	}
    +	if v, ok := c.sockets[id]; ok {
    +		return v
    +	}
    +	return &dummyEntry{idNotFound: id}
    +}
    +
+// c.mu must be held by the caller.
+//
+// deleteEntry deletes an entry from the channelMap. Before calling this method,
+// the caller must check that this entry is ready to be deleted, i.e. removeEntry()
+// has been called on it and no children still exist.
    +func (c *channelMap) deleteEntry(id int64) entry {
    +	if v, ok := c.sockets[id]; ok {
    +		delete(c.sockets, id)
    +		return v
    +	}
    +	if v, ok := c.subChannels[id]; ok {
    +		delete(c.subChannels, id)
    +		return v
    +	}
    +	if v, ok := c.channels[id]; ok {
    +		delete(c.channels, id)
    +		delete(c.topLevelChannels, id)
    +		return v
    +	}
    +	if v, ok := c.servers[id]; ok {
    +		delete(c.servers, id)
    +		return v
    +	}
    +	return &dummyEntry{idNotFound: id}
    +}
    +
    +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) {
    +	c.mu.Lock()
    +	defer c.mu.Unlock()
    +	child := c.findEntry(id)
    +	childTC, ok := child.(tracedChannel)
    +	if !ok {
    +		return
    +	}
    +	childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
    +	if desc.Parent != nil {
    +		parent := c.findEntry(child.getParentID())
    +		var chanType RefChannelType
    +		switch child.(type) {
    +		case *Channel:
    +			chanType = RefChannel
    +		case *SubChannel:
    +			chanType = RefSubChannel
    +		}
    +		if parentTC, ok := parent.(tracedChannel); ok {
    +			parentTC.getChannelTrace().append(&traceEvent{
    +				Desc:      desc.Parent.Desc,
    +				Severity:  desc.Parent.Severity,
    +				Timestamp: time.Now(),
    +				RefID:     id,
    +				RefName:   childTC.getRefName(),
    +				RefType:   chanType,
    +			})
    +			childTC.incrTraceRefCount()
    +		}
    +	}
    +}
    +
    +type int64Slice []int64
    +
    +func (s int64Slice) Len() int           { return len(s) }
    +func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
    +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
    +
    +func copyMap(m map[int64]string) map[int64]string {
    +	n := make(map[int64]string)
    +	for k, v := range m {
    +		n[k] = v
    +	}
    +	return n
    +}
    +
    +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
    +	if maxResults <= 0 {
    +		maxResults = EntriesPerPage
    +	}
    +	c.mu.RLock()
    +	defer c.mu.RUnlock()
    +	l := int64(len(c.topLevelChannels))
    +	ids := make([]int64, 0, l)
    +
    +	for k := range c.topLevelChannels {
    +		ids = append(ids, k)
    +	}
    +	sort.Sort(int64Slice(ids))
    +	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
    +	end := true
    +	var t []*Channel
    +	for _, v := range ids[idx:] {
    +		if len(t) == maxResults {
    +			end = false
    +			break
    +		}
    +		if cn, ok := c.channels[v]; ok {
    +			t = append(t, cn)
    +		}
    +	}
    +	return t, end
    +}
    +
    +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) {
    +	if maxResults <= 0 {
    +		maxResults = EntriesPerPage
    +	}
    +	c.mu.RLock()
    +	defer c.mu.RUnlock()
    +	ids := make([]int64, 0, len(c.servers))
    +	for k := range c.servers {
    +		ids = append(ids, k)
    +	}
    +	sort.Sort(int64Slice(ids))
    +	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
    +	end := true
    +	var s []*Server
    +	for _, v := range ids[idx:] {
    +		if len(s) == maxResults {
    +			end = false
    +			break
    +		}
    +		if svr, ok := c.servers[v]; ok {
    +			s = append(s, svr)
    +		}
    +	}
    +	return s, end
    +}
    +
    +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
    +	if maxResults <= 0 {
    +		maxResults = EntriesPerPage
    +	}
    +	c.mu.RLock()
    +	defer c.mu.RUnlock()
    +	svr, ok := c.servers[id]
    +	if !ok {
    +		// server with id doesn't exist.
    +		return nil, true
    +	}
    +	svrskts := svr.sockets
    +	ids := make([]int64, 0, len(svrskts))
    +	sks := make([]*Socket, 0, min(len(svrskts), maxResults))
    +	for k := range svrskts {
    +		ids = append(ids, k)
    +	}
    +	sort.Sort(int64Slice(ids))
    +	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
    +	end := true
    +	for _, v := range ids[idx:] {
    +		if len(sks) == maxResults {
    +			end = false
    +			break
    +		}
    +		if ns, ok := c.sockets[v]; ok {
    +			sks = append(sks, ns)
    +		}
    +	}
    +	return sks, end
    +}
    +
    +func (c *channelMap) getChannel(id int64) *Channel {
    +	c.mu.RLock()
    +	defer c.mu.RUnlock()
    +	return c.channels[id]
    +}
    +
    +func (c *channelMap) getSubChannel(id int64) *SubChannel {
    +	c.mu.RLock()
    +	defer c.mu.RUnlock()
    +	return c.subChannels[id]
    +}
    +
    +func (c *channelMap) getSocket(id int64) *Socket {
    +	c.mu.RLock()
    +	defer c.mu.RUnlock()
    +	return c.sockets[id]
    +}
    +
    +func (c *channelMap) getServer(id int64) *Server {
    +	c.mu.RLock()
    +	defer c.mu.RUnlock()
    +	return c.servers[id]
    +}
    +
    +type dummyEntry struct {
    +	// dummyEntry is a fake entry to handle entry not found case.
    +	idNotFound int64
    +	Entity
    +}
    +
    +func (d *dummyEntry) String() string {
    +	return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
    +}
    +
    +func (d *dummyEntry) ID() int64 { return d.idNotFound }
    +
    +func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race
+	// condition. For example, there could be a race between ClientConn.Close()
+	// info being propagated to the addrConn and the http2Client: ClientConn.Close()
+	// cancels the context, which causes the http2Client to error out. The error is
+	// caught by the transport monitor before addrConn.tearDown() is called inside
+	// ClientConn.Close(), so the addrConn creates a new transport. When the new
+	// transport is registered in channelz, its parent addrConn may already have
+	// been torn down and deleted from channelz tracking, and thus execution
+	// reaches the code here.
    +	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
    +}
    +
    +func (d *dummyEntry) deleteChild(id int64) {
    +	// It is possible for a normal program to reach here under race condition.
    +	// Refer to the example described in addChild().
    +	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
    +}
    +
    +func (d *dummyEntry) triggerDelete() {
    +	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
    +}
    +
    +func (*dummyEntry) deleteSelfIfReady() {
    +	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
    +}
    +
    +func (*dummyEntry) getParentID() int64 {
    +	return 0
    +}
    +
    +// Entity is implemented by all channelz types.
    +type Entity interface {
    +	isEntity()
    +	fmt.Stringer
    +	id() int64
    +}
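
getTopChannels, getServers, and getServerSockets above all page through results the
same way: collect the IDs, sort them, binary-search for the first ID at or above the
requested start, then take up to maxResults entries; the returned boolean reports
whether the end of the ID space was reached. A self-contained sketch of that pattern
with plain int64 keys (illustrative only, not part of the patch):

	package main

	import (
		"fmt"
		"sort"
	)

	// paginate returns up to maxResults ids that are >= startID, in ascending
	// order, plus a flag reporting whether the end of the id space was reached.
	func paginate(all map[int64]string, startID int64, maxResults int) ([]int64, bool) {
		ids := make([]int64, 0, len(all))
		for id := range all {
			ids = append(ids, id)
		}
		sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
		idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })

		end := true
		var page []int64
		for _, id := range ids[idx:] {
			if len(page) == maxResults {
				end = false
				break
			}
			page = append(page, id)
		}
		return page, end
	}

	func main() {
		entries := map[int64]string{1: "a", 2: "b", 3: "c", 4: "d", 5: "e"}
		page, end := paginate(entries, 2, 2)
		fmt.Println(page, end) // [2 3] false
	}
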
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
    index fc094f3441..078bb81238 100644
    --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
    +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
    @@ -16,47 +16,32 @@
      *
      */
     
    -// Package channelz defines APIs for enabling channelz service, entry
    +// Package channelz defines internal APIs for enabling channelz service, entry
     // registration/deletion, and accessing channelz data. It also defines channelz
     // metric struct formats.
    -//
    -// All APIs in this package are experimental.
     package channelz
     
     import (
    -	"errors"
    -	"sort"
    -	"sync"
     	"sync/atomic"
     	"time"
     
    -	"google.golang.org/grpc/grpclog"
     	"google.golang.org/grpc/internal"
     )
     
    -const (
    -	defaultMaxTraceEntry int32 = 30
    -)
    -
     var (
     	// IDGen is the global channelz entity ID generator.  It should not be used
     	// outside this package except by tests.
     	IDGen IDGenerator
     
    -	db dbWrapper
    -	// EntryPerPage defines the number of channelz entries to be shown on a web page.
    -	EntryPerPage  = int64(50)
    -	curState      int32
    -	maxTraceEntry = defaultMaxTraceEntry
    +	db = newChannelMap()
    +	// EntriesPerPage defines the number of channelz entries to be shown on a web page.
    +	EntriesPerPage = 50
    +	curState       int32
     )
     
     // TurnOn turns on channelz data collection.
     func TurnOn() {
    -	if !IsOn() {
    -		db.set(newChannelMap())
    -		IDGen.Reset()
    -		atomic.StoreInt32(&curState, 1)
    -	}
    +	atomic.StoreInt32(&curState, 1)
     }
     
     func init() {
    @@ -70,49 +55,15 @@ func IsOn() bool {
     	return atomic.LoadInt32(&curState) == 1
     }
     
    -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
    -// Setting it to 0 will disable channel tracing.
    -func SetMaxTraceEntry(i int32) {
    -	atomic.StoreInt32(&maxTraceEntry, i)
    -}
    -
    -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default.
    -func ResetMaxTraceEntryToDefault() {
    -	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
    -}
    -
    -func getMaxTraceEntry() int {
    -	i := atomic.LoadInt32(&maxTraceEntry)
    -	return int(i)
    -}
    -
    -// dbWarpper wraps around a reference to internal channelz data storage, and
    -// provide synchronized functionality to set and get the reference.
    -type dbWrapper struct {
    -	mu sync.RWMutex
    -	DB *channelMap
    -}
    -
    -func (d *dbWrapper) set(db *channelMap) {
    -	d.mu.Lock()
    -	d.DB = db
    -	d.mu.Unlock()
    -}
    -
    -func (d *dbWrapper) get() *channelMap {
    -	d.mu.RLock()
    -	defer d.mu.RUnlock()
    -	return d.DB
    -}
    -
     // GetTopChannels returns a slice of top channel's ChannelMetric, along with a
     // boolean indicating whether there's more top channels to be queried for.
     //
    -// The arg id specifies that only top channel with id at or above it will be included
    -// in the result. The returned slice is up to a length of the arg maxResults or
    -// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
    -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
    -	return db.get().GetTopChannels(id, maxResults)
    +// The arg id specifies that only top channel with id at or above it will be
    +// included in the result. The returned slice is up to a length of the arg
    +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending
    +// id order.
    +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) {
    +	return db.getTopChannels(id, maxResults)
     }
     
     // GetServers returns a slice of server's ServerMetric, along with a
    @@ -120,73 +71,69 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
     //
     // The arg id specifies that only server with id at or above it will be included
     // in the result. The returned slice is up to a length of the arg maxResults or
    -// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
    -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
    -	return db.get().GetServers(id, maxResults)
    +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
    +func GetServers(id int64, maxResults int) ([]*Server, bool) {
    +	return db.getServers(id, maxResults)
     }
     
     // GetServerSockets returns a slice of server's (identified by id) normal socket's
    -// SocketMetric, along with a boolean indicating whether there's more sockets to
    +// SocketMetrics, along with a boolean indicating whether there's more sockets to
     // be queried for.
     //
     // The arg startID specifies that only sockets with id at or above it will be
     // included in the result. The returned slice is up to a length of the arg maxResults
    -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
    -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
    -	return db.get().GetServerSockets(id, startID, maxResults)
    +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
    +func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
    +	return db.getServerSockets(id, startID, maxResults)
     }
     
    -// GetChannel returns the ChannelMetric for the channel (identified by id).
    -func GetChannel(id int64) *ChannelMetric {
    -	return db.get().GetChannel(id)
    +// GetChannel returns the Channel for the channel (identified by id).
    +func GetChannel(id int64) *Channel {
    +	return db.getChannel(id)
     }
     
    -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
    -func GetSubChannel(id int64) *SubChannelMetric {
    -	return db.get().GetSubChannel(id)
    +// GetSubChannel returns the SubChannel for the subchannel (identified by id).
    +func GetSubChannel(id int64) *SubChannel {
    +	return db.getSubChannel(id)
     }
     
    -// GetSocket returns the SocketInternalMetric for the socket (identified by id).
    -func GetSocket(id int64) *SocketMetric {
    -	return db.get().GetSocket(id)
    +// GetSocket returns the Socket for the socket (identified by id).
    +func GetSocket(id int64) *Socket {
    +	return db.getSocket(id)
     }
     
     // GetServer returns the ServerMetric for the server (identified by id).
    -func GetServer(id int64) *ServerMetric {
    -	return db.get().GetServer(id)
    +func GetServer(id int64) *Server {
    +	return db.getServer(id)
     }
     
     // RegisterChannel registers the given channel c in the channelz database with
    -// ref as its reference name, and adds it to the child list of its parent
    -// (identified by pid). pid == nil means no parent.
    +// target as its target and reference name, and adds it to the child list of its
    +// parent.  parent == nil means no parent.
     //
     // Returns a unique channelz identifier assigned to this channel.
     //
     // If channelz is not turned ON, the channelz database is not mutated.
    -func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
    +func RegisterChannel(parent *Channel, target string) *Channel {
     	id := IDGen.genID()
    -	var parent int64
    -	isTopChannel := true
    -	if pid != nil {
    -		isTopChannel = false
    -		parent = pid.Int()
    -	}
     
     	if !IsOn() {
    -		return newIdentifer(RefChannel, id, pid)
    +		return &Channel{ID: id}
     	}
     
    -	cn := &channel{
    -		refName:     ref,
    -		c:           c,
    -		subChans:    make(map[int64]string),
    +	isTopChannel := parent == nil
    +
    +	cn := &Channel{
    +		ID:          id,
    +		RefName:     target,
     		nestedChans: make(map[int64]string),
    -		id:          id,
    -		pid:         parent,
    -		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
    +		subChans:    make(map[int64]string),
    +		Parent:      parent,
    +		trace:       &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
     	}
    -	db.get().addChannel(id, cn, isTopChannel, parent)
    -	return newIdentifer(RefChannel, id, pid)
    +	cn.ChannelMetrics.Target.Store(&target)
    +	db.addChannel(id, cn, isTopChannel, cn.getParentID())
    +	return cn
     }
     
     // RegisterSubChannel registers the given subChannel c in the channelz database
    @@ -196,555 +143,67 @@ func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
     // Returns a unique channelz identifier assigned to this subChannel.
     //
     // If channelz is not turned ON, the channelz database is not mutated.
    -func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
    -	if pid == nil {
    -		return nil, errors.New("a SubChannel's parent id cannot be nil")
    -	}
    +func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
     	id := IDGen.genID()
    -	if !IsOn() {
    -		return newIdentifer(RefSubChannel, id, pid), nil
    +	sc := &SubChannel{
    +		ID:      id,
    +		RefName: ref,
    +		parent:  parent,
     	}
     
    -	sc := &subChannel{
    -		refName: ref,
    -		c:       c,
    -		sockets: make(map[int64]string),
    -		id:      id,
    -		pid:     pid.Int(),
    -		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
    +	if !IsOn() {
    +		return sc
     	}
    -	db.get().addSubChannel(id, sc, pid.Int())
    -	return newIdentifer(RefSubChannel, id, pid), nil
    +
    +	sc.sockets = make(map[int64]string)
    +	sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
    +	db.addSubChannel(id, sc, parent.ID)
    +	return sc
     }
     
     // RegisterServer registers the given server s in channelz database. It returns
     // the unique channelz tracking id assigned to this server.
     //
     // If channelz is not turned ON, the channelz database is not mutated.
    -func RegisterServer(s Server, ref string) *Identifier {
    +func RegisterServer(ref string) *Server {
     	id := IDGen.genID()
     	if !IsOn() {
    -		return newIdentifer(RefServer, id, nil)
    +		return &Server{ID: id}
     	}
     
    -	svr := &server{
    -		refName:       ref,
    -		s:             s,
    +	svr := &Server{
    +		RefName:       ref,
     		sockets:       make(map[int64]string),
     		listenSockets: make(map[int64]string),
    -		id:            id,
    -	}
    -	db.get().addServer(id, svr)
    -	return newIdentifer(RefServer, id, nil)
    -}
    -
    -// RegisterListenSocket registers the given listen socket s in channelz database
    -// with ref as its reference name, and add it to the child list of its parent
    -// (identified by pid). It returns the unique channelz tracking id assigned to
    -// this listen socket.
    -//
    -// If channelz is not turned ON, the channelz database is not mutated.
    -func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
    -	if pid == nil {
    -		return nil, errors.New("a ListenSocket's parent id cannot be 0")
    +		ID:            id,
     	}
    -	id := IDGen.genID()
    -	if !IsOn() {
    -		return newIdentifer(RefListenSocket, id, pid), nil
    -	}
    -
    -	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
    -	db.get().addListenSocket(id, ls, pid.Int())
    -	return newIdentifer(RefListenSocket, id, pid), nil
    +	db.addServer(id, svr)
    +	return svr
     }
     
    -// RegisterNormalSocket registers the given normal socket s in channelz database
    +// RegisterSocket registers the given normal socket s in channelz database
     // with ref as its reference name, and adds it to the child list of its parent
    -// (identified by pid). It returns the unique channelz tracking id assigned to
    -// this normal socket.
    +// (identified by skt.Parent, which must be set). It returns the unique channelz
    +// tracking id assigned to this normal socket.
     //
     // If channelz is not turned ON, the channelz database is not mutated.
    -func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
    -	if pid == nil {
    -		return nil, errors.New("a NormalSocket's parent id cannot be 0")
    -	}
    -	id := IDGen.genID()
    -	if !IsOn() {
    -		return newIdentifer(RefNormalSocket, id, pid), nil
    +func RegisterSocket(skt *Socket) *Socket {
    +	skt.ID = IDGen.genID()
    +	if IsOn() {
    +		db.addSocket(skt)
     	}
    -
    -	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
    -	db.get().addNormalSocket(id, ns, pid.Int())
    -	return newIdentifer(RefNormalSocket, id, pid), nil
    +	return skt
     }
     
     // RemoveEntry removes an entry with unique channelz tracking id to be id from
     // channelz database.
     //
     // If channelz is not turned ON, this function is a no-op.
    -func RemoveEntry(id *Identifier) {
    +func RemoveEntry(id int64) {
     	if !IsOn() {
     		return
     	}
    -	db.get().removeEntry(id.Int())
    -}
    -
    -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
    -// the event to be added to the channel trace.
    -//
    -// The Parent field is optional. It is used for an event that will be recorded
    -// in the entity's parent trace.
    -type TraceEventDesc struct {
    -	Desc     string
    -	Severity Severity
    -	Parent   *TraceEventDesc
    -}
    -
    -// AddTraceEvent adds trace related to the entity with specified id, using the
    -// provided TraceEventDesc.
    -//
    -// If channelz is not turned ON, this will simply log the event descriptions.
    -func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
    -	// Log only the trace description associated with the bottom most entity.
    -	switch desc.Severity {
    -	case CtUnknown, CtInfo:
    -		l.InfoDepth(depth+1, withParens(id)+desc.Desc)
    -	case CtWarning:
    -		l.WarningDepth(depth+1, withParens(id)+desc.Desc)
    -	case CtError:
    -		l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
    -	}
    -
    -	if getMaxTraceEntry() == 0 {
    -		return
    -	}
    -	if IsOn() {
    -		db.get().traceEvent(id.Int(), desc)
    -	}
    -}
    -
    -// channelMap is the storage data structure for channelz.
    -// Methods of channelMap can be divided in two two categories with respect to locking.
    -// 1. Methods acquire the global lock.
    -// 2. Methods that can only be called when global lock is held.
    -// A second type of method need always to be called inside a first type of method.
    -type channelMap struct {
    -	mu               sync.RWMutex
    -	topLevelChannels map[int64]struct{}
    -	servers          map[int64]*server
    -	channels         map[int64]*channel
    -	subChannels      map[int64]*subChannel
    -	listenSockets    map[int64]*listenSocket
    -	normalSockets    map[int64]*normalSocket
    -}
    -
    -func newChannelMap() *channelMap {
    -	return &channelMap{
    -		topLevelChannels: make(map[int64]struct{}),
    -		channels:         make(map[int64]*channel),
    -		listenSockets:    make(map[int64]*listenSocket),
    -		normalSockets:    make(map[int64]*normalSocket),
    -		servers:          make(map[int64]*server),
    -		subChannels:      make(map[int64]*subChannel),
    -	}
    -}
    -
    -func (c *channelMap) addServer(id int64, s *server) {
    -	c.mu.Lock()
    -	s.cm = c
    -	c.servers[id] = s
    -	c.mu.Unlock()
    -}
    -
    -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
    -	c.mu.Lock()
    -	cn.cm = c
    -	cn.trace.cm = c
    -	c.channels[id] = cn
    -	if isTopChannel {
    -		c.topLevelChannels[id] = struct{}{}
    -	} else {
    -		c.findEntry(pid).addChild(id, cn)
    -	}
    -	c.mu.Unlock()
    -}
    -
    -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
    -	c.mu.Lock()
    -	sc.cm = c
    -	sc.trace.cm = c
    -	c.subChannels[id] = sc
    -	c.findEntry(pid).addChild(id, sc)
    -	c.mu.Unlock()
    -}
    -
    -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
    -	c.mu.Lock()
    -	ls.cm = c
    -	c.listenSockets[id] = ls
    -	c.findEntry(pid).addChild(id, ls)
    -	c.mu.Unlock()
    -}
    -
    -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
    -	c.mu.Lock()
    -	ns.cm = c
    -	c.normalSockets[id] = ns
    -	c.findEntry(pid).addChild(id, ns)
    -	c.mu.Unlock()
    -}
    -
    -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
    -// wait on the deletion of its children and until no other entity's channel trace references it.
    -// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
    -// shutting down server will lead to the server being also deleted.
    -func (c *channelMap) removeEntry(id int64) {
    -	c.mu.Lock()
    -	c.findEntry(id).triggerDelete()
    -	c.mu.Unlock()
    -}
    -
    -// c.mu must be held by the caller
    -func (c *channelMap) decrTraceRefCount(id int64) {
    -	e := c.findEntry(id)
    -	if v, ok := e.(tracedChannel); ok {
    -		v.decrTraceRefCount()
    -		e.deleteSelfIfReady()
    -	}
    -}
    -
    -// c.mu must be held by the caller.
    -func (c *channelMap) findEntry(id int64) entry {
    -	var v entry
    -	var ok bool
    -	if v, ok = c.channels[id]; ok {
    -		return v
    -	}
    -	if v, ok = c.subChannels[id]; ok {
    -		return v
    -	}
    -	if v, ok = c.servers[id]; ok {
    -		return v
    -	}
    -	if v, ok = c.listenSockets[id]; ok {
    -		return v
    -	}
    -	if v, ok = c.normalSockets[id]; ok {
    -		return v
    -	}
    -	return &dummyEntry{idNotFound: id}
    -}
    -
    -// c.mu must be held by the caller
    -// deleteEntry simply deletes an entry from the channelMap. Before calling this
    -// method, caller must check this entry is ready to be deleted, i.e removeEntry()
    -// has been called on it, and no children still exist.
    -// Conditionals are ordered by the expected frequency of deletion of each entity
    -// type, in order to optimize performance.
    -func (c *channelMap) deleteEntry(id int64) {
    -	var ok bool
    -	if _, ok = c.normalSockets[id]; ok {
    -		delete(c.normalSockets, id)
    -		return
    -	}
    -	if _, ok = c.subChannels[id]; ok {
    -		delete(c.subChannels, id)
    -		return
    -	}
    -	if _, ok = c.channels[id]; ok {
    -		delete(c.channels, id)
    -		delete(c.topLevelChannels, id)
    -		return
    -	}
    -	if _, ok = c.listenSockets[id]; ok {
    -		delete(c.listenSockets, id)
    -		return
    -	}
    -	if _, ok = c.servers[id]; ok {
    -		delete(c.servers, id)
    -		return
    -	}
    -}
    -
    -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
    -	c.mu.Lock()
    -	child := c.findEntry(id)
    -	childTC, ok := child.(tracedChannel)
    -	if !ok {
    -		c.mu.Unlock()
    -		return
    -	}
    -	childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
    -	if desc.Parent != nil {
    -		parent := c.findEntry(child.getParentID())
    -		var chanType RefChannelType
    -		switch child.(type) {
    -		case *channel:
    -			chanType = RefChannel
    -		case *subChannel:
    -			chanType = RefSubChannel
    -		}
    -		if parentTC, ok := parent.(tracedChannel); ok {
    -			parentTC.getChannelTrace().append(&TraceEvent{
    -				Desc:      desc.Parent.Desc,
    -				Severity:  desc.Parent.Severity,
    -				Timestamp: time.Now(),
    -				RefID:     id,
    -				RefName:   childTC.getRefName(),
    -				RefType:   chanType,
    -			})
    -			childTC.incrTraceRefCount()
    -		}
    -	}
    -	c.mu.Unlock()
    -}
    -
    -type int64Slice []int64
    -
    -func (s int64Slice) Len() int           { return len(s) }
    -func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
    -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
    -
    -func copyMap(m map[int64]string) map[int64]string {
    -	n := make(map[int64]string)
    -	for k, v := range m {
    -		n[k] = v
    -	}
    -	return n
    -}
    -
    -func min(a, b int64) int64 {
    -	if a < b {
    -		return a
    -	}
    -	return b
    -}
    -
    -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
    -	if maxResults <= 0 {
    -		maxResults = EntryPerPage
    -	}
    -	c.mu.RLock()
    -	l := int64(len(c.topLevelChannels))
    -	ids := make([]int64, 0, l)
    -	cns := make([]*channel, 0, min(l, maxResults))
    -
    -	for k := range c.topLevelChannels {
    -		ids = append(ids, k)
    -	}
    -	sort.Sort(int64Slice(ids))
    -	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
    -	count := int64(0)
    -	var end bool
    -	var t []*ChannelMetric
    -	for i, v := range ids[idx:] {
    -		if count == maxResults {
    -			break
    -		}
    -		if cn, ok := c.channels[v]; ok {
    -			cns = append(cns, cn)
    -			t = append(t, &ChannelMetric{
    -				NestedChans: copyMap(cn.nestedChans),
    -				SubChans:    copyMap(cn.subChans),
    -			})
    -			count++
    -		}
    -		if i == len(ids[idx:])-1 {
    -			end = true
    -			break
    -		}
    -	}
    -	c.mu.RUnlock()
    -	if count == 0 {
    -		end = true
    -	}
    -
    -	for i, cn := range cns {
    -		t[i].ChannelData = cn.c.ChannelzMetric()
    -		t[i].ID = cn.id
    -		t[i].RefName = cn.refName
    -		t[i].Trace = cn.trace.dumpData()
    -	}
    -	return t, end
    -}
    -
    -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
    -	if maxResults <= 0 {
    -		maxResults = EntryPerPage
    -	}
    -	c.mu.RLock()
    -	l := int64(len(c.servers))
    -	ids := make([]int64, 0, l)
    -	ss := make([]*server, 0, min(l, maxResults))
    -	for k := range c.servers {
    -		ids = append(ids, k)
    -	}
    -	sort.Sort(int64Slice(ids))
    -	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
    -	count := int64(0)
    -	var end bool
    -	var s []*ServerMetric
    -	for i, v := range ids[idx:] {
    -		if count == maxResults {
    -			break
    -		}
    -		if svr, ok := c.servers[v]; ok {
    -			ss = append(ss, svr)
    -			s = append(s, &ServerMetric{
    -				ListenSockets: copyMap(svr.listenSockets),
    -			})
    -			count++
    -		}
    -		if i == len(ids[idx:])-1 {
    -			end = true
    -			break
    -		}
    -	}
    -	c.mu.RUnlock()
    -	if count == 0 {
    -		end = true
    -	}
    -
    -	for i, svr := range ss {
    -		s[i].ServerData = svr.s.ChannelzMetric()
    -		s[i].ID = svr.id
    -		s[i].RefName = svr.refName
    -	}
    -	return s, end
    -}
    -
    -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
    -	if maxResults <= 0 {
    -		maxResults = EntryPerPage
    -	}
    -	var svr *server
    -	var ok bool
    -	c.mu.RLock()
    -	if svr, ok = c.servers[id]; !ok {
    -		// server with id doesn't exist.
    -		c.mu.RUnlock()
    -		return nil, true
    -	}
    -	svrskts := svr.sockets
    -	l := int64(len(svrskts))
    -	ids := make([]int64, 0, l)
    -	sks := make([]*normalSocket, 0, min(l, maxResults))
    -	for k := range svrskts {
    -		ids = append(ids, k)
    -	}
    -	sort.Sort(int64Slice(ids))
    -	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
    -	count := int64(0)
    -	var end bool
    -	for i, v := range ids[idx:] {
    -		if count == maxResults {
    -			break
    -		}
    -		if ns, ok := c.normalSockets[v]; ok {
    -			sks = append(sks, ns)
    -			count++
    -		}
    -		if i == len(ids[idx:])-1 {
    -			end = true
    -			break
    -		}
    -	}
    -	c.mu.RUnlock()
    -	if count == 0 {
    -		end = true
    -	}
    -	s := make([]*SocketMetric, 0, len(sks))
    -	for _, ns := range sks {
    -		sm := &SocketMetric{}
    -		sm.SocketData = ns.s.ChannelzMetric()
    -		sm.ID = ns.id
    -		sm.RefName = ns.refName
    -		s = append(s, sm)
    -	}
    -	return s, end
    -}
    -
    -func (c *channelMap) GetChannel(id int64) *ChannelMetric {
    -	cm := &ChannelMetric{}
    -	var cn *channel
    -	var ok bool
    -	c.mu.RLock()
    -	if cn, ok = c.channels[id]; !ok {
    -		// channel with id doesn't exist.
    -		c.mu.RUnlock()
    -		return nil
    -	}
    -	cm.NestedChans = copyMap(cn.nestedChans)
    -	cm.SubChans = copyMap(cn.subChans)
    -	// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
    -	// holding the lock to prevent potential data race.
    -	chanCopy := cn.c
    -	c.mu.RUnlock()
    -	cm.ChannelData = chanCopy.ChannelzMetric()
    -	cm.ID = cn.id
    -	cm.RefName = cn.refName
    -	cm.Trace = cn.trace.dumpData()
    -	return cm
    -}
    -
    -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
    -	cm := &SubChannelMetric{}
    -	var sc *subChannel
    -	var ok bool
    -	c.mu.RLock()
    -	if sc, ok = c.subChannels[id]; !ok {
    -		// subchannel with id doesn't exist.
    -		c.mu.RUnlock()
    -		return nil
    -	}
    -	cm.Sockets = copyMap(sc.sockets)
    -	// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
    -	// holding the lock to prevent potential data race.
    -	chanCopy := sc.c
    -	c.mu.RUnlock()
    -	cm.ChannelData = chanCopy.ChannelzMetric()
    -	cm.ID = sc.id
    -	cm.RefName = sc.refName
    -	cm.Trace = sc.trace.dumpData()
    -	return cm
    -}
    -
    -func (c *channelMap) GetSocket(id int64) *SocketMetric {
    -	sm := &SocketMetric{}
    -	c.mu.RLock()
    -	if ls, ok := c.listenSockets[id]; ok {
    -		c.mu.RUnlock()
    -		sm.SocketData = ls.s.ChannelzMetric()
    -		sm.ID = ls.id
    -		sm.RefName = ls.refName
    -		return sm
    -	}
    -	if ns, ok := c.normalSockets[id]; ok {
    -		c.mu.RUnlock()
    -		sm.SocketData = ns.s.ChannelzMetric()
    -		sm.ID = ns.id
    -		sm.RefName = ns.refName
    -		return sm
    -	}
    -	c.mu.RUnlock()
    -	return nil
    -}
    -
    -func (c *channelMap) GetServer(id int64) *ServerMetric {
    -	sm := &ServerMetric{}
    -	var svr *server
    -	var ok bool
    -	c.mu.RLock()
    -	if svr, ok = c.servers[id]; !ok {
    -		c.mu.RUnlock()
    -		return nil
    -	}
    -	sm.ListenSockets = copyMap(svr.listenSockets)
    -	c.mu.RUnlock()
    -	sm.ID = svr.id
    -	sm.RefName = svr.refName
    -	sm.ServerData = svr.s.ChannelzMetric()
    -	return sm
    +	db.removeEntry(id)
     }
     
     // IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
    @@ -761,3 +220,11 @@ func (i *IDGenerator) Reset() {
     func (i *IDGenerator) genID() int64 {
     	return atomic.AddInt64(&i.id, 1)
     }
    +
    +// Identifier is an opaque channelz identifier used to expose channelz symbols
    +// outside of grpc.  Currently only implemented by Channel since no other
    +// types require exposure outside grpc.
    +type Identifier interface {
    +	Entity
    +	channelzIdentifier()
    +}
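
With the funcs.go rewrite above, the registration functions hand back concrete
*Channel, *SubChannel, *Server, and *Socket values instead of opaque *Identifier
handles, and RemoveEntry takes the numeric ID directly. A hedged sketch of how a
caller inside grpc might use the new surface (the package name is hypothetical,
and internal/channelz cannot be imported from outside the grpc module):

	// Illustrative only; assumes the API shown in the funcs.go diff above.
	package channelzusage

	import "google.golang.org/grpc/internal/channelz"

	func register() {
		channelz.TurnOn()

		// A top-level channel has no parent; the target also becomes its ref name.
		top := channelz.RegisterChannel(nil, "dns:///example.org:443")

		// Subchannels are children of a channel and carry their own ref string.
		sc := channelz.RegisterSubChannel(top, "example.org:443")

		// Cleanup now takes the numeric channelz ID rather than an *Identifier.
		channelz.RemoveEntry(sc.ID)
		channelz.RemoveEntry(top.ID)
	}
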
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go
    deleted file mode 100644
    index c9a27acd37..0000000000
    --- a/vendor/google.golang.org/grpc/internal/channelz/id.go
    +++ /dev/null
    @@ -1,75 +0,0 @@
    -/*
    - *
    - * Copyright 2022 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package channelz
    -
    -import "fmt"
    -
    -// Identifier is an opaque identifier which uniquely identifies an entity in the
    -// channelz database.
    -type Identifier struct {
    -	typ RefChannelType
    -	id  int64
    -	str string
    -	pid *Identifier
    -}
    -
    -// Type returns the entity type corresponding to id.
    -func (id *Identifier) Type() RefChannelType {
    -	return id.typ
    -}
    -
    -// Int returns the integer identifier corresponding to id.
    -func (id *Identifier) Int() int64 {
    -	return id.id
    -}
    -
    -// String returns a string representation of the entity corresponding to id.
    -//
    -// This includes some information about the parent as well. Examples:
    -// Top-level channel: [Channel #channel-number]
    -// Nested channel:    [Channel #parent-channel-number Channel #channel-number]
    -// Sub channel:       [Channel #parent-channel SubChannel #subchannel-number]
    -func (id *Identifier) String() string {
    -	return id.str
    -}
    -
    -// Equal returns true if other is the same as id.
    -func (id *Identifier) Equal(other *Identifier) bool {
    -	if (id != nil) != (other != nil) {
    -		return false
    -	}
    -	if id == nil && other == nil {
    -		return true
    -	}
    -	return id.typ == other.typ && id.id == other.id && id.pid == other.pid
    -}
    -
    -// NewIdentifierForTesting returns a new opaque identifier to be used only for
    -// testing purposes.
    -func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
    -	return newIdentifer(typ, id, pid)
    -}
    -
    -func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
    -	str := fmt.Sprintf("%s #%d", typ, id)
    -	if pid != nil {
    -		str = fmt.Sprintf("%s %s", pid, str)
    -	}
    -	return &Identifier{typ: typ, id: id, str: str, pid: pid}
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
    index f89e6f77bb..ee4d721258 100644
    --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go
    +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
    @@ -26,53 +26,49 @@ import (
     
     var logger = grpclog.Component("channelz")
     
    -func withParens(id *Identifier) string {
    -	return "[" + id.String() + "] "
    -}
    -
     // Info logs and adds a trace event if channelz is on.
    -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
    -	AddTraceEvent(l, id, 1, &TraceEventDesc{
    +func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
    +	AddTraceEvent(l, e, 1, &TraceEvent{
     		Desc:     fmt.Sprint(args...),
     		Severity: CtInfo,
     	})
     }
     
     // Infof logs and adds a trace event if channelz is on.
    -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
    -	AddTraceEvent(l, id, 1, &TraceEventDesc{
    +func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
    +	AddTraceEvent(l, e, 1, &TraceEvent{
     		Desc:     fmt.Sprintf(format, args...),
     		Severity: CtInfo,
     	})
     }
     
     // Warning logs and adds a trace event if channelz is on.
    -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
    -	AddTraceEvent(l, id, 1, &TraceEventDesc{
    +func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
    +	AddTraceEvent(l, e, 1, &TraceEvent{
     		Desc:     fmt.Sprint(args...),
     		Severity: CtWarning,
     	})
     }
     
     // Warningf logs and adds a trace event if channelz is on.
    -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
    -	AddTraceEvent(l, id, 1, &TraceEventDesc{
    +func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
    +	AddTraceEvent(l, e, 1, &TraceEvent{
     		Desc:     fmt.Sprintf(format, args...),
     		Severity: CtWarning,
     	})
     }
     
     // Error logs and adds a trace event if channelz is on.
    -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
    -	AddTraceEvent(l, id, 1, &TraceEventDesc{
    +func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
    +	AddTraceEvent(l, e, 1, &TraceEvent{
     		Desc:     fmt.Sprint(args...),
     		Severity: CtError,
     	})
     }
     
     // Errorf logs and adds a trace event if channelz is on.
    -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
    -	AddTraceEvent(l, id, 1, &TraceEventDesc{
    +func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
    +	AddTraceEvent(l, e, 1, &TraceEvent{
     		Desc:     fmt.Sprintf(format, args...),
     		Severity: CtError,
     	})
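
The logging helpers above now accept any channelz.Entity rather than an *Identifier,
so the entity itself (whose String method yields, for example, "Channel #7", or
"Channel #3 Channel #7" for a nested channel) identifies the log source. A hedged
sketch of a call site under the new signatures (package name hypothetical;
internal/channelz is not importable outside grpc):

	// Illustrative only; assumes the signatures shown in the logging.go diff above.
	package channelzusage

	import (
		"google.golang.org/grpc/grpclog"
		"google.golang.org/grpc/internal/channelz"
	)

	func logReady(l grpclog.DepthLoggerV2, ch *channelz.Channel) {
		// Before this change the second argument was a *channelz.Identifier.
		channelz.Infof(l, ch, "channel is ready; %d subchannels", len(ch.SubChans()))
	}
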
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
    new file mode 100644
    index 0000000000..b5a8249929
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go
    @@ -0,0 +1,121 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"fmt"
    +	"sync/atomic"
    +)
    +
    +// Server is the channelz representation of a server.
    +type Server struct {
    +	Entity
    +	ID      int64
    +	RefName string
    +
    +	ServerMetrics ServerMetrics
    +
    +	closeCalled   bool
    +	sockets       map[int64]string
    +	listenSockets map[int64]string
    +	cm            *channelMap
    +}
    +
    +// ServerMetrics defines a struct containing metrics for servers.
    +type ServerMetrics struct {
    +	// The number of incoming calls started on the server.
    +	CallsStarted atomic.Int64
    +	// The number of incoming calls that have completed with an OK status.
    +	CallsSucceeded atomic.Int64
+	// The number of incoming calls that have completed with a non-OK status.
    +	CallsFailed atomic.Int64
    +	// The last time a call was started on the server.
    +	LastCallStartedTimestamp atomic.Int64
    +}
    +
    +// NewServerMetricsForTesting returns an initialized ServerMetrics.
    +func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
    +	sm := &ServerMetrics{}
    +	sm.CallsStarted.Store(started)
    +	sm.CallsSucceeded.Store(succeeded)
    +	sm.CallsFailed.Store(failed)
    +	sm.LastCallStartedTimestamp.Store(timestamp)
    +	return sm
    +}
    +
    +// CopyFrom copies the metrics data from the provided ServerMetrics
    +// instance into the current instance.
    +func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
    +	sm.CallsStarted.Store(o.CallsStarted.Load())
    +	sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
    +	sm.CallsFailed.Store(o.CallsFailed.Load())
    +	sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
    +}
    +
    +// ListenSockets returns the listening sockets for s.
    +func (s *Server) ListenSockets() map[int64]string {
    +	db.mu.RLock()
    +	defer db.mu.RUnlock()
    +	return copyMap(s.listenSockets)
    +}
    +
    +// String returns a printable description of s.
    +func (s *Server) String() string {
    +	return fmt.Sprintf("Server #%d", s.ID)
    +}
    +
    +func (s *Server) id() int64 {
    +	return s.ID
    +}
    +
    +func (s *Server) addChild(id int64, e entry) {
    +	switch v := e.(type) {
    +	case *Socket:
    +		switch v.SocketType {
    +		case SocketTypeNormal:
    +			s.sockets[id] = v.RefName
    +		case SocketTypeListen:
    +			s.listenSockets[id] = v.RefName
    +		}
    +	default:
    +		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
    +	}
    +}
    +
    +func (s *Server) deleteChild(id int64) {
    +	delete(s.sockets, id)
    +	delete(s.listenSockets, id)
    +	s.deleteSelfIfReady()
    +}
    +
    +func (s *Server) triggerDelete() {
    +	s.closeCalled = true
    +	s.deleteSelfIfReady()
    +}
    +
    +func (s *Server) deleteSelfIfReady() {
    +	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
    +		return
    +	}
    +	s.cm.deleteEntry(s.ID)
    +}
    +
    +func (s *Server) getParentID() int64 {
    +	return 0
    +}
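
The new Server entity above stores its counters as atomic.Int64 fields (ServerMetrics) that are bumped in place and snapshotted via CopyFrom, replacing the old snapshot-style ServerInternalMetric. A minimal sketch of the same pattern, with hypothetical names since the internal package is not importable here:

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    // callMetrics mirrors the shape of channelz.ServerMetrics: lock-free
    // atomic.Int64 counters updated in place on the hot path.
    type callMetrics struct {
        CallsStarted             atomic.Int64
        CallsSucceeded           atomic.Int64
        CallsFailed              atomic.Int64
        LastCallStartedTimestamp atomic.Int64 // unix nanos
    }

    // copyFrom copies the counters field by field, like ServerMetrics.CopyFrom.
    func (m *callMetrics) copyFrom(o *callMetrics) {
        m.CallsStarted.Store(o.CallsStarted.Load())
        m.CallsSucceeded.Store(o.CallsSucceeded.Load())
        m.CallsFailed.Store(o.CallsFailed.Load())
        m.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
    }

    func main() {
        var live callMetrics
        live.CallsStarted.Add(1)
        live.LastCallStartedTimestamp.Store(time.Now().UnixNano())
        live.CallsSucceeded.Add(1)

        var snap callMetrics
        snap.copyFrom(&live)
        fmt.Printf("started=%d succeeded=%d failed=%d\n",
            snap.CallsStarted.Load(), snap.CallsSucceeded.Load(), snap.CallsFailed.Load())
    }
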
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
    new file mode 100644
    index 0000000000..90103847c5
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go
    @@ -0,0 +1,137 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"fmt"
    +	"net"
    +	"sync/atomic"
    +
    +	"google.golang.org/grpc/credentials"
    +)
    +
+// SocketMetrics defines the socket-level metrics that channelz tracks for a
+// socket entity.
    +type SocketMetrics struct {
    +	// The number of streams that have been started.
    +	StreamsStarted atomic.Int64
    +	// The number of streams that have ended successfully:
    +	// On client side, receiving frame with eos bit set.
    +	// On server side, sending frame with eos bit set.
    +	StreamsSucceeded atomic.Int64
    +	// The number of streams that have ended unsuccessfully:
    +	// On client side, termination without receiving frame with eos bit set.
    +	// On server side, termination without sending frame with eos bit set.
    +	StreamsFailed atomic.Int64
    +	// The number of messages successfully sent on this socket.
    +	MessagesSent     atomic.Int64
    +	MessagesReceived atomic.Int64
    +	// The number of keep alives sent.  This is typically implemented with HTTP/2
    +	// ping messages.
    +	KeepAlivesSent atomic.Int64
    +	// The last time a stream was created by this endpoint.  Usually unset for
    +	// servers.
    +	LastLocalStreamCreatedTimestamp atomic.Int64
    +	// The last time a stream was created by the remote endpoint.  Usually unset
    +	// for clients.
    +	LastRemoteStreamCreatedTimestamp atomic.Int64
    +	// The last time a message was sent by this endpoint.
    +	LastMessageSentTimestamp atomic.Int64
    +	// The last time a message was received by this endpoint.
    +	LastMessageReceivedTimestamp atomic.Int64
    +}
    +
    +// EphemeralSocketMetrics are metrics that change rapidly and are tracked
    +// outside of channelz.
    +type EphemeralSocketMetrics struct {
    +	// The amount of window, granted to the local endpoint by the remote endpoint.
    +	// This may be slightly out of date due to network latency.  This does NOT
    +	// include stream level or TCP level flow control info.
    +	LocalFlowControlWindow int64
    +	// The amount of window, granted to the remote endpoint by the local endpoint.
    +	// This may be slightly out of date due to network latency.  This does NOT
    +	// include stream level or TCP level flow control info.
    +	RemoteFlowControlWindow int64
    +}
    +
    +// SocketType represents the type of socket.
    +type SocketType string
    +
    +// SocketType can be one of these.
    +const (
    +	SocketTypeNormal = "NormalSocket"
    +	SocketTypeListen = "ListenSocket"
    +)
    +
+// Socket represents a socket within channelz. It includes socket metrics and
+// data related to socket activity, and provides methods for managing and
+// interacting with sockets.
    +type Socket struct {
    +	Entity
    +	SocketType       SocketType
    +	ID               int64
    +	Parent           Entity
    +	cm               *channelMap
    +	SocketMetrics    SocketMetrics
    +	EphemeralMetrics func() *EphemeralSocketMetrics
    +
    +	RefName string
    +	// The locally bound address.  Immutable.
    +	LocalAddr net.Addr
    +	// The remote bound address.  May be absent.  Immutable.
    +	RemoteAddr net.Addr
    +	// Optional, represents the name of the remote endpoint, if different than
    +	// the original target name.  Immutable.
    +	RemoteName string
    +	// Immutable.
    +	SocketOptions *SocketOptionData
    +	// Immutable.
    +	Security credentials.ChannelzSecurityValue
    +}
    +
    +// String returns a string representation of the Socket, including its parent
    +// entity, socket type, and ID.
    +func (ls *Socket) String() string {
    +	return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
    +}
    +
    +func (ls *Socket) id() int64 {
    +	return ls.ID
    +}
    +
    +func (ls *Socket) addChild(id int64, e entry) {
    +	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
    +}
    +
    +func (ls *Socket) deleteChild(id int64) {
    +	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
    +}
    +
    +func (ls *Socket) triggerDelete() {
    +	ls.cm.deleteEntry(ls.ID)
    +	ls.Parent.(entry).deleteChild(ls.ID)
    +}
    +
    +func (ls *Socket) deleteSelfIfReady() {
    +	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
    +}
    +
    +func (ls *Socket) getParentID() int64 {
    +	return ls.Parent.id()
    +}
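
Socket.EphemeralMetrics above is a callback rather than stored state: fast-changing values such as flow-control windows are computed only when channelz is queried. A toy sketch of that lazy-callback pattern (the transport type and field names below are hypothetical):

    package main

    import "fmt"

    // ephemeralMetrics mirrors EphemeralSocketMetrics: values that change too fast
    // to track continuously, so the entity stores a callback and computes them only
    // when a query arrives.
    type ephemeralMetrics struct {
        LocalFlowControlWindow  int64
        RemoteFlowControlWindow int64
    }

    type fakeTransport struct {
        sendQuota, recvQuota int64
    }

    // socketEntry keeps a closure instead of a snapshot, like Socket.EphemeralMetrics.
    type socketEntry struct {
        EphemeralMetrics func() *ephemeralMetrics
    }

    func main() {
        t := &fakeTransport{sendQuota: 65535, recvQuota: 65535}
        s := &socketEntry{
            EphemeralMetrics: func() *ephemeralMetrics {
                // Read the live values only at query time.
                return &ephemeralMetrics{
                    LocalFlowControlWindow:  t.recvQuota,
                    RemoteFlowControlWindow: t.sendQuota,
                }
            },
        }

        t.sendQuota -= 1024 // window shrinks as data is written
        fmt.Printf("%+v\n", *s.EphemeralMetrics())
    }
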
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
    new file mode 100644
    index 0000000000..b20802e6e9
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
    @@ -0,0 +1,153 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"fmt"
    +	"sync/atomic"
    +)
    +
    +// SubChannel is the channelz representation of a subchannel.
    +type SubChannel struct {
    +	Entity
    +	// ID is the channelz id of this subchannel.
    +	ID int64
    +	// RefName is the human readable reference string of this subchannel.
    +	RefName       string
    +	closeCalled   bool
    +	sockets       map[int64]string
    +	parent        *Channel
    +	trace         *ChannelTrace
    +	traceRefCount int32
    +
    +	ChannelMetrics ChannelMetrics
    +}
    +
    +func (sc *SubChannel) String() string {
    +	return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID)
    +}
    +
    +func (sc *SubChannel) id() int64 {
    +	return sc.ID
    +}
    +
    +// Sockets returns a copy of the sockets map associated with the SubChannel.
    +func (sc *SubChannel) Sockets() map[int64]string {
    +	db.mu.RLock()
    +	defer db.mu.RUnlock()
    +	return copyMap(sc.sockets)
    +}
    +
    +// Trace returns a copy of the ChannelTrace associated with the SubChannel.
    +func (sc *SubChannel) Trace() *ChannelTrace {
    +	db.mu.RLock()
    +	defer db.mu.RUnlock()
    +	return sc.trace.copy()
    +}
    +
    +func (sc *SubChannel) addChild(id int64, e entry) {
    +	if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal {
    +		sc.sockets[id] = v.RefName
    +	} else {
    +		logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
    +	}
    +}
    +
    +func (sc *SubChannel) deleteChild(id int64) {
    +	delete(sc.sockets, id)
    +	sc.deleteSelfIfReady()
    +}
    +
    +func (sc *SubChannel) triggerDelete() {
    +	sc.closeCalled = true
    +	sc.deleteSelfIfReady()
    +}
    +
    +func (sc *SubChannel) getParentID() int64 {
    +	return sc.parent.ID
    +}
    +
    +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
    +// means deleting the subchannel reference from its parent's child list.
    +//
+// In order for a subchannel to be deleted from the tree, removal of the corresponding
+// grpc object must have been invoked, and the subchannel must not have any children left.
+//
+// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
    +func (sc *SubChannel) deleteSelfFromTree() (deleted bool) {
    +	if !sc.closeCalled || len(sc.sockets) != 0 {
    +		return false
    +	}
    +	sc.parent.deleteChild(sc.ID)
    +	return true
    +}
    +
    +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
    +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
    +// the subchannel, and its memory will be garbage collected.
    +//
    +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
    +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
    +// the trace of the referenced entity must not be deleted. In order to release the resource allocated
    +// by grpc, the reference to the grpc object is reset to a dummy object.
    +//
    +// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
    +//
+// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
    +func (sc *SubChannel) deleteSelfFromMap() (delete bool) {
    +	return sc.getTraceRefCount() == 0
    +}
    +
    +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
    +// The delete process includes two steps:
    +//  1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
    +//     its parent's child list.
    +//  2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
    +//     by id will return entry not found error.
    +func (sc *SubChannel) deleteSelfIfReady() {
    +	if !sc.deleteSelfFromTree() {
    +		return
    +	}
    +	if !sc.deleteSelfFromMap() {
    +		return
    +	}
    +	db.deleteEntry(sc.ID)
    +	sc.trace.clear()
    +}
    +
    +func (sc *SubChannel) getChannelTrace() *ChannelTrace {
    +	return sc.trace
    +}
    +
    +func (sc *SubChannel) incrTraceRefCount() {
    +	atomic.AddInt32(&sc.traceRefCount, 1)
    +}
    +
    +func (sc *SubChannel) decrTraceRefCount() {
    +	atomic.AddInt32(&sc.traceRefCount, -1)
    +}
    +
    +func (sc *SubChannel) getTraceRefCount() int {
    +	i := atomic.LoadInt32(&sc.traceRefCount)
    +	return int(i)
    +}
    +
    +func (sc *SubChannel) getRefName() string {
    +	return sc.RefName
    +}
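
The SubChannel teardown above is two-phase: deleteSelfFromTree detaches the entity from its parent once close has been requested and no children remain, and deleteSelfFromMap then allows removal from the id map only once no trace event still references it. A toy sketch of that gating, with simplified fields:

    package main

    import "fmt"

    // node sketches the two-phase channelz deletion: first detach from the parent's
    // child list (tree), then drop from the id->entry map only once no trace event
    // still references the node. Purely illustrative; the real fields differ.
    type node struct {
        id            int64
        closeCalled   bool
        children      map[int64]*node
        parent        *node
        traceRefCount int
    }

    var registry = map[int64]*node{}

    func (n *node) deleteSelfIfReady() {
        // Phase 1: tree. Requires close to have been requested and no children left.
        if !n.closeCalled || len(n.children) != 0 {
            return
        }
        if n.parent != nil {
            delete(n.parent.children, n.id)
        }
        // Phase 2: map. Gated on the trace reference count, as in deleteSelfFromMap.
        if n.traceRefCount != 0 {
            return
        }
        delete(registry, n.id)
    }

    func main() {
        parent := &node{id: 1, children: map[int64]*node{}}
        sub := &node{id: 2, parent: parent, children: map[int64]*node{}, traceRefCount: 1}
        parent.children[sub.id] = sub
        registry[1], registry[2] = parent, sub

        sub.closeCalled = true
        sub.deleteSelfIfReady() // detached from the tree, but still in the map: still traced
        fmt.Println(len(parent.children), registry[2] != nil) // 0 true

        sub.traceRefCount = 0
        sub.deleteSelfIfReady()
        fmt.Println(registry[2] != nil) // false
    }
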
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
    new file mode 100644
    index 0000000000..5ac73ff833
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
    @@ -0,0 +1,65 @@
    +/*
    + *
    + * Copyright 2018 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"syscall"
    +
    +	"golang.org/x/sys/unix"
    +)
    +
    +// SocketOptionData defines the struct to hold socket option data, and related
    +// getter function to obtain info from fd.
    +type SocketOptionData struct {
    +	Linger      *unix.Linger
    +	RecvTimeout *unix.Timeval
    +	SendTimeout *unix.Timeval
    +	TCPInfo     *unix.TCPInfo
    +}
    +
    +// Getsockopt defines the function to get socket options requested by channelz.
    +// It is to be passed to syscall.RawConn.Control().
    +func (s *SocketOptionData) Getsockopt(fd uintptr) {
    +	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
    +		s.Linger = v
    +	}
    +	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
    +		s.RecvTimeout = v
    +	}
    +	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
    +		s.SendTimeout = v
    +	}
    +	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
    +		s.TCPInfo = v
    +	}
    +}
    +
    +// GetSocketOption gets the socket option info of the conn.
    +func GetSocketOption(socket any) *SocketOptionData {
    +	c, ok := socket.(syscall.Conn)
    +	if !ok {
    +		return nil
    +	}
    +	data := &SocketOptionData{}
    +	if rawConn, err := c.SyscallConn(); err == nil {
    +		rawConn.Control(data.Getsockopt)
    +		return data
    +	}
    +	return nil
    +}
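
GetSocketOption above relies on syscall.Conn: it obtains a syscall.RawConn and reads the options inside Control, where the file descriptor is pinned. A standalone, Linux-only sketch of the same pattern; it uses golang.org/x/sys/unix directly rather than the internal channelz package, which cannot be imported here.

    //go:build linux

    package main

    import (
        "fmt"
        "net"
        "syscall"

        "golang.org/x/sys/unix"
    )

    // readSockOpts mirrors the channelz.GetSocketOption pattern: ask the connection
    // for a syscall.RawConn and read socket options inside Control, where the fd is
    // guaranteed to stay valid.
    func readSockOpts(conn syscall.Conn) (linger *unix.Linger, tcpInfo *unix.TCPInfo, err error) {
        raw, err := conn.SyscallConn()
        if err != nil {
            return nil, nil, err
        }
        ctrlErr := raw.Control(func(fd uintptr) {
            if v, e := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); e == nil {
                linger = v
            }
            if v, e := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); e == nil {
                tcpInfo = v
            }
        })
        return linger, tcpInfo, ctrlErr
    }

    func main() {
        ln, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            panic(err)
        }
        defer ln.Close()

        conn, err := net.Dial("tcp", ln.Addr().String())
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        linger, info, err := readSockOpts(conn.(*net.TCPConn))
        fmt.Println(linger, info != nil, err)
    }
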
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
    new file mode 100644
    index 0000000000..0e6e18e185
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
    @@ -0,0 +1,47 @@
    +//go:build !linux
    +
    +/*
    + *
    + * Copyright 2018 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"sync"
    +)
    +
    +var once sync.Once
    +
    +// SocketOptionData defines the struct to hold socket option data, and related
    +// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments.
    +type SocketOptionData struct {
    +}
    +
    +// Getsockopt defines the function to get socket options requested by channelz.
    +// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
    +func (s *SocketOptionData) Getsockopt(uintptr) {
    +	once.Do(func() {
    +		logger.Warning("Channelz: socket options are not supported on non-linux environments")
    +	})
    +}
    +
    +// GetSocketOption gets the socket option info of the conn.
    +func GetSocketOption(any) *SocketOptionData {
    +	return nil
    +}
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
    new file mode 100644
    index 0000000000..2bffe47776
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
    @@ -0,0 +1,213 @@
    +/*
    + *
    + * Copyright 2018 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package channelz
    +
    +import (
    +	"fmt"
    +	"sync"
    +	"sync/atomic"
    +	"time"
    +
    +	"google.golang.org/grpc/grpclog"
    +)
    +
    +const (
    +	defaultMaxTraceEntry int32 = 30
    +)
    +
    +var maxTraceEntry = defaultMaxTraceEntry
    +
    +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e.
    +// channel/subchannel).  Setting it to 0 will disable channel tracing.
    +func SetMaxTraceEntry(i int32) {
    +	atomic.StoreInt32(&maxTraceEntry, i)
    +}
    +
    +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
    +// entity to default.
    +func ResetMaxTraceEntryToDefault() {
    +	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
    +}
    +
    +func getMaxTraceEntry() int {
    +	i := atomic.LoadInt32(&maxTraceEntry)
    +	return int(i)
    +}
    +
    +// traceEvent is an internal representation of a single trace event
    +type traceEvent struct {
    +	// Desc is a simple description of the trace event.
    +	Desc string
    +	// Severity states the severity of this trace event.
    +	Severity Severity
    +	// Timestamp is the event time.
    +	Timestamp time.Time
    +	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
    +	// involved in this event.
    +	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
    +	RefID int64
    +	// RefName is the reference name for the entity that gets referenced in the event.
    +	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
    +	RefType RefChannelType
    +}
    +
    +// TraceEvent is what the caller of AddTraceEvent should provide to describe the
    +// event to be added to the channel trace.
    +//
    +// The Parent field is optional. It is used for an event that will be recorded
    +// in the entity's parent trace.
    +type TraceEvent struct {
    +	Desc     string
    +	Severity Severity
    +	Parent   *TraceEvent
    +}
    +
    +// ChannelTrace provides tracing information for a channel.
    +// It tracks various events and metadata related to the channel's lifecycle
    +// and operations.
    +type ChannelTrace struct {
    +	cm          *channelMap
    +	clearCalled bool
    +	// The time when the trace was created.
    +	CreationTime time.Time
    +	// A counter for the number of events recorded in the
    +	// trace.
    +	EventNum int64
    +	mu       sync.Mutex
    +	// A slice of traceEvent pointers representing the events recorded for
    +	// this channel.
    +	Events []*traceEvent
    +}
    +
    +func (c *ChannelTrace) copy() *ChannelTrace {
    +	return &ChannelTrace{
    +		CreationTime: c.CreationTime,
    +		EventNum:     c.EventNum,
    +		Events:       append(([]*traceEvent)(nil), c.Events...),
    +	}
    +}
    +
    +func (c *ChannelTrace) append(e *traceEvent) {
    +	c.mu.Lock()
    +	if len(c.Events) == getMaxTraceEntry() {
    +		del := c.Events[0]
    +		c.Events = c.Events[1:]
    +		if del.RefID != 0 {
    +			// start recursive cleanup in a goroutine to not block the call originated from grpc.
    +			go func() {
    +				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
    +				c.cm.mu.Lock()
    +				c.cm.decrTraceRefCount(del.RefID)
    +				c.cm.mu.Unlock()
    +			}()
    +		}
    +	}
    +	e.Timestamp = time.Now()
    +	c.Events = append(c.Events, e)
    +	c.EventNum++
    +	c.mu.Unlock()
    +}
    +
    +func (c *ChannelTrace) clear() {
    +	if c.clearCalled {
    +		return
    +	}
    +	c.clearCalled = true
    +	c.mu.Lock()
    +	for _, e := range c.Events {
    +		if e.RefID != 0 {
    +			// caller should have already held the c.cm.mu lock.
    +			c.cm.decrTraceRefCount(e.RefID)
    +		}
    +	}
    +	c.mu.Unlock()
    +}
    +
    +// Severity is the severity level of a trace event.
    +// The canonical enumeration of all valid values is here:
    +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
    +type Severity int
    +
    +const (
    +	// CtUnknown indicates unknown severity of a trace event.
    +	CtUnknown Severity = iota
    +	// CtInfo indicates info level severity of a trace event.
    +	CtInfo
    +	// CtWarning indicates warning level severity of a trace event.
    +	CtWarning
    +	// CtError indicates error level severity of a trace event.
    +	CtError
    +)
    +
    +// RefChannelType is the type of the entity being referenced in a trace event.
    +type RefChannelType int
    +
    +const (
    +	// RefUnknown indicates an unknown entity type, the zero value for this type.
    +	RefUnknown RefChannelType = iota
    +	// RefChannel indicates the referenced entity is a Channel.
    +	RefChannel
    +	// RefSubChannel indicates the referenced entity is a SubChannel.
    +	RefSubChannel
    +	// RefServer indicates the referenced entity is a Server.
    +	RefServer
    +	// RefListenSocket indicates the referenced entity is a ListenSocket.
    +	RefListenSocket
    +	// RefNormalSocket indicates the referenced entity is a NormalSocket.
    +	RefNormalSocket
    +)
    +
    +var refChannelTypeToString = map[RefChannelType]string{
    +	RefUnknown:      "Unknown",
    +	RefChannel:      "Channel",
    +	RefSubChannel:   "SubChannel",
    +	RefServer:       "Server",
    +	RefListenSocket: "ListenSocket",
    +	RefNormalSocket: "NormalSocket",
    +}
    +
    +// String returns a string representation of the RefChannelType
    +func (r RefChannelType) String() string {
    +	return refChannelTypeToString[r]
    +}
    +
+// AddTraceEvent adds a trace event for the given entity, using the provided
+// TraceEvent.
    +//
    +// If channelz is not turned ON, this will simply log the event descriptions.
    +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
    +	// Log only the trace description associated with the bottom most entity.
    +	d := fmt.Sprintf("[%s]%s", e, desc.Desc)
    +	switch desc.Severity {
    +	case CtUnknown, CtInfo:
    +		l.InfoDepth(depth+1, d)
    +	case CtWarning:
    +		l.WarningDepth(depth+1, d)
    +	case CtError:
    +		l.ErrorDepth(depth+1, d)
    +	}
    +
    +	if getMaxTraceEntry() == 0 {
    +		return
    +	}
    +	if IsOn() {
    +		db.traceEvent(e.id(), desc)
    +	}
    +}
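
ChannelTrace.append above keeps a bounded event buffer: once the configured maximum is reached, the oldest event is evicted (and its cross-entity trace reference released asynchronously) before the new one is appended. A minimal sketch of the bounded-buffer part, omitting the reference-count bookkeeping:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // boundedTrace sketches the eviction logic in ChannelTrace.append: once the
    // buffer reaches maxEntries, the oldest event is dropped before the new one
    // is appended.
    type boundedTrace struct {
        mu         sync.Mutex
        maxEntries int
        eventNum   int64 // total events ever recorded, including evicted ones
        events     []string
    }

    func (t *boundedTrace) append(desc string) {
        t.mu.Lock()
        defer t.mu.Unlock()
        if len(t.events) == t.maxEntries {
            t.events = t.events[1:] // evict the oldest entry
        }
        t.events = append(t.events, fmt.Sprintf("%s %s", time.Now().Format(time.RFC3339), desc))
        t.eventNum++
    }

    func main() {
        tr := &boundedTrace{maxEntries: 3}
        for i := 0; i < 5; i++ {
            tr.append(fmt.Sprintf("event %d", i))
        }
        fmt.Println(tr.eventNum, len(tr.events)) // 5 3
    }
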
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
    deleted file mode 100644
    index 1d4020f537..0000000000
    --- a/vendor/google.golang.org/grpc/internal/channelz/types.go
    +++ /dev/null
    @@ -1,727 +0,0 @@
    -/*
    - *
    - * Copyright 2018 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package channelz
    -
    -import (
    -	"net"
    -	"sync"
    -	"sync/atomic"
    -	"time"
    -
    -	"google.golang.org/grpc/connectivity"
    -	"google.golang.org/grpc/credentials"
    -)
    -
    -// entry represents a node in the channelz database.
    -type entry interface {
    -	// addChild adds a child e, whose channelz id is id to child list
    -	addChild(id int64, e entry)
    -	// deleteChild deletes a child with channelz id to be id from child list
    -	deleteChild(id int64)
    -	// triggerDelete tries to delete self from channelz database. However, if child
    -	// list is not empty, then deletion from the database is on hold until the last
    -	// child is deleted from database.
    -	triggerDelete()
    -	// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
    -	// list is now empty. If both conditions are met, then delete self from database.
    -	deleteSelfIfReady()
    -	// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
    -	getParentID() int64
    -}
    -
    -// dummyEntry is a fake entry to handle entry not found case.
    -type dummyEntry struct {
    -	idNotFound int64
    -}
    -
    -func (d *dummyEntry) addChild(id int64, e entry) {
    -	// Note: It is possible for a normal program to reach here under race condition.
    -	// For example, there could be a race between ClientConn.Close() info being propagated
    -	// to addrConn and http2Client. ClientConn.Close() cancel the context and result
    -	// in http2Client to error. The error info is then caught by transport monitor
    -	// and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
    -	// the addrConn will create a new transport. And when registering the new transport in
    -	// channelz, its parent addrConn could have already been torn down and deleted
    -	// from channelz tracking, and thus reach the code here.
    -	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
    -}
    -
    -func (d *dummyEntry) deleteChild(id int64) {
    -	// It is possible for a normal program to reach here under race condition.
    -	// Refer to the example described in addChild().
    -	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
    -}
    -
    -func (d *dummyEntry) triggerDelete() {
    -	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
    -}
    -
    -func (*dummyEntry) deleteSelfIfReady() {
    -	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
    -}
    -
    -func (*dummyEntry) getParentID() int64 {
    -	return 0
    -}
    -
    -// ChannelMetric defines the info channelz provides for a specific Channel, which
    -// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
    -// child list, etc.
    -type ChannelMetric struct {
    -	// ID is the channelz id of this channel.
    -	ID int64
    -	// RefName is the human readable reference string of this channel.
    -	RefName string
    -	// ChannelData contains channel internal metric reported by the channel through
    -	// ChannelzMetric().
    -	ChannelData *ChannelInternalMetric
    -	// NestedChans tracks the nested channel type children of this channel in the format of
    -	// a map from nested channel channelz id to corresponding reference string.
    -	NestedChans map[int64]string
    -	// SubChans tracks the subchannel type children of this channel in the format of a
    -	// map from subchannel channelz id to corresponding reference string.
    -	SubChans map[int64]string
    -	// Sockets tracks the socket type children of this channel in the format of a map
    -	// from socket channelz id to corresponding reference string.
    -	// Note current grpc implementation doesn't allow channel having sockets directly,
    -	// therefore, this is field is unused.
    -	Sockets map[int64]string
    -	// Trace contains the most recent traced events.
    -	Trace *ChannelTrace
    -}
    -
    -// SubChannelMetric defines the info channelz provides for a specific SubChannel,
    -// which includes ChannelInternalMetric and channelz-specific data, such as
    -// channelz id, child list, etc.
    -type SubChannelMetric struct {
    -	// ID is the channelz id of this subchannel.
    -	ID int64
    -	// RefName is the human readable reference string of this subchannel.
    -	RefName string
    -	// ChannelData contains subchannel internal metric reported by the subchannel
    -	// through ChannelzMetric().
    -	ChannelData *ChannelInternalMetric
    -	// NestedChans tracks the nested channel type children of this subchannel in the format of
    -	// a map from nested channel channelz id to corresponding reference string.
    -	// Note current grpc implementation doesn't allow subchannel to have nested channels
    -	// as children, therefore, this field is unused.
    -	NestedChans map[int64]string
    -	// SubChans tracks the subchannel type children of this subchannel in the format of a
    -	// map from subchannel channelz id to corresponding reference string.
    -	// Note current grpc implementation doesn't allow subchannel to have subchannels
    -	// as children, therefore, this field is unused.
    -	SubChans map[int64]string
    -	// Sockets tracks the socket type children of this subchannel in the format of a map
    -	// from socket channelz id to corresponding reference string.
    -	Sockets map[int64]string
    -	// Trace contains the most recent traced events.
    -	Trace *ChannelTrace
    -}
    -
    -// ChannelInternalMetric defines the struct that the implementor of Channel interface
    -// should return from ChannelzMetric().
    -type ChannelInternalMetric struct {
    -	// current connectivity state of the channel.
    -	State connectivity.State
    -	// The target this channel originally tried to connect to.  May be absent
    -	Target string
    -	// The number of calls started on the channel.
    -	CallsStarted int64
    -	// The number of calls that have completed with an OK status.
    -	CallsSucceeded int64
    -	// The number of calls that have a completed with a non-OK status.
    -	CallsFailed int64
    -	// The last time a call was started on the channel.
    -	LastCallStartedTimestamp time.Time
    -}
    -
    -// ChannelTrace stores traced events on a channel/subchannel and related info.
    -type ChannelTrace struct {
    -	// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
    -	EventNum int64
    -	// CreationTime is the creation time of the trace.
    -	CreationTime time.Time
    -	// Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the
    -	// oldest one)
    -	Events []*TraceEvent
    -}
    -
    -// TraceEvent represent a single trace event
    -type TraceEvent struct {
    -	// Desc is a simple description of the trace event.
    -	Desc string
    -	// Severity states the severity of this trace event.
    -	Severity Severity
    -	// Timestamp is the event time.
    -	Timestamp time.Time
    -	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
    -	// involved in this event.
    -	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
    -	RefID int64
    -	// RefName is the reference name for the entity that gets referenced in the event.
    -	RefName string
    -	// RefType indicates the referenced entity type, i.e Channel or SubChannel.
    -	RefType RefChannelType
    -}
    -
    -// Channel is the interface that should be satisfied in order to be tracked by
    -// channelz as Channel or SubChannel.
    -type Channel interface {
    -	ChannelzMetric() *ChannelInternalMetric
    -}
    -
    -type dummyChannel struct{}
    -
    -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
    -	return &ChannelInternalMetric{}
    -}
    -
    -type channel struct {
    -	refName     string
    -	c           Channel
    -	closeCalled bool
    -	nestedChans map[int64]string
    -	subChans    map[int64]string
    -	id          int64
    -	pid         int64
    -	cm          *channelMap
    -	trace       *channelTrace
    -	// traceRefCount is the number of trace events that reference this channel.
    -	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
    -	traceRefCount int32
    -}
    -
    -func (c *channel) addChild(id int64, e entry) {
    -	switch v := e.(type) {
    -	case *subChannel:
    -		c.subChans[id] = v.refName
    -	case *channel:
    -		c.nestedChans[id] = v.refName
    -	default:
    -		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
    -	}
    -}
    -
    -func (c *channel) deleteChild(id int64) {
    -	delete(c.subChans, id)
    -	delete(c.nestedChans, id)
    -	c.deleteSelfIfReady()
    -}
    -
    -func (c *channel) triggerDelete() {
    -	c.closeCalled = true
    -	c.deleteSelfIfReady()
    -}
    -
    -func (c *channel) getParentID() int64 {
    -	return c.pid
    -}
    -
    -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
    -// deleting the channel reference from its parent's child list.
    -//
    -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
    -// corresponding grpc object has been invoked, and the channel does not have any children left.
    -//
    -// The returned boolean value indicates whether the channel has been successfully deleted from tree.
    -func (c *channel) deleteSelfFromTree() (deleted bool) {
    -	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
    -		return false
    -	}
    -	// not top channel
    -	if c.pid != 0 {
    -		c.cm.findEntry(c.pid).deleteChild(c.id)
    -	}
    -	return true
    -}
    -
    -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
    -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
    -// channel, and its memory will be garbage collected.
    -//
    -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
    -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
    -// the trace of the referenced entity must not be deleted. In order to release the resource allocated
    -// by grpc, the reference to the grpc object is reset to a dummy object.
    -//
    -// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
    -//
    -// It returns a bool to indicate whether the channel can be safely deleted from map.
    -func (c *channel) deleteSelfFromMap() (delete bool) {
    -	if c.getTraceRefCount() != 0 {
    -		c.c = &dummyChannel{}
    -		return false
    -	}
    -	return true
    -}
    -
    -// deleteSelfIfReady tries to delete the channel itself from the channelz database.
    -// The delete process includes two steps:
    -//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
    -//     parent's child list.
    -//  2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
    -//     will return entry not found error.
    -func (c *channel) deleteSelfIfReady() {
    -	if !c.deleteSelfFromTree() {
    -		return
    -	}
    -	if !c.deleteSelfFromMap() {
    -		return
    -	}
    -	c.cm.deleteEntry(c.id)
    -	c.trace.clear()
    -}
    -
    -func (c *channel) getChannelTrace() *channelTrace {
    -	return c.trace
    -}
    -
    -func (c *channel) incrTraceRefCount() {
    -	atomic.AddInt32(&c.traceRefCount, 1)
    -}
    -
    -func (c *channel) decrTraceRefCount() {
    -	atomic.AddInt32(&c.traceRefCount, -1)
    -}
    -
    -func (c *channel) getTraceRefCount() int {
    -	i := atomic.LoadInt32(&c.traceRefCount)
    -	return int(i)
    -}
    -
    -func (c *channel) getRefName() string {
    -	return c.refName
    -}
    -
    -type subChannel struct {
    -	refName       string
    -	c             Channel
    -	closeCalled   bool
    -	sockets       map[int64]string
    -	id            int64
    -	pid           int64
    -	cm            *channelMap
    -	trace         *channelTrace
    -	traceRefCount int32
    -}
    -
    -func (sc *subChannel) addChild(id int64, e entry) {
    -	if v, ok := e.(*normalSocket); ok {
    -		sc.sockets[id] = v.refName
    -	} else {
    -		logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
    -	}
    -}
    -
    -func (sc *subChannel) deleteChild(id int64) {
    -	delete(sc.sockets, id)
    -	sc.deleteSelfIfReady()
    -}
    -
    -func (sc *subChannel) triggerDelete() {
    -	sc.closeCalled = true
    -	sc.deleteSelfIfReady()
    -}
    -
    -func (sc *subChannel) getParentID() int64 {
    -	return sc.pid
    -}
    -
    -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
    -// means deleting the subchannel reference from its parent's child list.
    -//
    -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
    -// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
    -//
    -// The returned boolean value indicates whether the channel has been successfully deleted from tree.
    -func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
    -	if !sc.closeCalled || len(sc.sockets) != 0 {
    -		return false
    -	}
    -	sc.cm.findEntry(sc.pid).deleteChild(sc.id)
    -	return true
    -}
    -
    -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
    -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
    -// the subchannel, and its memory will be garbage collected.
    -//
    -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
    -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
    -// the trace of the referenced entity must not be deleted. In order to release the resource allocated
    -// by grpc, the reference to the grpc object is reset to a dummy object.
    -//
    -// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
    -//
    -// It returns a bool to indicate whether the channel can be safely deleted from map.
    -func (sc *subChannel) deleteSelfFromMap() (delete bool) {
    -	if sc.getTraceRefCount() != 0 {
    -		// free the grpc struct (i.e. addrConn)
    -		sc.c = &dummyChannel{}
    -		return false
    -	}
    -	return true
    -}
    -
    -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
    -// The delete process includes two steps:
    -//  1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
    -//     its parent's child list.
    -//  2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
    -//     by id will return entry not found error.
    -func (sc *subChannel) deleteSelfIfReady() {
    -	if !sc.deleteSelfFromTree() {
    -		return
    -	}
    -	if !sc.deleteSelfFromMap() {
    -		return
    -	}
    -	sc.cm.deleteEntry(sc.id)
    -	sc.trace.clear()
    -}
    -
    -func (sc *subChannel) getChannelTrace() *channelTrace {
    -	return sc.trace
    -}
    -
    -func (sc *subChannel) incrTraceRefCount() {
    -	atomic.AddInt32(&sc.traceRefCount, 1)
    -}
    -
    -func (sc *subChannel) decrTraceRefCount() {
    -	atomic.AddInt32(&sc.traceRefCount, -1)
    -}
    -
    -func (sc *subChannel) getTraceRefCount() int {
    -	i := atomic.LoadInt32(&sc.traceRefCount)
    -	return int(i)
    -}
    -
    -func (sc *subChannel) getRefName() string {
    -	return sc.refName
    -}
    -
    -// SocketMetric defines the info channelz provides for a specific Socket, which
    -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
    -type SocketMetric struct {
    -	// ID is the channelz id of this socket.
    -	ID int64
    -	// RefName is the human readable reference string of this socket.
    -	RefName string
    -	// SocketData contains socket internal metric reported by the socket through
    -	// ChannelzMetric().
    -	SocketData *SocketInternalMetric
    -}
    -
    -// SocketInternalMetric defines the struct that the implementor of Socket interface
    -// should return from ChannelzMetric().
    -type SocketInternalMetric struct {
    -	// The number of streams that have been started.
    -	StreamsStarted int64
    -	// The number of streams that have ended successfully:
    -	// On client side, receiving frame with eos bit set.
    -	// On server side, sending frame with eos bit set.
    -	StreamsSucceeded int64
    -	// The number of streams that have ended unsuccessfully:
    -	// On client side, termination without receiving frame with eos bit set.
    -	// On server side, termination without sending frame with eos bit set.
    -	StreamsFailed int64
    -	// The number of messages successfully sent on this socket.
    -	MessagesSent     int64
    -	MessagesReceived int64
    -	// The number of keep alives sent.  This is typically implemented with HTTP/2
    -	// ping messages.
    -	KeepAlivesSent int64
    -	// The last time a stream was created by this endpoint.  Usually unset for
    -	// servers.
    -	LastLocalStreamCreatedTimestamp time.Time
    -	// The last time a stream was created by the remote endpoint.  Usually unset
    -	// for clients.
    -	LastRemoteStreamCreatedTimestamp time.Time
    -	// The last time a message was sent by this endpoint.
    -	LastMessageSentTimestamp time.Time
    -	// The last time a message was received by this endpoint.
    -	LastMessageReceivedTimestamp time.Time
    -	// The amount of window, granted to the local endpoint by the remote endpoint.
    -	// This may be slightly out of date due to network latency.  This does NOT
    -	// include stream level or TCP level flow control info.
    -	LocalFlowControlWindow int64
    -	// The amount of window, granted to the remote endpoint by the local endpoint.
    -	// This may be slightly out of date due to network latency.  This does NOT
    -	// include stream level or TCP level flow control info.
    -	RemoteFlowControlWindow int64
    -	// The locally bound address.
    -	LocalAddr net.Addr
    -	// The remote bound address.  May be absent.
    -	RemoteAddr net.Addr
    -	// Optional, represents the name of the remote endpoint, if different than
    -	// the original target name.
    -	RemoteName    string
    -	SocketOptions *SocketOptionData
    -	Security      credentials.ChannelzSecurityValue
    -}
    -
    -// Socket is the interface that should be satisfied in order to be tracked by
    -// channelz as Socket.
    -type Socket interface {
    -	ChannelzMetric() *SocketInternalMetric
    -}
    -
    -type listenSocket struct {
    -	refName string
    -	s       Socket
    -	id      int64
    -	pid     int64
    -	cm      *channelMap
    -}
    -
    -func (ls *listenSocket) addChild(id int64, e entry) {
    -	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
    -}
    -
    -func (ls *listenSocket) deleteChild(id int64) {
    -	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
    -}
    -
    -func (ls *listenSocket) triggerDelete() {
    -	ls.cm.deleteEntry(ls.id)
    -	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
    -}
    -
    -func (ls *listenSocket) deleteSelfIfReady() {
    -	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
    -}
    -
    -func (ls *listenSocket) getParentID() int64 {
    -	return ls.pid
    -}
    -
    -type normalSocket struct {
    -	refName string
    -	s       Socket
    -	id      int64
    -	pid     int64
    -	cm      *channelMap
    -}
    -
    -func (ns *normalSocket) addChild(id int64, e entry) {
    -	logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
    -}
    -
    -func (ns *normalSocket) deleteChild(id int64) {
    -	logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
    -}
    -
    -func (ns *normalSocket) triggerDelete() {
    -	ns.cm.deleteEntry(ns.id)
    -	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
    -}
    -
    -func (ns *normalSocket) deleteSelfIfReady() {
    -	logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
    -}
    -
    -func (ns *normalSocket) getParentID() int64 {
    -	return ns.pid
    -}
    -
    -// ServerMetric defines the info channelz provides for a specific Server, which
    -// includes ServerInternalMetric and channelz-specific data, such as channelz id,
    -// child list, etc.
    -type ServerMetric struct {
    -	// ID is the channelz id of this server.
    -	ID int64
    -	// RefName is the human readable reference string of this server.
    -	RefName string
    -	// ServerData contains server internal metric reported by the server through
    -	// ChannelzMetric().
    -	ServerData *ServerInternalMetric
    -	// ListenSockets tracks the listener socket type children of this server in the
    -	// format of a map from socket channelz id to corresponding reference string.
    -	ListenSockets map[int64]string
    -}
    -
    -// ServerInternalMetric defines the struct that the implementor of Server interface
    -// should return from ChannelzMetric().
    -type ServerInternalMetric struct {
    -	// The number of incoming calls started on the server.
    -	CallsStarted int64
    -	// The number of incoming calls that have completed with an OK status.
    -	CallsSucceeded int64
    -	// The number of incoming calls that have a completed with a non-OK status.
    -	CallsFailed int64
    -	// The last time a call was started on the server.
    -	LastCallStartedTimestamp time.Time
    -}
    -
    -// Server is the interface to be satisfied in order to be tracked by channelz as
    -// Server.
    -type Server interface {
    -	ChannelzMetric() *ServerInternalMetric
    -}
    -
    -type server struct {
    -	refName       string
    -	s             Server
    -	closeCalled   bool
    -	sockets       map[int64]string
    -	listenSockets map[int64]string
    -	id            int64
    -	cm            *channelMap
    -}
    -
    -func (s *server) addChild(id int64, e entry) {
    -	switch v := e.(type) {
    -	case *normalSocket:
    -		s.sockets[id] = v.refName
    -	case *listenSocket:
    -		s.listenSockets[id] = v.refName
    -	default:
    -		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
    -	}
    -}
    -
    -func (s *server) deleteChild(id int64) {
    -	delete(s.sockets, id)
    -	delete(s.listenSockets, id)
    -	s.deleteSelfIfReady()
    -}
    -
    -func (s *server) triggerDelete() {
    -	s.closeCalled = true
    -	s.deleteSelfIfReady()
    -}
    -
    -func (s *server) deleteSelfIfReady() {
    -	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
    -		return
    -	}
    -	s.cm.deleteEntry(s.id)
    -}
    -
    -func (s *server) getParentID() int64 {
    -	return 0
    -}
    -
    -type tracedChannel interface {
    -	getChannelTrace() *channelTrace
    -	incrTraceRefCount()
    -	decrTraceRefCount()
    -	getRefName() string
    -}
    -
    -type channelTrace struct {
    -	cm          *channelMap
    -	clearCalled bool
    -	createdTime time.Time
    -	eventCount  int64
    -	mu          sync.Mutex
    -	events      []*TraceEvent
    -}
    -
    -func (c *channelTrace) append(e *TraceEvent) {
    -	c.mu.Lock()
    -	if len(c.events) == getMaxTraceEntry() {
    -		del := c.events[0]
    -		c.events = c.events[1:]
    -		if del.RefID != 0 {
    -			// start recursive cleanup in a goroutine to not block the call originated from grpc.
    -			go func() {
    -				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
    -				c.cm.mu.Lock()
    -				c.cm.decrTraceRefCount(del.RefID)
    -				c.cm.mu.Unlock()
    -			}()
    -		}
    -	}
    -	e.Timestamp = time.Now()
    -	c.events = append(c.events, e)
    -	c.eventCount++
    -	c.mu.Unlock()
    -}
    -
    -func (c *channelTrace) clear() {
    -	if c.clearCalled {
    -		return
    -	}
    -	c.clearCalled = true
    -	c.mu.Lock()
    -	for _, e := range c.events {
    -		if e.RefID != 0 {
    -			// caller should have already held the c.cm.mu lock.
    -			c.cm.decrTraceRefCount(e.RefID)
    -		}
    -	}
    -	c.mu.Unlock()
    -}
    -
    -// Severity is the severity level of a trace event.
    -// The canonical enumeration of all valid values is here:
    -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
    -type Severity int
    -
    -const (
    -	// CtUnknown indicates unknown severity of a trace event.
    -	CtUnknown Severity = iota
    -	// CtInfo indicates info level severity of a trace event.
    -	CtInfo
    -	// CtWarning indicates warning level severity of a trace event.
    -	CtWarning
    -	// CtError indicates error level severity of a trace event.
    -	CtError
    -)
    -
    -// RefChannelType is the type of the entity being referenced in a trace event.
    -type RefChannelType int
    -
    -const (
    -	// RefUnknown indicates an unknown entity type, the zero value for this type.
    -	RefUnknown RefChannelType = iota
    -	// RefChannel indicates the referenced entity is a Channel.
    -	RefChannel
    -	// RefSubChannel indicates the referenced entity is a SubChannel.
    -	RefSubChannel
    -	// RefServer indicates the referenced entity is a Server.
    -	RefServer
    -	// RefListenSocket indicates the referenced entity is a ListenSocket.
    -	RefListenSocket
    -	// RefNormalSocket indicates the referenced entity is a NormalSocket.
    -	RefNormalSocket
    -)
    -
    -var refChannelTypeToString = map[RefChannelType]string{
    -	RefUnknown:      "Unknown",
    -	RefChannel:      "Channel",
    -	RefSubChannel:   "SubChannel",
    -	RefServer:       "Server",
    -	RefListenSocket: "ListenSocket",
    -	RefNormalSocket: "NormalSocket",
    -}
    -
    -func (r RefChannelType) String() string {
    -	return refChannelTypeToString[r]
    -}
    -
    -func (c *channelTrace) dumpData() *ChannelTrace {
    -	c.mu.Lock()
    -	ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
    -	ct.Events = c.events[:len(c.events)]
    -	c.mu.Unlock()
    -	return ct
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
    deleted file mode 100644
    index 1b1c4cce34..0000000000
    --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
    +++ /dev/null
    @@ -1,51 +0,0 @@
    -/*
    - *
    - * Copyright 2018 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package channelz
    -
    -import (
    -	"syscall"
    -
    -	"golang.org/x/sys/unix"
    -)
    -
    -// SocketOptionData defines the struct to hold socket option data, and related
    -// getter function to obtain info from fd.
    -type SocketOptionData struct {
    -	Linger      *unix.Linger
    -	RecvTimeout *unix.Timeval
    -	SendTimeout *unix.Timeval
    -	TCPInfo     *unix.TCPInfo
    -}
    -
    -// Getsockopt defines the function to get socket options requested by channelz.
    -// It is to be passed to syscall.RawConn.Control().
    -func (s *SocketOptionData) Getsockopt(fd uintptr) {
    -	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
    -		s.Linger = v
    -	}
    -	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
    -		s.RecvTimeout = v
    -	}
    -	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
    -		s.SendTimeout = v
    -	}
    -	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
    -		s.TCPInfo = v
    -	}
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
    deleted file mode 100644
    index 8b06eed1ab..0000000000
    --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
    +++ /dev/null
    @@ -1,43 +0,0 @@
    -//go:build !linux
    -// +build !linux
    -
    -/*
    - *
    - * Copyright 2018 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package channelz
    -
    -import (
    -	"sync"
    -)
    -
    -var once sync.Once
    -
    -// SocketOptionData defines the struct to hold socket option data, and related
    -// getter function to obtain info from fd.
    -// Windows OS doesn't support Socket Option
    -type SocketOptionData struct {
    -}
    -
    -// Getsockopt defines the function to get socket options requested by channelz.
    -// It is to be passed to syscall.RawConn.Control().
    -// Windows OS doesn't support Socket Option
    -func (s *SocketOptionData) Getsockopt(fd uintptr) {
    -	once.Do(func() {
    -		logger.Warning("Channelz: socket options are not supported on non-linux environments")
    -	})
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
    deleted file mode 100644
    index 98288c3f86..0000000000
    --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
    +++ /dev/null
    @@ -1,37 +0,0 @@
    -/*
    - *
    - * Copyright 2018 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package channelz
    -
    -import (
    -	"syscall"
    -)
    -
    -// GetSocketOption gets the socket option info of the conn.
    -func GetSocketOption(socket any) *SocketOptionData {
    -	c, ok := socket.(syscall.Conn)
    -	if !ok {
    -		return nil
    -	}
    -	data := &SocketOptionData{}
    -	if rawConn, err := c.SyscallConn(); err == nil {
    -		rawConn.Control(data.Getsockopt)
    -		return data
    -	}
    -	return nil
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
    deleted file mode 100644
    index b5568b22e2..0000000000
    --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
    +++ /dev/null
    @@ -1,27 +0,0 @@
    -//go:build !linux
    -// +build !linux
    -
    -/*
    - *
    - * Copyright 2018 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package channelz
    -
    -// GetSocketOption gets the socket option info of the conn.
    -func GetSocketOption(c any) *SocketOptionData {
    -	return nil
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
    index 685a3cb41b..6e7dd6b772 100644
    --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
    +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
    @@ -28,9 +28,6 @@ import (
     var (
     	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
     	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
    -	// AdvertiseCompressors is set if registered compressor should be advertised
    -	// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
    -	AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
     	// RingHashCap indicates the maximum ring size which defaults to 4096
     	// entries but may be overridden by setting the environment variable
     	// "GRPC_RING_HASH_CAP".  This does not override the default bounds
    @@ -43,6 +40,21 @@ var (
     	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
     	// handshakes that can be performed.
     	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
    +	// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
    +	// should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this
    +	// option is present for backward compatibility. This option may be overridden
    +	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
    +	// or "false".
    +	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
    +	// XDSFallbackSupport is the env variable that controls whether support for
    +	// xDS fallback is turned on. If this is unset or is false, only the first
    +	// xDS server in the list of server configs will be used.
    +	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
    +	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
    +	// instead of the exiting pickfirst implementation. This can be enabled by
    +	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
    +	// to "true".
    +	NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
     )
     
     func boolFromEnv(envVar string, def bool) bool {
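Note on the new toggles above: they are read once at package init, so they must be in the process environment before the gRPC binary starts. A hypothetical launcher sketch (the server path and the chosen flag values are illustrative, not part of this change):

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Hypothetical wrapper process: set the env toggles, then run the real server.
	cmd := exec.Command("/usr/local/bin/my-grpc-server")
	cmd.Env = append(os.Environ(),
		"GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST=true", // opt into the new pickfirst leaf policy
		"GRPC_ENFORCE_ALPN_ENABLED=false",              // keep legacy non-ALPN TLS peers working
	)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}
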
    diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
    index 7f7044e173..7617be2158 100644
    --- a/vendor/google.golang.org/grpc/internal/experimental.go
    +++ b/vendor/google.golang.org/grpc/internal/experimental.go
    @@ -18,11 +18,11 @@
     package internal
     
     var (
    -	// WithRecvBufferPool is implemented by the grpc package and returns a dial
    +	// WithBufferPool is implemented by the grpc package and returns a dial
     	// option to configure a shared buffer pool for a grpc.ClientConn.
    -	WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
    +	WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
     
    -	// RecvBufferPool is implemented by the grpc package and returns a server
    +	// BufferPool is implemented by the grpc package and returns a server
     	// option to configure a shared buffer pool for a grpc.Server.
    -	RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
    +	BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
     )
    diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
    index 6717b757f8..43423d8ad9 100644
    --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
    +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
    @@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool {
     		name = strings.TrimSpace(name)
     		return name == "Google" || name == "Google Compute Engine"
     	case "windows":
    -		name = strings.Replace(name, " ", "", -1)
    -		name = strings.Replace(name, "\n", "", -1)
    -		name = strings.Replace(name, "\r", "", -1)
    +		name = strings.ReplaceAll(name, " ", "")
    +		name = strings.ReplaceAll(name, "\n", "")
    +		name = strings.ReplaceAll(name, "\r", "")
     		return name == "Google"
     	default:
     		return false
    diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
    deleted file mode 100644
    index bfc45102ab..0000000000
    --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
    +++ /dev/null
    @@ -1,126 +0,0 @@
    -/*
    - *
    - * Copyright 2020 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -// Package grpclog (internal) defines depth logging for grpc.
    -package grpclog
    -
    -import (
    -	"os"
    -)
    -
    -// Logger is the logger used for the non-depth log functions.
    -var Logger LoggerV2
    -
    -// DepthLogger is the logger used for the depth log functions.
    -var DepthLogger DepthLoggerV2
    -
    -// InfoDepth logs to the INFO log at the specified depth.
    -func InfoDepth(depth int, args ...any) {
    -	if DepthLogger != nil {
    -		DepthLogger.InfoDepth(depth, args...)
    -	} else {
    -		Logger.Infoln(args...)
    -	}
    -}
    -
    -// WarningDepth logs to the WARNING log at the specified depth.
    -func WarningDepth(depth int, args ...any) {
    -	if DepthLogger != nil {
    -		DepthLogger.WarningDepth(depth, args...)
    -	} else {
    -		Logger.Warningln(args...)
    -	}
    -}
    -
    -// ErrorDepth logs to the ERROR log at the specified depth.
    -func ErrorDepth(depth int, args ...any) {
    -	if DepthLogger != nil {
    -		DepthLogger.ErrorDepth(depth, args...)
    -	} else {
    -		Logger.Errorln(args...)
    -	}
    -}
    -
    -// FatalDepth logs to the FATAL log at the specified depth.
    -func FatalDepth(depth int, args ...any) {
    -	if DepthLogger != nil {
    -		DepthLogger.FatalDepth(depth, args...)
    -	} else {
    -		Logger.Fatalln(args...)
    -	}
    -	os.Exit(1)
    -}
    -
    -// LoggerV2 does underlying logging work for grpclog.
    -// This is a copy of the LoggerV2 defined in the external grpclog package. It
    -// is defined here to avoid a circular dependency.
    -type LoggerV2 interface {
    -	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
    -	Info(args ...any)
    -	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
    -	Infoln(args ...any)
    -	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
    -	Infof(format string, args ...any)
    -	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
    -	Warning(args ...any)
    -	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
    -	Warningln(args ...any)
    -	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
    -	Warningf(format string, args ...any)
    -	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
    -	Error(args ...any)
    -	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
    -	Errorln(args ...any)
    -	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
    -	Errorf(format string, args ...any)
    -	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
    -	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    -	// Implementations may also call os.Exit() with a non-zero exit code.
    -	Fatal(args ...any)
    -	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
    -	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    -	// Implementations may also call os.Exit() with a non-zero exit code.
    -	Fatalln(args ...any)
    -	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
    -	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
    -	// Implementations may also call os.Exit() with a non-zero exit code.
    -	Fatalf(format string, args ...any)
    -	// V reports whether verbosity level l is at least the requested verbose level.
    -	V(l int) bool
    -}
    -
    -// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
    -// DepthLoggerV2, the below functions will be called with the appropriate stack
    -// depth set for trivial functions the logger may ignore.
    -// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
    -// It is defined here to avoid a circular dependency.
    -//
    -// # Experimental
    -//
    -// Notice: This type is EXPERIMENTAL and may be changed or removed in a
    -// later release.
    -type DepthLoggerV2 interface {
    -	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	InfoDepth(depth int, args ...any)
    -	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	WarningDepth(depth int, args ...any)
    -	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	ErrorDepth(depth int, args ...any)
    -	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
    -	FatalDepth(depth int, args ...any)
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
    deleted file mode 100644
    index faa998de76..0000000000
    --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
    +++ /dev/null
    @@ -1,93 +0,0 @@
    -/*
    - *
    - * Copyright 2020 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package grpclog
    -
    -import (
    -	"fmt"
    -)
    -
    -// PrefixLogger does logging with a prefix.
    -//
    -// Logging method on a nil logs without any prefix.
    -type PrefixLogger struct {
    -	logger DepthLoggerV2
    -	prefix string
    -}
    -
    -// Infof does info logging.
    -func (pl *PrefixLogger) Infof(format string, args ...any) {
    -	if pl != nil {
    -		// Handle nil, so the tests can pass in a nil logger.
    -		format = pl.prefix + format
    -		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
    -		return
    -	}
    -	InfoDepth(1, fmt.Sprintf(format, args...))
    -}
    -
    -// Warningf does warning logging.
    -func (pl *PrefixLogger) Warningf(format string, args ...any) {
    -	if pl != nil {
    -		format = pl.prefix + format
    -		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
    -		return
    -	}
    -	WarningDepth(1, fmt.Sprintf(format, args...))
    -}
    -
    -// Errorf does error logging.
    -func (pl *PrefixLogger) Errorf(format string, args ...any) {
    -	if pl != nil {
    -		format = pl.prefix + format
    -		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
    -		return
    -	}
    -	ErrorDepth(1, fmt.Sprintf(format, args...))
    -}
    -
    -// Debugf does info logging at verbose level 2.
    -func (pl *PrefixLogger) Debugf(format string, args ...any) {
    -	// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
    -	// rewrite PrefixLogger a little to ensure that we don't use the global
    -	// `Logger` here, and instead use the `logger` field.
    -	if !Logger.V(2) {
    -		return
    -	}
    -	if pl != nil {
    -		// Handle nil, so the tests can pass in a nil logger.
    -		format = pl.prefix + format
    -		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
    -		return
    -	}
    -	InfoDepth(1, fmt.Sprintf(format, args...))
    -
    -}
    -
    -// V reports whether verbosity level l is at least the requested verbose level.
    -func (pl *PrefixLogger) V(l int) bool {
    -	// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
    -	// rewrite PrefixLogger a little to ensure that we don't use the global
    -	// `Logger` here, and instead use the `logger` field.
    -	return Logger.V(l)
    -}
    -
    -// NewPrefixLogger creates a prefix logger with the given prefix.
    -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
    -	return &PrefixLogger{logger: logger, prefix: prefix}
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
    new file mode 100644
    index 0000000000..092ad187a2
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
    @@ -0,0 +1,79 @@
    +/*
    + *
    + * Copyright 2020 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package grpclog provides logging functionality for internal gRPC packages,
    +// outside of the functionality provided by the external `grpclog` package.
    +package grpclog
    +
    +import (
    +	"fmt"
    +
    +	"google.golang.org/grpc/grpclog"
    +)
    +
    +// PrefixLogger does logging with a prefix.
    +//
    +// Logging method on a nil logs without any prefix.
    +type PrefixLogger struct {
    +	logger grpclog.DepthLoggerV2
    +	prefix string
    +}
    +
    +// Infof does info logging.
    +func (pl *PrefixLogger) Infof(format string, args ...any) {
    +	if pl != nil {
    +		// Handle nil, so the tests can pass in a nil logger.
    +		format = pl.prefix + format
    +		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
    +		return
    +	}
    +	grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
    +}
    +
    +// Warningf does warning logging.
    +func (pl *PrefixLogger) Warningf(format string, args ...any) {
    +	if pl != nil {
    +		format = pl.prefix + format
    +		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
    +		return
    +	}
    +	grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
    +}
    +
    +// Errorf does error logging.
    +func (pl *PrefixLogger) Errorf(format string, args ...any) {
    +	if pl != nil {
    +		format = pl.prefix + format
    +		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
    +		return
    +	}
    +	grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
    +}
    +
    +// V reports whether verbosity level l is at least the requested verbose level.
    +func (pl *PrefixLogger) V(l int) bool {
    +	if pl != nil {
    +		return pl.logger.V(l)
    +	}
    +	return true
    +}
    +
    +// NewPrefixLogger creates a prefix logger with the given prefix.
    +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
    +	return &PrefixLogger{logger: logger, prefix: prefix}
    +}
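The relocated prefix_logger.go is only importable from inside the grpc module. A minimal sketch of how an internal component might construct a PrefixLogger (component name and entity type are made up):

package somecomponent

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

// Component-scoped logger, as gRPC's own balancers do it; the name is hypothetical.
var logger = grpclog.Component("somecomponent")

// newEntityLogger returns a logger whose lines carry the entity's identity,
// e.g. "[somecomponent-picker 0xc0000b4000] ...".
func newEntityLogger(entity any) *internalgrpclog.PrefixLogger {
	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[somecomponent-picker %p] ", entity))
}
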
    diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
    deleted file mode 100644
    index aa97273e7d..0000000000
    --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
    +++ /dev/null
    @@ -1,95 +0,0 @@
    -/*
    - *
    - * Copyright 2018 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -// Package grpcrand implements math/rand functions in a concurrent-safe way
    -// with a global random source, independent of math/rand's global source.
    -package grpcrand
    -
    -import (
    -	"math/rand"
    -	"sync"
    -	"time"
    -)
    -
    -var (
    -	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
    -	mu sync.Mutex
    -)
    -
    -// Int implements rand.Int on the grpcrand global source.
    -func Int() int {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.Int()
    -}
    -
    -// Int63n implements rand.Int63n on the grpcrand global source.
    -func Int63n(n int64) int64 {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.Int63n(n)
    -}
    -
    -// Intn implements rand.Intn on the grpcrand global source.
    -func Intn(n int) int {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.Intn(n)
    -}
    -
    -// Int31n implements rand.Int31n on the grpcrand global source.
    -func Int31n(n int32) int32 {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.Int31n(n)
    -}
    -
    -// Float64 implements rand.Float64 on the grpcrand global source.
    -func Float64() float64 {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.Float64()
    -}
    -
    -// Uint64 implements rand.Uint64 on the grpcrand global source.
    -func Uint64() uint64 {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.Uint64()
    -}
    -
    -// Uint32 implements rand.Uint32 on the grpcrand global source.
    -func Uint32() uint32 {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.Uint32()
    -}
    -
    -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
    -func ExpFloat64() float64 {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	return r.ExpFloat64()
    -}
    -
    -// Shuffle implements rand.Shuffle on the grpcrand global source.
    -var Shuffle = func(n int, f func(int, int)) {
    -	mu.Lock()
    -	defer mu.Unlock()
    -	r.Shuffle(n, f)
    -}
    diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
    index f7f40a16ac..8e8e861280 100644
    --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
    +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
    @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
     	return cs
     }
     
    -// Schedule adds a callback to be scheduled after existing callbacks are run.
    +// TrySchedule tries to schedule the provided callback function f to be
    +// executed in the order it was added. This is a best-effort operation. If the
    +// context passed to NewCallbackSerializer was canceled before this method is
    +// called, the callback will not be scheduled.
     //
     // Callbacks are expected to honor the context when performing any blocking
     // operations, and should return early when the context is canceled.
    +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
    +	cs.callbacks.Put(f)
    +}
    +
    +// ScheduleOr schedules the provided callback function f to be executed in the
    +// order it was added. If the context passed to NewCallbackSerializer has been
    +// canceled before this method is called, the onFailure callback will be
    +// executed inline instead.
     //
    -// Return value indicates if the callback was successfully added to the list of
    -// callbacks to be executed by the serializer. It is not possible to add
    -// callbacks once the context passed to NewCallbackSerializer is cancelled.
    -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
    -	return cs.callbacks.Put(f) == nil
    +// Callbacks are expected to honor the context when performing any blocking
    +// operations, and should return early when the context is canceled.
    +func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
    +	if cs.callbacks.Put(f) != nil {
    +		onFailure()
    +	}
     }
     
     func (cs *CallbackSerializer) run(ctx context.Context) {
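The boolean-returning Schedule is split into two calls with explicit semantics. A short usage sketch, assuming internal-module access to grpcsync (names are illustrative):

package example

import (
	"context"

	"google.golang.org/grpc/internal/grpcsync"
)

func demo(work func(context.Context)) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: if ctx was already canceled, the callback is silently dropped.
	cs.TrySchedule(work)

	// Must-handle variant: if the serializer can no longer accept callbacks,
	// onFailure runs inline so the caller can clean up or report an error.
	cs.ScheduleOr(work, func() {
		// Scheduling failed: the serializer's context was canceled.
	})
}
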
    diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
    index aef8cec1ab..6d8c2f518d 100644
    --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
    +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
    @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
     
     	if ps.msg != nil {
     		msg := ps.msg
    -		ps.cs.Schedule(func(context.Context) {
    +		ps.cs.TrySchedule(func(context.Context) {
     			ps.mu.Lock()
     			defer ps.mu.Unlock()
     			if !ps.subscribers[sub] {
    @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
     	ps.msg = msg
     	for sub := range ps.subscribers {
     		s := sub
    -		ps.cs.Schedule(func(context.Context) {
    +		ps.cs.TrySchedule(func(context.Context) {
     			ps.mu.Lock()
     			defer ps.mu.Unlock()
     			if !ps.subscribers[s] {
    diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
    index 9f40909679..e8d866984b 100644
    --- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
    +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
    @@ -20,8 +20,6 @@ package grpcutil
     
     import (
     	"strings"
    -
    -	"google.golang.org/grpc/internal/envconfig"
     )
     
     // RegisteredCompressorNames holds names of the registered compressors.
    @@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool {
     // RegisteredCompressors returns a string of registered compressor names
     // separated by comma.
     func RegisteredCompressors() string {
    -	if !envconfig.AdvertiseCompressors {
    -		return ""
    -	}
     	return strings.Join(RegisteredCompressorNames, ",")
     }
    diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
    index ec62b4775e..683d1955c6 100644
    --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go
    +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
    @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
     }
     
     // baseContentType is the base content-type for gRPC.  This is a valid
    -// content-type on it's own, but can also include a content-subtype such as
    +// content-type on its own, but can also include a content-subtype such as
     // "proto" as a suffix after "+" or ";".  See
     // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
     // for more details.
    diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
    index fe49cb74c5..2c13ee9dac 100644
    --- a/vendor/google.golang.org/grpc/internal/idle/idle.go
    +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
    @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool {
     	return true
     }
     
    +// EnterIdleModeForTesting instructs the channel to enter idle mode.
     func (m *Manager) EnterIdleModeForTesting() {
     	m.tryEnterIdleMode()
     }
    @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error {
     		//   came in and OnCallBegin() noticed that the calls count is negative.
     		// - Channel is in idle mode, and multiple new RPCs come in at the same
     		//   time, all of them notice a negative calls count in OnCallBegin and get
    -		//   here. The first one to get the lock would got the channel to exit idle.
    +		//   here. The first one to get the lock would get the channel to exit idle.
     		// - Channel is not in idle mode, and the user calls Connect which calls
     		//   m.ExitIdleMode.
     		//
    @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool {
     	return atomic.LoadInt32(&m.closed) == 1
     }
     
    +// Close stops the timer associated with the Manager, if it exists.
     func (m *Manager) Close() {
     	atomic.StoreInt32(&m.closed, 1)
     
    diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
    index 6c7ea6a533..3afc181344 100644
    --- a/vendor/google.golang.org/grpc/internal/internal.go
    +++ b/vendor/google.golang.org/grpc/internal/internal.go
    @@ -29,8 +29,6 @@ import (
     )
     
     var (
    -	// WithHealthCheckFunc is set by dialoptions.go
    -	WithHealthCheckFunc any // func (HealthChecker) DialOption
     	// HealthCheckFunc is used to provide client-side LB channel health checking
     	HealthCheckFunc HealthChecker
     	// BalancerUnregister is exported by package balancer to unregister a balancer.
    @@ -106,6 +104,14 @@ var (
     	// This is used in the 1.0 release of gcp/observability, and thus must not be
     	// deleted or changed.
     	ClearGlobalDialOptions func()
    +
    +	// AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
    +	// configured for newly created ClientConns.
    +	AddGlobalPerTargetDialOptions any // func (opt any)
    +	// ClearGlobalPerTargetDialOptions clears the slice of global late apply
    +	// dial options.
    +	ClearGlobalPerTargetDialOptions func()
    +
     	// JoinDialOptions combines the dial options passed as arguments into a
     	// single dial option.
     	JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
    @@ -126,7 +132,8 @@ var (
     	// deleted or changed.
     	BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
     
    -	// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
    +	// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
    +	// provided grpc.ClientConn.
     	SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
     
     	// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
    @@ -140,6 +147,20 @@ var (
     	// other features, including the CSDS service.
     	NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
     
    +	// NewXDSResolverWithClientForTesting creates a new xDS resolver builder
    +	// using the provided xDS client instead of creating a new one using the
    +	// bootstrap configuration specified by the supported environment variables.
    +	// The resolver.Builder is meant to be used in conjunction with the
    +	// grpc.WithResolvers DialOption. The resolver.Builder does not take
    +	// ownership of the provided xDS client and it is the responsibility of the
    +	// caller to close the client when no longer required.
    +	//
    +	// Testing Only
    +	//
    +	// This function should ONLY be used for testing and may not work with some
    +	// other features, including the CSDS service.
    +	NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
    +
     	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
     	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
     	// variable.
    @@ -174,7 +195,7 @@ var (
     
     	// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
     	// metadata to RPCs.
    -	GRPCResolverSchemeExtraMetadata string = "xds"
    +	GRPCResolverSchemeExtraMetadata = "xds"
     
     	// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
     	EnterIdleModeForTesting any // func(*grpc.ClientConn)
    @@ -182,27 +203,49 @@ var (
     	// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
     	ExitIdleModeForTesting any // func(*grpc.ClientConn) error
     
    +	// ChannelzTurnOffForTesting disables the Channelz service for testing
    +	// purposes.
     	ChannelzTurnOffForTesting func()
     
    -	// TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found
    -	// error for a given resource type and name. This is usually triggered when
    -	// the associated watch timer fires. For testing purposes, having this
    -	// function makes events more predictable than relying on timer events.
    -	TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error
    +	// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
    +	// invoke resource-not-found error for the given resource type and name.
    +	TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
     
    -	// TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton
    -	// to invoke resource not found for a resource type name and resource name.
    -	TriggerXDSResourceNameNotFoundClient any // func(string, string) error
    -
    -	// FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD.
    +	// FromOutgoingContextRaw returns the un-merged, intermediary contents of
    +	// metadata.rawMD.
     	FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
    +
    +	// UserSetDefaultScheme is set to true if the user has overridden the
    +	// default resolver scheme.
    +	UserSetDefaultScheme = false
    +
    +	// ConnectedAddress returns the connected address for a SubConnState. The
    +	// address is only valid if the state is READY.
    +	ConnectedAddress any // func (scs SubConnState) resolver.Address
    +
    +	// SetConnectedAddress sets the connected address for a SubConnState.
    +	SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
    +
    +	// SnapshotMetricRegistryForTesting snapshots the global data of the metric
    +	// registry. Returns a cleanup function that sets the metric registry to its
    +	// original state. Only called in testing functions.
    +	SnapshotMetricRegistryForTesting func() func()
    +
    +	// SetDefaultBufferPoolForTesting updates the default buffer pool, for
    +	// testing purposes.
    +	SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
    +
    +	// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
    +	// testing purposes.
    +	SetBufferPoolingThresholdForTesting any // func(int)
     )
     
    -// HealthChecker defines the signature of the client-side LB channel health checking function.
    +// HealthChecker defines the signature of the client-side LB channel health
    +// checking function.
     //
     // The implementation is expected to create a health checking RPC stream by
     // calling newStream(), watch for the health status of serviceName, and report
    -// it's health back by calling setConnectivityState().
    +// its health back by calling setConnectivityState().
     //
     // The health checking protocol is defined at:
     // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
    @@ -224,3 +267,9 @@ const (
     // It currently has an experimental suffix which would be removed once
     // end-to-end testing of the policy is completed.
     const RLSLoadBalancingPolicyName = "rls_experimental"
    +
    +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation
    +// embedding.
    +type EnforceSubConnEmbedding interface {
    +	enforceSubConnEmbedding()
    +}
    diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
    index 7033191375..dbee7a60d7 100644
    --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go
    +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
    @@ -24,10 +24,8 @@ import (
     	"encoding/json"
     	"fmt"
     
    -	"github.com/golang/protobuf/jsonpb"
    -	protov1 "github.com/golang/protobuf/proto"
     	"google.golang.org/protobuf/encoding/protojson"
    -	protov2 "google.golang.org/protobuf/proto"
    +	"google.golang.org/protobuf/protoadapt"
     )
     
     const jsonIndent = "  "
    @@ -36,21 +34,14 @@ const jsonIndent = "  "
     //
     // If marshal fails, it falls back to fmt.Sprintf("%+v").
     func ToJSON(e any) string {
    -	switch ee := e.(type) {
    -	case protov1.Message:
    -		mm := jsonpb.Marshaler{Indent: jsonIndent}
    -		ret, err := mm.MarshalToString(ee)
    -		if err != nil {
    -			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
    -			// messages are not imported, and this will fail because the message
    -			// is not found.
    -			return fmt.Sprintf("%+v", ee)
    -		}
    -		return ret
    -	case protov2.Message:
    +	if ee, ok := e.(protoadapt.MessageV1); ok {
    +		e = protoadapt.MessageV2Of(ee)
    +	}
    +
    +	if ee, ok := e.(protoadapt.MessageV2); ok {
     		mm := protojson.MarshalOptions{
    -			Multiline: true,
     			Indent:    jsonIndent,
    +			Multiline: true,
     		}
     		ret, err := mm.Marshal(ee)
     		if err != nil {
    @@ -60,13 +51,13 @@ func ToJSON(e any) string {
     			return fmt.Sprintf("%+v", ee)
     		}
     		return string(ret)
    -	default:
    -		ret, err := json.MarshalIndent(ee, "", jsonIndent)
    -		if err != nil {
    -			return fmt.Sprintf("%+v", ee)
    -		}
    -		return string(ret)
     	}
    +
    +	ret, err := json.MarshalIndent(e, "", jsonIndent)
    +	if err != nil {
    +		return fmt.Sprintf("%+v", e)
    +	}
    +	return string(ret)
     }
     
     // FormatJSON formats the input json bytes with indentation.
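The jsonpb/proto-v1 branch collapses into a single protoadapt upgrade plus one protojson marshal. A standalone sketch of the same pattern, using durationpb purely as a stand-in message:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var msg any = durationpb.New(1500 * time.Millisecond) // any proto message, old- or new-style

	// Same shape as ToJSON above: upgrade a legacy (jsonpb-era) message to the
	// protobuf-go v2 API, then marshal once via protojson.
	if v1, ok := msg.(protoadapt.MessageV1); ok {
		msg = protoadapt.MessageV2Of(v1)
	}
	m2 := msg.(protoadapt.MessageV2)
	out, err := protojson.MarshalOptions{Indent: "  ", Multiline: true}.Marshal(m2)
	if err != nil {
		fmt.Printf("%+v\n", m2)
		return
	}
	fmt.Println(string(out)) // prints the JSON string "1.500s"
}
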
    diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
    index b66dcb2132..ba5c5a95d0 100644
    --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
    +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
    @@ -24,7 +24,9 @@ import (
     	"context"
     	"encoding/json"
     	"fmt"
    +	rand "math/rand/v2"
     	"net"
    +	"net/netip"
     	"os"
     	"strconv"
     	"strings"
    @@ -35,21 +37,35 @@ import (
     	"google.golang.org/grpc/grpclog"
     	"google.golang.org/grpc/internal/backoff"
     	"google.golang.org/grpc/internal/envconfig"
    -	"google.golang.org/grpc/internal/grpcrand"
     	"google.golang.org/grpc/internal/resolver/dns/internal"
     	"google.golang.org/grpc/resolver"
     	"google.golang.org/grpc/serviceconfig"
     )
     
    -// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
    -// addresses from SRV records.  Must not be changed after init time.
    -var EnableSRVLookups = false
    +var (
    +	// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
    +	// addresses from SRV records.  Must not be changed after init time.
    +	EnableSRVLookups = false
     
    -var logger = grpclog.Component("dns")
    +	// MinResolutionInterval is the minimum interval at which re-resolutions are
    +	// allowed. This helps to prevent excessive re-resolution.
    +	MinResolutionInterval = 30 * time.Second
    +
    +	// ResolvingTimeout specifies the maximum duration for a DNS resolution request.
    +	// If the timeout expires before a response is received, the request will be canceled.
    +	//
    +	// It is recommended to set this value at application startup. Avoid modifying this variable
    +	// after initialization as it's not thread-safe for concurrent modification.
    +	ResolvingTimeout = 30 * time.Second
    +
    +	logger = grpclog.Component("dns")
    +)
     
     func init() {
     	resolver.Register(NewBuilder())
     	internal.TimeAfterFunc = time.After
    +	internal.TimeNowFunc = time.Now
    +	internal.TimeUntilFunc = time.Until
     	internal.NewNetResolver = newNetResolver
     	internal.AddressDialer = addressDialer
     }
    @@ -107,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
     	}
     
     	// IP address.
    -	if ipAddr, ok := formatIP(host); ok {
    +	if ipAddr, err := formatIP(host); err == nil {
     		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
     		cc.UpdateState(resolver.State{Addresses: addr})
     		return deadResolver{}, nil
    @@ -162,7 +178,7 @@ type dnsResolver struct {
     	// finished. Otherwise, data race will be possible. [Race Example] in
     	// dns_resolver_test we replace the real lookup functions with mocked ones to
     	// facilitate testing. If Close() doesn't wait for watcher() goroutine
    -	// finishes, race detector sometimes will warns lookup (READ the lookup
    +	// finishes, race detector sometimes will warn lookup (READ the lookup
     	// function pointers) inside watcher() goroutine has data race with
     	// replaceNetFunc (WRITE the lookup function pointers).
     	wg                   sync.WaitGroup
    @@ -196,12 +212,12 @@ func (d *dnsResolver) watcher() {
     			err = d.cc.UpdateState(*state)
     		}
     
    -		var waitTime time.Duration
    +		var nextResolutionTime time.Time
     		if err == nil {
     			// Success resolving, wait for the next ResolveNow. However, also wait 30
     			// seconds at the very least to prevent constantly re-resolving.
     			backoffIndex = 1
    -			waitTime = internal.MinResolutionRate
    +			nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
     			select {
     			case <-d.ctx.Done():
     				return
    @@ -210,29 +226,31 @@ func (d *dnsResolver) watcher() {
     		} else {
     			// Poll on an error found in DNS Resolver or an error received from
     			// ClientConn.
    -			waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
    +			nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
     			backoffIndex++
     		}
     		select {
     		case <-d.ctx.Done():
     			return
    -		case <-internal.TimeAfterFunc(waitTime):
    +		case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
     		}
     	}
     }
     
    -func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
    -	if !EnableSRVLookups {
    +func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
    +	// Skip this particular host to avoid timeouts with some versions of
    +	// systemd-resolved.
    +	if !EnableSRVLookups || d.host == "metadata.google.internal." {
     		return nil, nil
     	}
     	var newAddrs []resolver.Address
    -	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
    +	_, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
     	if err != nil {
     		err = handleDNSError(err, "SRV") // may become nil
     		return nil, err
     	}
     	for _, s := range srvs {
    -		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
    +		lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
     		if err != nil {
     			err = handleDNSError(err, "A") // may become nil
     			if err == nil {
    @@ -243,9 +261,9 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
     			return nil, err
     		}
     		for _, a := range lbAddrs {
    -			ip, ok := formatIP(a)
    -			if !ok {
    -				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
    +			ip, err := formatIP(a)
    +			if err != nil {
    +				return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
     			}
     			addr := ip + ":" + strconv.Itoa(int(s.Port))
     			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
    @@ -269,8 +287,8 @@ func handleDNSError(err error, lookupType string) error {
     	return err
     }
     
    -func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
    -	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
    +func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
    +	ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
     	if err != nil {
     		if envconfig.TXTErrIgnore {
     			return nil
    @@ -297,17 +315,17 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
     	return d.cc.ParseServiceConfig(sc)
     }
     
    -func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
    -	addrs, err := d.resolver.LookupHost(d.ctx, d.host)
    +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) {
    +	addrs, err := d.resolver.LookupHost(ctx, d.host)
     	if err != nil {
     		err = handleDNSError(err, "A")
     		return nil, err
     	}
     	newAddrs := make([]resolver.Address, 0, len(addrs))
     	for _, a := range addrs {
    -		ip, ok := formatIP(a)
    -		if !ok {
    -			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
    +		ip, err := formatIP(a)
    +		if err != nil {
    +			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
     		}
     		addr := ip + ":" + d.port
     		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
    @@ -316,8 +334,10 @@ func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
     }
     
     func (d *dnsResolver) lookup() (*resolver.State, error) {
    -	srv, srvErr := d.lookupSRV()
    -	addrs, hostErr := d.lookupHost()
    +	ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout)
    +	defer cancel()
    +	srv, srvErr := d.lookupSRV(ctx)
    +	addrs, hostErr := d.lookupHost(ctx)
     	if hostErr != nil && (srvErr != nil || len(srv) == 0) {
     		return nil, hostErr
     	}
    @@ -327,24 +347,24 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
     		state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
     	}
     	if !d.disableServiceConfig {
    -		state.ServiceConfig = d.lookupTXT()
    +		state.ServiceConfig = d.lookupTXT(ctx)
     	}
     	return &state, nil
     }
     
    -// formatIP returns ok = false if addr is not a valid textual representation of
    -// an IP address. If addr is an IPv4 address, return the addr and ok = true.
    +// formatIP returns an error if addr is not a valid textual representation of
    +// an IP address. If addr is an IPv4 address, return the addr and error = nil.
     // If addr is an IPv6 address, return the addr enclosed in square brackets and
    -// ok = true.
    -func formatIP(addr string) (addrIP string, ok bool) {
    -	ip := net.ParseIP(addr)
    -	if ip == nil {
    -		return "", false
    +// error = nil.
    +func formatIP(addr string) (string, error) {
    +	ip, err := netip.ParseAddr(addr)
    +	if err != nil {
    +		return "", err
     	}
    -	if ip.To4() != nil {
    -		return addr, true
    +	if ip.Is4() {
    +		return addr, nil
     	}
    -	return "[" + addr + "]", true
    +	return "[" + addr + "]", nil
     }
     
     // parseTarget takes the user input target string and default port, returns
    @@ -360,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
     	if target == "" {
     		return "", "", internal.ErrMissingAddr
     	}
    -	if ip := net.ParseIP(target); ip != nil {
    +	if _, err := netip.ParseAddr(target); err == nil {
     		// target is an IPv4 or IPv6(without brackets) address
     		return target, defaultPort, nil
     	}
    @@ -408,7 +428,7 @@ func chosenByPercentage(a *int) bool {
     	if a == nil {
     		return true
     	}
    -	return grpcrand.Intn(100)+1 <= *a
    +	return rand.IntN(100)+1 <= *a
     }
     
     func canaryingSC(js string) string {
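formatIP now reports a parse error instead of a boolean and brackets IPv6 literals so they can be joined with ":port". Since the function is unexported, the standalone copy below is for illustration only:

package main

import (
	"fmt"
	"net/netip"
)

// formatIP mirrors the new netip-based helper above: IPv4 literals pass
// through, IPv6 literals gain square brackets, invalid input returns an error.
func formatIP(addr string) (string, error) {
	ip, err := netip.ParseAddr(addr)
	if err != nil {
		return "", err
	}
	if ip.Is4() {
		return addr, nil
	}
	return "[" + addr + "]", nil
}

func main() {
	for _, a := range []string{"10.0.0.1", "2001:db8::1", "not-an-ip"} {
		ip, err := formatIP(a)
		fmt.Println(a, "->", ip, err)
	}
}
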
    diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
    index c7fc557d00..c0eae4f5f8 100644
    --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
    +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
    @@ -28,7 +28,7 @@ import (
     
     // NetResolver groups the methods on net.Resolver that are used by the DNS
     // resolver implementation. This allows the default net.Resolver instance to be
    -// overidden from tests.
    +// overridden from tests.
     type NetResolver interface {
     	LookupHost(ctx context.Context, host string) (addrs []string, err error)
     	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
    @@ -50,16 +50,23 @@ var (
     
     // The following vars are overridden from tests.
     var (
    -	// MinResolutionRate is the minimum rate at which re-resolutions are
    -	// allowed. This helps to prevent excessive re-resolution.
    -	MinResolutionRate = 30 * time.Second
    -
     	// TimeAfterFunc is used by the DNS resolver to wait for the given duration
    -	// to elapse. In non-test code, this is implemented by time.After.  In test
    +	// to elapse. In non-test code, this is implemented by time.After. In test
     	// code, this can be used to control the amount of time the resolver is
     	// blocked waiting for the duration to elapse.
     	TimeAfterFunc func(time.Duration) <-chan time.Time
     
    +	// TimeNowFunc is used by the DNS resolver to get the current time.
    +	// In non-test code, this is implemented by time.Now. In test code,
    +	// this can be used to control the current time for the resolver.
    +	TimeNowFunc func() time.Time
    +
    +	// TimeUntilFunc is used by the DNS resolver to calculate the remaining
    +	// wait time for re-resolution. In non-test code, this is implemented by
    +	// time.Until. In test code, this can be used to control the remaining
    +	// time for resolver to wait for re-resolution.
    +	TimeUntilFunc func(time.Time) time.Duration
    +
     	// NewNetResolver returns the net.Resolver instance for the given target.
     	NewNetResolver func(string) (NetResolver, error)
     
    diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
    index afac56572a..b901c7bace 100644
    --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
    +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
    @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
     	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
     }
     
    -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
    +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
     
     func (*passthroughResolver) Close() {}
     
    diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
    new file mode 100644
    index 0000000000..fd33af51ae
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
    @@ -0,0 +1,42 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package stats provides internal stats related functionality.
    +package stats
    +
    +import "context"
    +
    +// Labels are the labels for metrics.
    +type Labels struct {
    +	// TelemetryLabels are the telemetry labels to record.
    +	TelemetryLabels map[string]string
    +}
    +
    +type labelsKey struct{}
    +
+// GetLabels returns the Labels stored in the context, or nil if there are none.
    +func GetLabels(ctx context.Context) *Labels {
    +	labels, _ := ctx.Value(labelsKey{}).(*Labels)
    +	return labels
    +}
    +
    +// SetLabels sets the Labels in the context.
    +func SetLabels(ctx context.Context, labels *Labels) context.Context {
    +	// could also append
    +	return context.WithValue(ctx, labelsKey{}, labels)
    +}
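A minimal sketch of the context round-trip these helpers provide, assuming code inside the grpc module (internal/stats is not importable by applications; the label key/value shown is hypothetical):

package example

import (
	"context"

	istats "google.golang.org/grpc/internal/stats"
)

// attachTelemetryLabels stores per-call telemetry labels in the context.
func attachTelemetryLabels(ctx context.Context) context.Context {
	return istats.SetLabels(ctx, &istats.Labels{
		TelemetryLabels: map[string]string{
			"grpc.lb.locality": "us-central1-a", // hypothetical label
		},
	})
}

// readTelemetryLabels retrieves them later, e.g. from a stats handler.
func readTelemetryLabels(ctx context.Context) map[string]string {
	if l := istats.GetLabels(ctx); l != nil {
		return l.TelemetryLabels
	}
	return nil
}
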
    diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
    new file mode 100644
    index 0000000000..79044657be
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
    @@ -0,0 +1,105 @@
    +/*
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package stats
    +
    +import (
    +	"fmt"
    +
    +	estats "google.golang.org/grpc/experimental/stats"
    +	"google.golang.org/grpc/stats"
    +)
    +
    +// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
    +//
    +// It eats any record calls where the label values provided do not match the
    +// number of label keys.
    +type MetricsRecorderList struct {
    +	// metricsRecorders are the metrics recorders this list will forward to.
    +	metricsRecorders []estats.MetricsRecorder
    +}
    +
    +// NewMetricsRecorderList creates a new metric recorder list with all the stats
    +// handlers provided which implement the MetricsRecorder interface.
    +// If no stats handlers provided implement the MetricsRecorder interface,
    +// the MetricsRecorder list returned is a no-op.
    +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
    +	var mrs []estats.MetricsRecorder
    +	for _, sh := range shs {
    +		if mr, ok := sh.(estats.MetricsRecorder); ok {
    +			mrs = append(mrs, mr)
    +		}
    +	}
    +	return &MetricsRecorderList{
    +		metricsRecorders: mrs,
    +	}
    +}
    +
    +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
    +	if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
    +		panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
    +	}
    +}
    +
    +// RecordInt64Count records the measurement alongside labels on the int
    +// count associated with the provided handle.
    +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
    +	verifyLabels(handle.Descriptor(), labels...)
    +
    +	for _, metricRecorder := range l.metricsRecorders {
    +		metricRecorder.RecordInt64Count(handle, incr, labels...)
    +	}
    +}
    +
    +// RecordFloat64Count records the measurement alongside labels on the float
    +// count associated with the provided handle.
    +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
    +	verifyLabels(handle.Descriptor(), labels...)
    +
    +	for _, metricRecorder := range l.metricsRecorders {
    +		metricRecorder.RecordFloat64Count(handle, incr, labels...)
    +	}
    +}
    +
    +// RecordInt64Histo records the measurement alongside labels on the int
    +// histo associated with the provided handle.
    +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
    +	verifyLabels(handle.Descriptor(), labels...)
    +
    +	for _, metricRecorder := range l.metricsRecorders {
    +		metricRecorder.RecordInt64Histo(handle, incr, labels...)
    +	}
    +}
    +
    +// RecordFloat64Histo records the measurement alongside labels on the float
    +// histo associated with the provided handle.
    +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
    +	verifyLabels(handle.Descriptor(), labels...)
    +
    +	for _, metricRecorder := range l.metricsRecorders {
    +		metricRecorder.RecordFloat64Histo(handle, incr, labels...)
    +	}
    +}
    +
    +// RecordInt64Gauge records the measurement alongside labels on the int
    +// gauge associated with the provided handle.
    +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
    +	verifyLabels(handle.Descriptor(), labels...)
    +
    +	for _, metricRecorder := range l.metricsRecorders {
    +		metricRecorder.RecordInt64Gauge(handle, incr, labels...)
    +	}
    +}
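
For orientation, MetricsRecorderList above is a simple fan-out: it keeps only the stats handlers that also implement the experimental MetricsRecorder interface and forwards every Record call to each of them. Below is a rough, self-contained sketch of that filtering-and-forwarding shape, using stand-in interfaces rather than the real estats/stats types (all names here are illustrative).

package main

import "fmt"

// recorder is a stand-in for estats.MetricsRecorder: anything that can
// record an int64 count.
type recorder interface {
	RecordInt64Count(name string, incr int64, labels ...string)
}

// handler is a stand-in for stats.Handler; only some handlers also
// implement recorder.
type handler interface{ Name() string }

type printRecorder struct{ id string }

func (p *printRecorder) Name() string { return p.id }
func (p *printRecorder) RecordInt64Count(name string, incr int64, labels ...string) {
	fmt.Println(p.id, name, incr, labels)
}

type plainHandler struct{}

func (plainHandler) Name() string { return "plain" }

// recorderList keeps only the handlers that implement recorder and fans
// every Record call out to all of them, mirroring MetricsRecorderList.
type recorderList struct{ recorders []recorder }

func newRecorderList(hs []handler) *recorderList {
	l := &recorderList{}
	for _, h := range hs {
		if r, ok := h.(recorder); ok {
			l.recorders = append(l.recorders, r)
		}
	}
	return l
}

func (l *recorderList) RecordInt64Count(name string, incr int64, labels ...string) {
	for _, r := range l.recorders {
		r.RecordInt64Count(name, incr, labels...)
	}
}

func main() {
	list := newRecorderList([]handler{&printRecorder{id: "otel"}, plainHandler{}})
	// Only printRecorder receives the call; plainHandler is skipped.
	list.RecordInt64Count("example.metric.started", 1, "some-target")
}
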
    diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
    index 03ef2fedd5..1186f1e9a9 100644
    --- a/vendor/google.golang.org/grpc/internal/status/status.go
    +++ b/vendor/google.golang.org/grpc/internal/status/status.go
    @@ -31,10 +31,11 @@ import (
     	"errors"
     	"fmt"
     
    -	"github.com/golang/protobuf/proto"
    -	"github.com/golang/protobuf/ptypes"
     	spb "google.golang.org/genproto/googleapis/rpc/status"
     	"google.golang.org/grpc/codes"
    +	"google.golang.org/protobuf/proto"
    +	"google.golang.org/protobuf/protoadapt"
    +	"google.golang.org/protobuf/types/known/anypb"
     )
     
     // Status represents an RPC status code, message, and details.  It is immutable
    @@ -130,36 +131,69 @@ func (s *Status) Err() error {
     
     // WithDetails returns a new status with the provided details messages appended to the status.
     // If any errors are encountered, it returns nil and the first error encountered.
    -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
    +func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
     	if s.Code() == codes.OK {
     		return nil, errors.New("no error details for status with code OK")
     	}
     	// s.Code() != OK implies that s.Proto() != nil.
     	p := s.Proto()
     	for _, detail := range details {
    -		any, err := ptypes.MarshalAny(detail)
    +		m, err := anypb.New(protoadapt.MessageV2Of(detail))
     		if err != nil {
     			return nil, err
     		}
    -		p.Details = append(p.Details, any)
    +		p.Details = append(p.Details, m)
     	}
     	return &Status{s: p}, nil
     }
     
     // Details returns a slice of details messages attached to the status.
     // If a detail cannot be decoded, the error is returned in place of the detail.
    +// If the detail can be decoded, the proto message returned is of the same
    +// type that was given to WithDetails().
     func (s *Status) Details() []any {
     	if s == nil || s.s == nil {
     		return nil
     	}
     	details := make([]any, 0, len(s.s.Details))
     	for _, any := range s.s.Details {
    -		detail := &ptypes.DynamicAny{}
    -		if err := ptypes.UnmarshalAny(any, detail); err != nil {
    +		detail, err := any.UnmarshalNew()
    +		if err != nil {
     			details = append(details, err)
     			continue
     		}
    -		details = append(details, detail.Message)
    +		// The call to MessageV1Of is required to unwrap the proto message if
    +		// it implemented only the MessageV1 API. The proto message would have
    +		// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
    +		// added to a global registry used by any.UnmarshalNew().
    +		// MessageV1Of has the following behaviour:
    +		// 1. If the given message is a wrapped MessageV1, it returns the
    +		//   unwrapped value.
    +		// 2. If the given message already implements MessageV1, it returns it
    +		//   as is.
    +		// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
    +		//
    +		// Since the Status.WithDetails() API only accepts MessageV1, calling
    +		// MessageV1Of ensures we return the same type that was given to
    +		// WithDetails:
    +		// * If the given type implemented only MessageV1, the unwrapping from
    +		//   point 1 above will restore the type.
    +		// * If the given type implemented both MessageV1 and MessageV2, point 2
    +		//   above will ensure no wrapping is performed.
    +		// * If the given type implemented only MessageV2 and was wrapped using
    +		//   MessageV1Of before passing to WithDetails(), it would be unwrapped
    +		//   in WithDetails by calling MessageV2Of(). Point 3 above will ensure
    +		//   that the type is wrapped in a MessageV1 wrapper again before
    +		//   returning. Note that protoc-gen-go doesn't generate code which
    +		//   implements ONLY MessageV2 at the time of writing.
    +		//
    +		// NOTE: Status details can also be added using the FromProto method.
    +		// This could theoretically allow passing a Detail message that only
    +		// implements the V2 API. In such a case the message will be wrapped in
    +		// a MessageV1 wrapper when fetched using Details().
    +		// Since protoc-gen-go generates only code that implements both V1 and
    +		// V2 APIs for backward compatibility, this is not a concern.
    +		details = append(details, protoadapt.MessageV1Of(detail))
     	}
     	return details
     }
    diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
    index 999f52cd75..54c24c2ff3 100644
    --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
    +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
    @@ -58,20 +58,20 @@ func GetRusage() *Rusage {
     
     // CPUTimeDiff returns the differences of user CPU time and system CPU time used
     // between two Rusage structs. It is a no-op function for non-linux environments.
    -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
    +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
     	log()
     	return 0, 0
     }
     
     // SetTCPUserTimeout is a no-op function under non-linux environments.
    -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
    +func SetTCPUserTimeout(net.Conn, time.Duration) error {
     	log()
     	return nil
     }
     
     // GetTCPUserTimeout is a no-op function under non-linux environments.
     // A negative return value indicates the operation is not supported
    -func GetTCPUserTimeout(conn net.Conn) (int, error) {
    +func GetTCPUserTimeout(net.Conn) (int, error) {
     	log()
     	return -1, nil
     }
    diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
    index 078137b7fd..7e7aaa5463 100644
    --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
    +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
    @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
     		// combination of unconditionally enabling TCP keepalives here, and
     		// disabling the overriding of TCP keepalive parameters by setting the
     		// KeepAlive field to a negative value above, results in OS defaults for
    -		// the TCP keealive interval and time parameters.
    +		// the TCP keepalive interval and time parameters.
     		Control: func(_, _ string, c syscall.RawConn) error {
     			return c.Control(func(fd uintptr) {
     				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
    diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
    index fd7d43a890..d5c1085eea 100644
    --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
    +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
    @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
     		// combination of unconditionally enabling TCP keepalives here, and
     		// disabling the overriding of TCP keepalive parameters by setting the
     		// KeepAlive field to a negative value above, results in OS defaults for
    -		// the TCP keealive interval and time parameters.
    +		// the TCP keepalive interval and time parameters.
     		Control: func(_, _ string, c syscall.RawConn) error {
     			return c.Control(func(fd uintptr) {
     				windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
    diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
    new file mode 100644
    index 0000000000..8ed347c541
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
    @@ -0,0 +1,144 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package transport
    +
    +import (
    +	"sync/atomic"
    +
    +	"golang.org/x/net/http2"
    +	"google.golang.org/grpc/mem"
    +	"google.golang.org/grpc/metadata"
    +	"google.golang.org/grpc/status"
    +)
    +
    +// ClientStream implements streaming functionality for a gRPC client.
    +type ClientStream struct {
    +	*Stream // Embed for common stream functionality.
    +
    +	ct       *http2Client
    +	done     chan struct{} // closed at the end of stream to unblock writers.
    +	doneFunc func()        // invoked at the end of stream.
    +
    +	headerChan       chan struct{} // closed to indicate the end of header metadata.
    +	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
    +	// headerValid indicates whether a valid header was received.  Only
    +	// meaningful after headerChan is closed (always call waitOnHeader() before
    +	// reading its value).
    +	headerValid bool
    +	header      metadata.MD // the received header metadata
    +	noHeaders   bool        // set if the client never received headers (set only after the stream is done).
    +
    +	bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
    +	unprocessed   atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
    +
    +	status *status.Status // the status error received from the server
    +}
    +
    +// Read reads an n byte message from the input stream.
    +func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
    +	b, err := s.Stream.read(n)
    +	if err == nil {
    +		s.ct.incrMsgRecv()
    +	}
    +	return b, err
    +}
    +
    +// Close closes the stream and propagates err to any readers.
    +func (s *ClientStream) Close(err error) {
    +	var (
    +		rst     bool
    +		rstCode http2.ErrCode
    +	)
    +	if err != nil {
    +		rst = true
    +		rstCode = http2.ErrCodeCancel
    +	}
    +	s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
    +}
    +
    +// Write writes the hdr and data bytes to the output stream.
    +func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
    +	return s.ct.write(s, hdr, data, opts)
    +}
    +
    +// BytesReceived indicates whether any bytes have been received on this stream.
    +func (s *ClientStream) BytesReceived() bool {
    +	return s.bytesReceived.Load()
    +}
    +
    +// Unprocessed indicates whether the server did not process this stream --
    +// i.e. it sent a refused stream or GOAWAY including this stream ID.
    +func (s *ClientStream) Unprocessed() bool {
    +	return s.unprocessed.Load()
    +}
    +
    +func (s *ClientStream) waitOnHeader() {
    +	select {
    +	case <-s.ctx.Done():
    +		// Close the stream to prevent headers/trailers from changing after
    +		// this function returns.
    +		s.Close(ContextErr(s.ctx.Err()))
    +		// headerChan could possibly not be closed yet if closeStream raced
    +		// with operateHeaders; wait until it is closed explicitly here.
    +		<-s.headerChan
    +	case <-s.headerChan:
    +	}
    +}
    +
    +// RecvCompress returns the compression algorithm applied to the inbound
    +// message. It is empty string if there is no compression applied.
    +func (s *ClientStream) RecvCompress() string {
    +	s.waitOnHeader()
    +	return s.recvCompress
    +}
    +
    +// Done returns a channel which is closed when it receives the final status
    +// from the server.
    +func (s *ClientStream) Done() <-chan struct{} {
    +	return s.done
    +}
    +
    +// Header returns the header metadata of the stream. Acquires the key-value
    +// pairs of header metadata once it is available. It blocks until i) the
    +// metadata is ready or ii) there is no header metadata or iii) the stream is
    +// canceled/expired.
    +func (s *ClientStream) Header() (metadata.MD, error) {
    +	s.waitOnHeader()
    +
    +	if !s.headerValid || s.noHeaders {
    +		return nil, s.status.Err()
    +	}
    +
    +	return s.header.Copy(), nil
    +}
    +
    +// TrailersOnly blocks until a header or trailers-only frame is received and
    +// then returns true if the stream was trailers-only.  If the stream ends
    +// before headers are received, it returns true.
    +func (s *ClientStream) TrailersOnly() bool {
    +	s.waitOnHeader()
    +	return s.noHeaders
    +}
    +
    +// Status returns the status received from the server.
    +// Status can be read safely only after the stream has ended,
    +// that is, after Done() is closed.
    +func (s *ClientStream) Status() *status.Status {
    +	return s.status
    +}
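
ClientStream.Header, RecvCompress, and TrailersOnly above all funnel through waitOnHeader, which blocks until header metadata arrives or the stream's context ends. A simplified, self-contained sketch of that select pattern, outside the real transport types (names are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitOnHeader mirrors ClientStream.waitOnHeader: block until either the
// header channel is closed (headers or trailers arrived) or the context is
// done, in which case the stream is closed and we still wait for headerChan
// to be closed by whoever tears the stream down.
func waitOnHeader(ctx context.Context, headerChan <-chan struct{}, closeStream func(error)) {
	select {
	case <-ctx.Done():
		closeStream(ctx.Err())
		<-headerChan
	case <-headerChan:
	}
}

func main() {
	headerChan := make(chan struct{})

	// Simulate the transport delivering headers shortly after the RPC starts.
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(headerChan)
	}()

	// With a background context, the second select case wins once headers land;
	// the closeStream callback is never invoked here.
	waitOnHeader(context.Background(), headerChan, func(error) {})
	fmt.Println("headers available")
}
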
    diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
    index b330ccedc8..ef72fbb3a0 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
    @@ -32,6 +32,7 @@ import (
     	"golang.org/x/net/http2/hpack"
     	"google.golang.org/grpc/internal/grpclog"
     	"google.golang.org/grpc/internal/grpcutil"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/status"
     )
     
    @@ -148,9 +149,9 @@ type dataFrame struct {
     	streamID  uint32
     	endStream bool
     	h         []byte
    -	d         []byte
    +	reader    mem.Reader
     	// onEachWrite is called every time
    -	// a part of d is written out.
    +	// a part of data is written out.
     	onEachWrite func()
     }
     
    @@ -193,7 +194,7 @@ type goAway struct {
     	code      http2.ErrCode
     	debugData []byte
     	headsUp   bool
    -	closeConn error // if set, loopyWriter will exit, resulting in conn closure
    +	closeConn error // if set, loopyWriter will exit with this error
     }
     
     func (*goAway) isTransportResponseFrame() bool { return false }
    @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream {
     }
     
     // controlBuffer is a way to pass information to loopy.
    -// Information is passed as specific struct types called control frames.
    -// A control frame not only represents data, messages or headers to be sent out
    -// but can also be used to instruct loopy to update its internal state.
    -// It shouldn't be confused with an HTTP2 frame, although some of the control frames
    -// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
    +//
    +// Information is passed as specific struct types called control frames. A
    +// control frame not only represents data, messages or headers to be sent out
    +// but can also be used to instruct loopy to update its internal state. It
    +// shouldn't be confused with an HTTP2 frame, although some of the control
    +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
     type controlBuffer struct {
    -	ch              chan struct{}
    -	done            <-chan struct{}
    +	wakeupCh chan struct{}   // Unblocks readers waiting for something to read.
    +	done     <-chan struct{} // Closed when the transport is done.
    +
    +	// Mutex guards all the fields below, except trfChan which can be read
    +	// atomically without holding mu.
     	mu              sync.Mutex
    -	consumerWaiting bool
    -	list            *itemList
    -	err             error
    +	consumerWaiting bool      // True when readers are blocked waiting for new data.
    +	closed          bool      // True when the controlbuf is finished.
    +	list            *itemList // List of queued control frames.
     
     	// transportResponseFrames counts the number of queued items that represent
     	// the response of an action initiated by the peer.  trfChan is created
    @@ -308,47 +313,59 @@ type controlBuffer struct {
     	// closed and nilled when transportResponseFrames drops below the
     	// threshold.  Both fields are protected by mu.
     	transportResponseFrames int
    -	trfChan                 atomic.Value // chan struct{}
    +	trfChan                 atomic.Pointer[chan struct{}]
     }
     
     func newControlBuffer(done <-chan struct{}) *controlBuffer {
     	return &controlBuffer{
    -		ch:   make(chan struct{}, 1),
    -		list: &itemList{},
    -		done: done,
    +		wakeupCh: make(chan struct{}, 1),
    +		list:     &itemList{},
    +		done:     done,
     	}
     }
     
    -// throttle blocks if there are too many incomingSettings/cleanupStreams in the
    -// controlbuf.
    +// throttle blocks if there are too many frames in the control buf that
    +// represent the response of an action initiated by the peer, like
    +// incomingSettings, cleanupStreams, etc.
     func (c *controlBuffer) throttle() {
    -	ch, _ := c.trfChan.Load().(chan struct{})
    -	if ch != nil {
    +	if ch := c.trfChan.Load(); ch != nil {
     		select {
    -		case <-ch:
    +		case <-(*ch):
     		case <-c.done:
     		}
     	}
     }
     
    +// put adds an item to the controlbuf.
     func (c *controlBuffer) put(it cbItem) error {
     	_, err := c.executeAndPut(nil, it)
     	return err
     }
     
    -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
    -	var wakeUp bool
    +// executeAndPut runs f, and if the return value is true, adds the given item to
    +// the controlbuf. The item could be nil, in which case, this method simply
    +// executes f and does not add the item to the controlbuf.
    +//
    +// The first return value indicates whether the item was successfully added to
    +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
    +// if the control buffer is already closed.
    +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
     	c.mu.Lock()
    -	if c.err != nil {
    -		c.mu.Unlock()
    -		return false, c.err
    +	defer c.mu.Unlock()
    +
    +	if c.closed {
    +		return false, ErrConnClosing
     	}
     	if f != nil {
    -		if !f(it) { // f wasn't successful
    -			c.mu.Unlock()
    +		if !f() { // f wasn't successful
     			return false, nil
     		}
     	}
    +	if it == nil {
    +		return true, nil
    +	}
    +
    +	var wakeUp bool
     	if c.consumerWaiting {
     		wakeUp = true
     		c.consumerWaiting = false
    @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err
     		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
     			// We are adding the frame that puts us over the threshold; create
     			// a throttling channel.
    -			c.trfChan.Store(make(chan struct{}))
    +			ch := make(chan struct{})
    +			c.trfChan.Store(&ch)
     		}
     	}
    -	c.mu.Unlock()
     	if wakeUp {
     		select {
    -		case c.ch <- struct{}{}:
    +		case c.wakeupCh <- struct{}{}:
     		default:
     		}
     	}
     	return true, nil
     }
     
    -// Note argument f should never be nil.
    -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
    -	c.mu.Lock()
    -	if c.err != nil {
    -		c.mu.Unlock()
    -		return false, c.err
    -	}
    -	if !f(it) { // f wasn't successful
    -		c.mu.Unlock()
    -		return false, nil
    -	}
    -	c.mu.Unlock()
    -	return true, nil
    -}
    -
    +// get returns the next control frame from the control buffer. If block is true
    +// **and** there are no control frames in the control buffer, the call blocks
    +// until one of the conditions is met: there is a frame to return or the
    +// transport is closed.
     func (c *controlBuffer) get(block bool) (any, error) {
     	for {
     		c.mu.Lock()
    -		if c.err != nil {
    +		frame, err := c.getOnceLocked()
    +		if frame != nil || err != nil || !block {
    +			// If we read a frame or an error, we can return to the caller. The
    +			// call to getOnceLocked() returns a nil frame and a nil error if
    +			// there is nothing to read, and in that case, if the caller asked
    +			// us not to block, we can return now as well.
     			c.mu.Unlock()
    -			return nil, c.err
    -		}
    -		if !c.list.isEmpty() {
    -			h := c.list.dequeue().(cbItem)
    -			if h.isTransportResponseFrame() {
    -				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
    -					// We are removing the frame that put us over the
    -					// threshold; close and clear the throttling channel.
    -					ch := c.trfChan.Load().(chan struct{})
    -					close(ch)
    -					c.trfChan.Store((chan struct{})(nil))
    -				}
    -				c.transportResponseFrames--
    -			}
    -			c.mu.Unlock()
    -			return h, nil
    -		}
    -		if !block {
    -			c.mu.Unlock()
    -			return nil, nil
    +			return frame, err
     		}
     		c.consumerWaiting = true
     		c.mu.Unlock()
    +
    +		// Release the lock above and wait to be woken up.
     		select {
    -		case <-c.ch:
    +		case <-c.wakeupCh:
     		case <-c.done:
     			return nil, errors.New("transport closed by client")
     		}
     	}
     }
     
    +// Callers must not use this method, but should instead use get().
    +//
    +// Caller must hold c.mu.
    +func (c *controlBuffer) getOnceLocked() (any, error) {
    +	if c.closed {
    +		return false, ErrConnClosing
    +	}
    +	if c.list.isEmpty() {
    +		return nil, nil
    +	}
    +	h := c.list.dequeue().(cbItem)
    +	if h.isTransportResponseFrame() {
    +		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
    +			// We are removing the frame that put us over the
    +			// threshold; close and clear the throttling channel.
    +			ch := c.trfChan.Swap(nil)
    +			close(*ch)
    +		}
    +		c.transportResponseFrames--
    +	}
    +	return h, nil
    +}
    +
    +// finish closes the control buffer, cleaning up any streams that have queued
    +// header frames. Once this method returns, no more frames can be added to the
    +// control buffer, and attempts to do so will return ErrConnClosing.
     func (c *controlBuffer) finish() {
     	c.mu.Lock()
    -	if c.err != nil {
    -		c.mu.Unlock()
    +	defer c.mu.Unlock()
    +
    +	if c.closed {
     		return
     	}
    -	c.err = ErrConnClosing
    +	c.closed = true
     	// There may be headers for streams in the control buffer.
     	// These streams need to be cleaned out since the transport
     	// is still not aware of these yet.
     	for head := c.list.dequeueAll(); head != nil; head = head.next {
    -		hdr, ok := head.it.(*headerFrame)
    -		if !ok {
    -			continue
    -		}
    -		if hdr.onOrphaned != nil { // It will be nil on the server-side.
    -			hdr.onOrphaned(ErrConnClosing)
    +		switch v := head.it.(type) {
    +		case *headerFrame:
    +			if v.onOrphaned != nil { // It will be nil on the server-side.
    +				v.onOrphaned(ErrConnClosing)
    +			}
    +		case *dataFrame:
    +			_ = v.reader.Close()
     		}
     	}
    +
     	// In case throttle() is currently in flight, it needs to be unblocked.
     	// Otherwise, the transport may not close, since the transport is closed by
     	// the reader encountering the connection error.
    -	ch, _ := c.trfChan.Load().(chan struct{})
    +	ch := c.trfChan.Swap(nil)
     	if ch != nil {
    -		close(ch)
    +		close(*ch)
     	}
    -	c.trfChan.Store((chan struct{})(nil))
    -	c.mu.Unlock()
     }
     
     type side int
    @@ -466,7 +487,7 @@ const (
     // stream maintains a queue of data frames; as loopy receives data frames
     // it gets added to the queue of the relevant stream.
     // Loopy goes over this list of active streams by processing one node every iteration,
    -// thereby closely resemebling to a round-robin scheduling over all streams. While
    +// thereby closely resembling a round-robin scheduling over all streams. While
     // processing a stream, loopy writes out data bytes from this stream capped by the min
     // of http2MaxFrameLen, connection-level flow control and stream-level flow control.
     type loopyWriter struct {
    @@ -490,26 +511,29 @@ type loopyWriter struct {
     	draining      bool
     	conn          net.Conn
     	logger        *grpclog.PrefixLogger
    +	bufferPool    mem.BufferPool
     
     	// Side-specific handlers
     	ssGoAwayHandler func(*goAway) (bool, error)
     }
     
    -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
    +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
     	var buf bytes.Buffer
     	l := &loopyWriter{
    -		side:          s,
    -		cbuf:          cbuf,
    -		sendQuota:     defaultWindowSize,
    -		oiws:          defaultWindowSize,
    -		estdStreams:   make(map[uint32]*outStream),
    -		activeStreams: newOutStreamList(),
    -		framer:        fr,
    -		hBuf:          &buf,
    -		hEnc:          hpack.NewEncoder(&buf),
    -		bdpEst:        bdpEst,
    -		conn:          conn,
    -		logger:        logger,
    +		side:            s,
    +		cbuf:            cbuf,
    +		sendQuota:       defaultWindowSize,
    +		oiws:            defaultWindowSize,
    +		estdStreams:     make(map[uint32]*outStream),
    +		activeStreams:   newOutStreamList(),
    +		framer:          fr,
    +		hBuf:            &buf,
    +		hEnc:            hpack.NewEncoder(&buf),
    +		bdpEst:          bdpEst,
    +		conn:            conn,
    +		logger:          logger,
    +		ssGoAwayHandler: goAwayHandler,
    +		bufferPool:      bufferPool,
     	}
     	return l
     }
    @@ -535,8 +559,8 @@ const minBatchSize = 1000
     // size is too low to give stream goroutines a chance to fill it up.
     //
     // Upon exiting, if the error causing the exit is not an I/O error, run()
    -// flushes and closes the underlying connection.  Otherwise, the connection is
    -// left open to allow the I/O error to be encountered by the reader instead.
    +// flushes the underlying connection.  The connection is always left open to
    +// allow different closing behavior on the client and server.
     func (l *loopyWriter) run() (err error) {
     	defer func() {
     		if l.logger.V(logLevel) {
    @@ -544,7 +568,6 @@ func (l *loopyWriter) run() (err error) {
     		}
     		if !isIOError(err) {
     			l.framer.writer.Flush()
    -			l.conn.Close()
     		}
     		l.cbuf.finish()
     	}()
    @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
     		// not be established yet.
     		delete(l.estdStreams, c.streamID)
     		str.deleteSelf()
    +		for head := str.itl.dequeueAll(); head != nil; head = head.next {
    +			if df, ok := head.it.(*dataFrame); ok {
    +				_ = df.reader.Close()
    +			}
    +		}
     	}
     	if c.rst { // If RST_STREAM needs to be sent.
     		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
    @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
     	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
     	// A data item is represented by a dataFrame, since it later translates into
     	// multiple HTTP2 data frames.
    -	// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
    -	// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
    -	// maximum possible HTTP2 frame size.
    +	// Every dataFrame carries two sources of bytes: h, which holds the
    +	// grpc-message header, and reader, which yields the actual message data.
    +	// As an optimization to keep wire traffic low, header and message bytes are
    +	// combined into a single buffer so that each frame is as close as possible
    +	// to the maximum HTTP2 frame size.
     
    -	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
    +	if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
     		// Client sends out empty data frame with endStream = true
     		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
     			return false, err
     		}
     		str.itl.dequeue() // remove the empty data item from stream
    +		_ = dataItem.reader.Close()
     		if str.itl.isEmpty() {
     			str.state = empty
     		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
    @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) {
     		}
     		return false, nil
     	}
    -	var (
    -		buf []byte
    -	)
    +
     	// Figure out the maximum size we can send
     	maxSize := http2MaxFrameLen
     	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
    @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) {
     	}
     	// Compute how much of the header and data we can send within quota and max frame length
     	hSize := min(maxSize, len(dataItem.h))
    -	dSize := min(maxSize-hSize, len(dataItem.d))
    -	if hSize != 0 {
    -		if dSize == 0 {
    -			buf = dataItem.h
    -		} else {
    -			// We can add some data to grpc message header to distribute bytes more equally across frames.
    -			// Copy on the stack to avoid generating garbage
    -			var localBuf [http2MaxFrameLen]byte
    -			copy(localBuf[:hSize], dataItem.h)
    -			copy(localBuf[hSize:], dataItem.d[:dSize])
    -			buf = localBuf[:hSize+dSize]
    -		}
    +	dSize := min(maxSize-hSize, dataItem.reader.Remaining())
    +	remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
    +	size := hSize + dSize
    +
    +	var buf *[]byte
    +
    +	if hSize != 0 && dSize == 0 {
    +		buf = &dataItem.h
     	} else {
    -		buf = dataItem.d
    -	}
    +		// Note: this is only necessary because the http2.Framer does not support
    +		// partially writing a frame, so the sequence must be materialized into a buffer.
    +		// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
    +		pool := l.bufferPool
    +		if pool == nil {
    +			// Note that this is only supposed to be nil in tests. Otherwise, stream is
    +			// always initialized with a BufferPool.
    +			pool = mem.DefaultBufferPool()
    +		}
    +		buf = pool.Get(size)
    +		defer pool.Put(buf)
     
    -	size := hSize + dSize
    +		copy((*buf)[:hSize], dataItem.h)
    +		_, _ = dataItem.reader.Read((*buf)[hSize:])
    +	}
     
     	// Now that outgoing flow controls are checked we can replenish str's write quota
     	str.wq.replenish(size)
     	var endStream bool
     	// If this is the last data message on this stream and all of it can be written in this iteration.
    -	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
    +	if dataItem.endStream && remainingBytes == 0 {
     		endStream = true
     	}
     	if dataItem.onEachWrite != nil {
     		dataItem.onEachWrite()
     	}
    -	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
    +	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
     		return false, err
     	}
     	str.bytesOutStanding += size
     	l.sendQuota -= uint32(size)
     	dataItem.h = dataItem.h[hSize:]
    -	dataItem.d = dataItem.d[dSize:]
     
    -	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
    +	if remainingBytes == 0 { // All the data from that message was written out.
    +		_ = dataItem.reader.Close()
     		str.itl.dequeue()
     	}
     	if str.itl.isEmpty() {
    @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) {
     	}
     	return false, nil
     }
    -
    -func min(a, b int) int {
    -	if a < b {
    -		return a
    -	}
    -	return b
    -}
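
controlBuffer's throttling above now keeps the throttle channel in an atomic.Pointer[chan struct{}]: the channel is created when the number of queued transport-response frames crosses the threshold, callers of throttle() block on it, and getOnceLocked()/finish() swap it out and close it to release them. A stripped-down sketch of that pattern (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// throttler mirrors controlBuffer's trfChan usage: an atomic.Pointer to a
// channel that exists only while producers should be throttled.
type throttler struct {
	ch atomic.Pointer[chan struct{}]
}

// engage installs a fresh channel; callers of wait() will block on it.
func (t *throttler) engage() {
	ch := make(chan struct{})
	t.ch.Store(&ch)
}

// release swaps the channel out and closes it, unblocking all waiters.
func (t *throttler) release() {
	if ch := t.ch.Swap(nil); ch != nil {
		close(*ch)
	}
}

// wait blocks only while a throttle channel is installed.
func (t *throttler) wait() {
	if ch := t.ch.Load(); ch != nil {
		<-(*ch)
	}
}

func main() {
	var t throttler
	t.engage()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		t.wait() // blocks until release()
		fmt.Println("unthrottled")
	}()

	time.Sleep(10 * time.Millisecond)
	t.release()
	wg.Wait()
}
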
    diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
    index 97198c5158..dfc0f224ec 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
    @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 {
     
     func (f *trInFlow) onData(n uint32) uint32 {
     	f.unacked += n
    -	if f.unacked >= f.limit/4 {
    -		w := f.unacked
    -		f.unacked = 0
    +	if f.unacked < f.limit/4 {
     		f.updateEffectiveWindowSize()
    -		return w
    +		return 0
     	}
    -	f.updateEffectiveWindowSize()
    -	return 0
    +	return f.reset()
     }
     
     func (f *trInFlow) reset() uint32 {
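
The trInFlow.onData rewrite above accumulates received bytes and only returns a window update once the unacked total reaches a quarter of the connection window; below the threshold it returns 0 and keeps batching. Assuming reset() returns the accumulated count and clears it (as its use here implies), the behaviour is unchanged. A tiny standalone sketch of that accounting, with an assumed 64 KiB limit:

package main

import "fmt"

// onData mirrors the quarter-window batching: unacked bytes are accumulated
// and a WINDOW_UPDATE amount is only emitted once they reach limit/4.
func onData(unacked *uint32, limit, n uint32) uint32 {
	*unacked += n
	if *unacked < limit/4 {
		return 0 // keep batching; nothing to send yet
	}
	w := *unacked
	*unacked = 0
	return w // send one update covering everything received so far
}

func main() {
	var unacked uint32
	const limit = 64 * 1024
	for _, n := range []uint32{4096, 4096, 8192} {
		fmt.Println(onData(&unacked, limit, n)) // prints 0, 0, then 16384
	}
}
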
    diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
    index a9d70e2a16..d9305a65d8 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
    @@ -24,7 +24,6 @@
     package transport
     
     import (
    -	"bytes"
     	"context"
     	"errors"
     	"fmt"
    @@ -35,30 +34,27 @@ import (
     	"sync"
     	"time"
     
    -	"github.com/golang/protobuf/proto"
     	"golang.org/x/net/http2"
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/credentials"
     	"google.golang.org/grpc/internal/grpclog"
     	"google.golang.org/grpc/internal/grpcutil"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/peer"
     	"google.golang.org/grpc/stats"
     	"google.golang.org/grpc/status"
    +	"google.golang.org/protobuf/proto"
     )
     
     // NewServerHandlerTransport returns a ServerTransport handling gRPC from
     // inside an http.Handler, or writes an HTTP error to w and returns an error.
     // It requires that the http Server supports HTTP/2.
    -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
    -	if r.ProtoMajor != 2 {
    -		msg := "gRPC requires HTTP/2"
    -		http.Error(w, msg, http.StatusBadRequest)
    -		return nil, errors.New(msg)
    -	}
    -	if r.Method != "POST" {
    +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
    +	if r.Method != http.MethodPost {
    +		w.Header().Set("Allow", http.MethodPost)
     		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
    -		http.Error(w, msg, http.StatusBadRequest)
    +		http.Error(w, msg, http.StatusMethodNotAllowed)
     		return nil, errors.New(msg)
     	}
     	contentType := r.Header.Get("Content-Type")
    @@ -69,6 +65,11 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
     		http.Error(w, msg, http.StatusUnsupportedMediaType)
     		return nil, errors.New(msg)
     	}
    +	if r.ProtoMajor != 2 {
    +		msg := "gRPC requires HTTP/2"
    +		http.Error(w, msg, http.StatusHTTPVersionNotSupported)
    +		return nil, errors.New(msg)
    +	}
     	if _, ok := w.(http.Flusher); !ok {
     		msg := "gRPC requires a ResponseWriter supporting http.Flusher"
     		http.Error(w, msg, http.StatusInternalServerError)
    @@ -97,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
     		contentType:    contentType,
     		contentSubtype: contentSubtype,
     		stats:          stats,
    +		bufferPool:     bufferPool,
     	}
     	st.logger = prefixLoggerForServerHandlerTransport(st)
     
    @@ -170,6 +172,8 @@ type serverHandlerTransport struct {
     
     	stats  []stats.Handler
     	logger *grpclog.PrefixLogger
    +
    +	bufferPool mem.BufferPool
     }
     
     func (ht *serverHandlerTransport) Close(err error) {
    @@ -221,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error {
     	}
     }
     
    -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
    +func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
     	ht.writeStatusMu.Lock()
     	defer ht.writeStatusMu.Unlock()
     
    @@ -243,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
     		}
     
     		s.hdrMu.Lock()
    +		defer s.hdrMu.Unlock()
     		if p := st.Proto(); p != nil && len(p.Details) > 0 {
     			delete(s.trailer, grpcStatusDetailsBinHeader)
     			stBytes, err := proto.Marshal(p)
    @@ -267,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
     				}
     			}
     		}
    -		s.hdrMu.Unlock()
     	})
     
     	if err == nil { // transport has not been closed
    @@ -285,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
     
     // writePendingHeaders sets common and custom headers on the first
     // write call (Write, WriteHeader, or WriteStatus)
    -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
    +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
     	ht.writeCommonHeaders(s)
     	ht.writeCustomHeaders(s)
     }
     
     // writeCommonHeaders sets common headers on the first write
     // call (Write, WriteHeader, or WriteStatus).
    -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
    +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
     	h := ht.rw.Header()
     	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
     	h.Set("Content-Type", ht.contentType)
    @@ -313,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
     
     // writeCustomHeaders sets custom headers set on the stream via SetHeader
     // on the first write call (Write, WriteHeader, or WriteStatus)
    -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
    +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
     	h := ht.rw.Header()
     
     	s.hdrMu.Lock()
    @@ -329,19 +333,31 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
     	s.hdrMu.Unlock()
     }
     
    -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
    +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
    +	// Always take a reference because otherwise there is no guarantee the data will
    +	// be available after this function returns. This is what callers to Write
    +	// expect.
    +	data.Ref()
     	headersWritten := s.updateHeaderSent()
    -	return ht.do(func() {
    +	err := ht.do(func() {
    +		defer data.Free()
     		if !headersWritten {
     			ht.writePendingHeaders(s)
     		}
     		ht.rw.Write(hdr)
    -		ht.rw.Write(data)
    +		for _, b := range data {
    +			_, _ = ht.rw.Write(b.ReadOnlyData())
    +		}
     		ht.rw.(http.Flusher).Flush()
     	})
    +	if err != nil {
    +		data.Free()
    +		return err
    +	}
    +	return nil
     }
     
    -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
    +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
     	if err := s.SetHeader(md); err != nil {
     		return err
     	}
    @@ -369,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
     	return err
     }
     
    -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
    +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
     	// With this transport type there will be exactly 1 stream: this HTTP request.
     	var cancel context.CancelFunc
     	if ht.timeoutSet {
    @@ -392,20 +408,22 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
     
     	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
     	req := ht.req
    -	s := &Stream{
    -		id:               0, // irrelevant
    -		ctx:              ctx,
    -		requestRead:      func(int) {},
    +	s := &ServerStream{
    +		Stream: &Stream{
    +			id:             0, // irrelevant
    +			ctx:            ctx,
    +			requestRead:    func(int) {},
    +			buf:            newRecvBuffer(),
    +			method:         req.URL.Path,
    +			recvCompress:   req.Header.Get("grpc-encoding"),
    +			contentSubtype: ht.contentSubtype,
    +		},
     		cancel:           cancel,
    -		buf:              newRecvBuffer(),
     		st:               ht,
    -		method:           req.URL.Path,
    -		recvCompress:     req.Header.Get("grpc-encoding"),
    -		contentSubtype:   ht.contentSubtype,
     		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
     	}
     	s.trReader = &transportReader{
    -		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
    +		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
     		windowHandler: func(int) {},
     	}
     
    @@ -414,21 +432,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
     	go func() {
     		defer close(readerDone)
     
    -		// TODO: minimize garbage, optimize recvBuffer code/ownership
    -		const readSize = 8196
    -		for buf := make([]byte, readSize); ; {
    -			n, err := req.Body.Read(buf)
    +		for {
    +			buf := ht.bufferPool.Get(http2MaxFrameLen)
    +			n, err := req.Body.Read(*buf)
     			if n > 0 {
    -				s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
    -				buf = buf[n:]
    +				*buf = (*buf)[:n]
    +				s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
    +			} else {
    +				ht.bufferPool.Put(buf)
     			}
     			if err != nil {
     				s.buf.put(recvMsg{err: mapRecvMsgError(err)})
     				return
     			}
    -			if len(buf) == 0 {
    -				buf = make([]byte, readSize)
    -			}
     		}
     	}()
     
    @@ -457,11 +473,9 @@ func (ht *serverHandlerTransport) runStream() {
     	}
     }
     
    -func (ht *serverHandlerTransport) IncrMsgSent() {}
    -
    -func (ht *serverHandlerTransport) IncrMsgRecv() {}
    +func (ht *serverHandlerTransport) incrMsgRecv() {}
     
    -func (ht *serverHandlerTransport) Drain(debugData string) {
    +func (ht *serverHandlerTransport) Drain(string) {
     	panic("Drain() is not implemented")
     }
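
The handler-transport read loop above now pulls chunks from a mem.BufferPool instead of hand-managing a fixed scratch buffer, and wraps each filled chunk in a mem.Buffer so it returns to the pool once the receive path frees it. A minimal sketch of that pooled read loop using the public mem package (the reader contents and chunk size are just for illustration):

package main

import (
	"bytes"
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

// readAll drains r into pooled buffers, mirroring the body-reading loop in
// serverHandlerTransport.HandleStreams: each chunk is wrapped in a mem.Buffer
// whose backing array goes back to the pool when the buffer is freed.
func readAll(r io.Reader, pool mem.BufferPool, chunk int) ([]mem.Buffer, error) {
	var bufs []mem.Buffer
	for {
		buf := pool.Get(chunk)
		n, err := r.Read(*buf)
		if n > 0 {
			*buf = (*buf)[:n]
			bufs = append(bufs, mem.NewBuffer(buf, pool))
		} else {
			pool.Put(buf) // nothing read; hand the slice straight back
		}
		if err == io.EOF {
			return bufs, nil
		}
		if err != nil {
			return bufs, err
		}
	}
}

func main() {
	bufs, err := readAll(bytes.NewReader([]byte("hello pooled buffers")), mem.DefaultBufferPool(), 8)
	if err != nil {
		panic(err)
	}
	for _, b := range bufs {
		fmt.Printf("%s", b.ReadOnlyData())
		b.Free() // return the chunk to the pool
	}
	fmt.Println()
}
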
     
    diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
    index c33ac5961b..f323ab7f45 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
    @@ -47,6 +47,7 @@ import (
     	isyscall "google.golang.org/grpc/internal/syscall"
     	"google.golang.org/grpc/internal/transport/networktype"
     	"google.golang.org/grpc/keepalive"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/peer"
     	"google.golang.org/grpc/resolver"
    @@ -59,6 +60,8 @@ import (
     // atomically.
     var clientConnectionCounter uint64
     
    +var goAwayLoopyWriterTimeout = 5 * time.Second
    +
     var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
     
     // http2Client implements the ClientTransport interface with HTTP2.
    @@ -83,9 +86,9 @@ type http2Client struct {
     	writerDone chan struct{} // sync point to enable testing.
     	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
     	// that the server sent GoAway on this transport.
    -	goAway chan struct{}
    -
    -	framer *framer
    +	goAway        chan struct{}
    +	keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
    +	framer        *framer
     	// controlBuf delivers all the control related tasks (e.g., window
     	// updates, reset streams, and various settings) to the controller.
     	// Do not access controlBuf with mu held.
    @@ -114,13 +117,13 @@ type http2Client struct {
     	streamQuota           int64
     	streamsQuotaAvailable chan struct{}
     	waitingStreams        uint32
    -	nextID                uint32
     	registeredCompressors string
     
     	// Do not access controlBuf with mu held.
     	mu            sync.Mutex // guard the following variables
    +	nextID        uint32
     	state         transportState
    -	activeStreams map[uint32]*Stream
    +	activeStreams map[uint32]*ClientStream
     	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
     	prevGoAwayID uint32
     	// goAwayReason records the http2.ErrCode and debug data received with the
    @@ -140,13 +143,11 @@ type http2Client struct {
     	// variable.
     	kpDormant bool
     
    -	// Fields below are for channelz metric collection.
    -	channelzID *channelz.Identifier
    -	czData     *channelzData
    +	channelz *channelz.Socket
     
     	onClose func(GoAwayReason)
     
    -	bufferPool *bufferPool
    +	bufferPool mem.BufferPool
     
     	connectionID uint64
     	logger       *grpclog.PrefixLogger
    @@ -198,10 +199,10 @@ func isTemporary(err error) bool {
     	return true
     }
     
    -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
    +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
     // and starts to receive messages on it. Non-nil error returns if construction
     // fails.
    -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
    +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
     	scheme := "http"
     	ctx, cancel := context.WithCancel(ctx)
     	defer func() {
    @@ -231,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     		}
     	}(conn)
     
    -	// The following defer and goroutine monitor the connectCtx for cancelation
    +	// The following defer and goroutine monitor the connectCtx for cancellation
     	// and deadline.  On context expiration, the connection is hard closed and
     	// this function will naturally fail as a result.  Otherwise, the defer
     	// waits for the goroutine to exit to prevent the context from being
    @@ -319,6 +320,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     	if opts.MaxHeaderListSize != nil {
     		maxHeaderListSize = *opts.MaxHeaderListSize
     	}
    +
     	t := &http2Client{
     		ctx:                   ctx,
     		ctxDone:               ctx.Done(), // Cache Done chan.
    @@ -333,10 +335,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     		readerDone:            make(chan struct{}),
     		writerDone:            make(chan struct{}),
     		goAway:                make(chan struct{}),
    +		keepaliveDone:         make(chan struct{}),
     		framer:                newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
     		fc:                    &trInFlow{limit: uint32(icwz)},
     		scheme:                scheme,
    -		activeStreams:         make(map[uint32]*Stream),
    +		activeStreams:         make(map[uint32]*ClientStream),
     		isSecure:              isSecure,
     		perRPCCreds:           perRPCCreds,
     		kp:                    kp,
    @@ -346,11 +349,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     		maxConcurrentStreams:  defaultMaxStreamsClient,
     		streamQuota:           defaultMaxStreamsClient,
     		streamsQuotaAvailable: make(chan struct{}, 1),
    -		czData:                new(channelzData),
     		keepaliveEnabled:      keepaliveEnabled,
    -		bufferPool:            newBufferPool(),
    +		bufferPool:            opts.BufferPool,
     		onClose:               onClose,
     	}
    +	var czSecurity credentials.ChannelzSecurityValue
    +	if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
    +		czSecurity = au.GetSecurityValue()
    +	}
    +	t.channelz = channelz.RegisterSocket(
    +		&channelz.Socket{
    +			SocketType:       channelz.SocketTypeNormal,
    +			Parent:           opts.ChannelzParent,
    +			SocketMetrics:    channelz.SocketMetrics{},
    +			EphemeralMetrics: t.socketMetrics,
    +			LocalAddr:        t.localAddr,
    +			RemoteAddr:       t.remoteAddr,
    +			SocketOptions:    channelz.GetSocketOption(t.conn),
    +			Security:         czSecurity,
    +		})
     	t.logger = prefixLoggerForClientTransport(t)
     	// Add peer information to the http2client context.
     	t.ctx = peer.NewContext(t.ctx, t.getPeer())
    @@ -381,10 +398,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     		}
     		sh.HandleConn(t.ctx, connBegin)
     	}
    -	t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
    -	if err != nil {
    -		return nil, err
    -	}
     	if t.keepaliveEnabled {
     		t.kpDormancyCond = sync.NewCond(&t.mu)
     		go t.keepalive()
    @@ -399,10 +412,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     	readerErrCh := make(chan error, 1)
     	go t.reader(readerErrCh)
     	defer func() {
    -		if err == nil {
    -			err = <-readerErrCh
    -		}
     		if err != nil {
    +			// writerDone should be closed since the loopy goroutine
    +			// wouldn't have started in the case this function returns an error.
    +			close(t.writerDone)
     			t.Close(err)
     		}
     	}()
    @@ -449,25 +462,37 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     	if err := t.framer.writer.Flush(); err != nil {
     		return nil, err
     	}
    +	// Block until the server preface is received successfully or an error occurs.
    +	if err = <-readerErrCh; err != nil {
    +		return nil, err
    +	}
     	go func() {
    -		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
    -		t.loopy.run()
    +		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
    +		if err := t.loopy.run(); !isIOError(err) {
    +			// Immediately close the connection, as the loopy writer returns
    +			// when there are no more active streams and we were draining (the
    +			// server sent a GOAWAY).  For I/O errors, the reader will hit it
    +			// after draining any remaining incoming data.
    +			t.conn.Close()
    +		}
     		close(t.writerDone)
     	}()
     	return t, nil
     }
     
    -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
    +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
     	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
    -	s := &Stream{
    -		ct:             t,
    -		done:           make(chan struct{}),
    -		method:         callHdr.Method,
    -		sendCompress:   callHdr.SendCompress,
    -		buf:            newRecvBuffer(),
    -		headerChan:     make(chan struct{}),
    -		contentSubtype: callHdr.ContentSubtype,
    -		doneFunc:       callHdr.DoneFunc,
    +	s := &ClientStream{
    +		Stream: &Stream{
    +			method:         callHdr.Method,
    +			sendCompress:   callHdr.SendCompress,
    +			buf:            newRecvBuffer(),
    +			contentSubtype: callHdr.ContentSubtype,
    +		},
    +		ct:         t,
    +		done:       make(chan struct{}),
    +		headerChan: make(chan struct{}),
    +		doneFunc:   callHdr.DoneFunc,
     	}
     	s.wq = newWriteQuota(defaultWriteQuota, s.done)
     	s.requestRead = func(n int) {
    @@ -483,9 +508,8 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
     			ctxDone: s.ctx.Done(),
     			recv:    s.buf,
     			closeStream: func(err error) {
    -				t.CloseStream(s, err)
    +				s.Close(err)
     			},
    -			freeBuffer: t.bufferPool.put,
     		},
     		windowHandler: func(n int) {
     			t.updateWindow(s, uint32(n))
    @@ -502,6 +526,18 @@ func (t *http2Client) getPeer() *peer.Peer {
     	}
     }
     
    +// OutgoingGoAwayHandler writes a GOAWAY to the connection.  Always returns (false, err) as we want the GoAway
    +// to be the last frame loopy writes to the transport.
    +func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
    +	t.mu.Lock()
    +	maxStreamID := t.nextID - 2
    +	t.mu.Unlock()
    +	if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
    +		return false, err
    +	}
    +	return false, g.closeConn
    +}
    +
     func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
     	aud := t.createAudience(callHdr)
     	ri := credentials.RequestInfo{
    @@ -563,12 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
     	for k, v := range callAuthData {
     		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
     	}
    -	if b := stats.OutgoingTags(ctx); b != nil {
    -		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
    -	}
    -	if b := stats.OutgoingTrace(ctx); b != nil {
    -		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
    -	}
     
     	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
     		var k string
    @@ -704,7 +734,7 @@ func (e NewStreamError) Error() string {
     
     // NewStream creates a stream and registers it into the transport as "active"
     // streams.  All non-nil errors returned will be *NewStreamError.
    -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
    +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
     	ctx = peer.NewContext(ctx, t.getPeer())
     
     	// ServerName field of the resolver returned address takes precedence over
    @@ -729,7 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     			return
     		}
     		// The stream was unprocessed by the server.
    -		atomic.StoreUint32(&s.unprocessed, 1)
    +		s.unprocessed.Store(true)
     		s.write(recvMsg{err: err})
     		close(s.done)
     		// If headerChan isn't closed, then close it.
    @@ -740,7 +770,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     	hdr := &headerFrame{
     		hf:        headerFields,
     		endStream: false,
    -		initStream: func(id uint32) error {
    +		initStream: func(uint32) error {
     			t.mu.Lock()
     			// TODO: handle transport closure in loopy instead and remove this
     			// initStream is never called when transport is draining.
    @@ -750,8 +780,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     				return ErrConnClosing
     			}
     			if channelz.IsOn() {
    -				atomic.AddInt64(&t.czData.streamsStarted, 1)
    -				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
    +				t.channelz.SocketMetrics.StreamsStarted.Add(1)
    +				t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
     			}
     			// If the keepalive goroutine has gone dormant, wake it up.
     			if t.kpDormant {
    @@ -766,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     	firstTry := true
     	var ch chan struct{}
     	transportDrainRequired := false
    -	checkForStreamQuota := func(it any) bool {
    +	checkForStreamQuota := func() bool {
     		if t.streamQuota <= 0 { // Can go negative if server decreases it.
     			if firstTry {
     				t.waitingStreams++
    @@ -778,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     			t.waitingStreams--
     		}
     		t.streamQuota--
    -		h := it.(*headerFrame)
    -		h.streamID = t.nextID
    -		t.nextID += 2
     
    -		// Drain client transport if nextID > MaxStreamID which signals gRPC that
    -		// the connection is closed and a new one must be created for subsequent RPCs.
    -		transportDrainRequired = t.nextID > MaxStreamID
    -
    -		s.id = h.streamID
    -		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
     		t.mu.Lock()
     		if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
     			t.mu.Unlock()
     			return false // Don't create a stream if the transport is already closed.
     		}
    +
    +		hdr.streamID = t.nextID
    +		t.nextID += 2
    +		// Drain client transport if nextID > MaxStreamID which signals gRPC that
    +		// the connection is closed and a new one must be created for subsequent RPCs.
    +		transportDrainRequired = t.nextID > MaxStreamID
    +
    +		s.id = hdr.streamID
    +		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
     		t.activeStreams[s.id] = s
     		t.mu.Unlock()
    +
     		if t.streamQuota > 0 && t.waitingStreams > 0 {
     			select {
     			case t.streamsQuotaAvailable <- struct{}{}:
    @@ -804,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     		return true
     	}
     	var hdrListSizeErr error
    -	checkForHeaderListSize := func(it any) bool {
    +	checkForHeaderListSize := func() bool {
     		if t.maxSendHeaderListSize == nil {
     			return true
     		}
    -		hdrFrame := it.(*headerFrame)
     		var sz int64
    -		for _, f := range hdrFrame.hf {
    +		for _, f := range hdr.hf {
     			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
     				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
     				return false
    @@ -819,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     		return true
     	}
     	for {
    -		success, err := t.controlBuf.executeAndPut(func(it any) bool {
    -			return checkForHeaderListSize(it) && checkForStreamQuota(it)
    +		success, err := t.controlBuf.executeAndPut(func() bool {
    +			return checkForHeaderListSize() && checkForStreamQuota()
     		}, hdr)
     		if err != nil {
     			// Connection closed.
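The hunk above replaces the func(it any) bool callbacks passed to controlBuf.executeAndPut with plain func() bool closures that capture the header frame directly, removing the type assertion. A minimal sketch of that refactor in isolation; the toy executeAndPut and its names are invented for illustration, not the real control buffer API.

package main

import "fmt"

type headerFrame struct{ streamID uint32 }

// executeAndPut runs the check and, on success, would enqueue item (elided here).
func executeAndPut(check func() bool, item any) (bool, error) {
	if check != nil && !check() {
		return false, nil
	}
	_ = item // the real control buffer appends item to its queue here
	return true, nil
}

func main() {
	hdr := &headerFrame{}
	nextID := uint32(1)
	ok, _ := executeAndPut(func() bool {
		// The closure captures hdr and nextID directly; no func(it any) plus assertion.
		hdr.streamID = nextID
		nextID += 2
		return true
	}, hdr)
	fmt.Println(ok, hdr.streamID) // true 1
}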
    @@ -874,21 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
     	return s, nil
     }
     
    -// CloseStream clears the footprint of a stream when the stream is not needed any more.
    -// This must not be executed in reader's goroutine.
    -func (t *http2Client) CloseStream(s *Stream, err error) {
    -	var (
    -		rst     bool
    -		rstCode http2.ErrCode
    -	)
    -	if err != nil {
    -		rst = true
    -		rstCode = http2.ErrCodeCancel
    -	}
    -	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
    -}
    -
    -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
    +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
     	// Set stream status to done.
     	if s.swapState(streamDone) == streamDone {
     		// If it was already done, return.  If multiple closeStream calls
    @@ -922,16 +938,16 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
     			t.mu.Unlock()
     			if channelz.IsOn() {
     				if eosReceived {
    -					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
    +					t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
     				} else {
    -					atomic.AddInt64(&t.czData.streamsFailed, 1)
    +					t.channelz.SocketMetrics.StreamsFailed.Add(1)
     				}
     			}
     		},
     		rst:     rst,
     		rstCode: rstCode,
     	}
    -	addBackStreamQuota := func(any) bool {
    +	addBackStreamQuota := func() bool {
     		t.streamQuota++
     		if t.streamQuota > 0 && t.waitingStreams > 0 {
     			select {
    @@ -951,8 +967,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
     
     // Close kicks off the shutdown process of the transport. This should be called
     // only once on a transport. Once it is called, the transport should not be
    -// accessed any more.
    +// accessed anymore.
     func (t *http2Client) Close(err error) {
    +	t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
     	t.mu.Lock()
     	// Make sure we only close once.
     	if t.state == closing {
    @@ -975,15 +992,33 @@ func (t *http2Client) Close(err error) {
     		// should unblock it so that the goroutine eventually exits.
     		t.kpDormancyCond.Signal()
     	}
    -	t.mu.Unlock()
    -	t.controlBuf.finish()
    -	t.cancel()
    -	t.conn.Close()
    -	channelz.RemoveEntry(t.channelzID)
     	// Append info about previous goaways if there were any, since this may be important
     	// for understanding the root cause for this connection to be closed.
    -	_, goAwayDebugMessage := t.GetGoAwayReason()
    +	goAwayDebugMessage := t.goAwayDebugMessage
    +	t.mu.Unlock()
     
    +	// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
    +	// connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
    +	// also waits for loopyWriter to be closed with a timer to avoid the
    +	// long blocking in case the connection is blackholed, i.e. TCP is
    +	// just stuck.
    +	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
    +	timer := time.NewTimer(goAwayLoopyWriterTimeout)
    +	defer timer.Stop()
    +	select {
    +	case <-t.writerDone: // success
    +	case <-timer.C:
    +		t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
    +	}
    +	t.cancel()
    +	t.conn.Close()
    +	// Waits for the reader and keepalive goroutines to exit before returning to
    +	// ensure all resources are cleaned up before Close can return.
    +	<-t.readerDone
    +	if t.keepaliveEnabled {
    +		<-t.keepaliveDone
    +	}
    +	channelz.RemoveEntry(t.channelz.ID)
     	var st *status.Status
     	if len(goAwayDebugMessage) > 0 {
     		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
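The Close path above queues a GOAWAY and then waits for the loopy writer only up to goAwayLoopyWriterTimeout, so a blackholed TCP connection cannot stall shutdown indefinitely. A minimal sketch of that bounded-wait pattern on its own, with invented channel and function names:

package main

import (
	"fmt"
	"time"
)

// waitWithTimeout blocks until done is closed or the timeout elapses.
func waitWithTimeout(done <-chan struct{}, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-done:
		return true // writer finished; the GOAWAY made it onto the wire
	case <-timer.C:
		return false // give up and close the connection anyway
	}
}

func main() {
	writerDone := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(writerDone)
	}()
	fmt.Println(waitWithTimeout(writerDone, time.Second)) // true
}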
    @@ -1032,30 +1067,40 @@ func (t *http2Client) GracefulClose() {
     
     // Write formats the data into HTTP2 data frame(s) and sends it out. The caller
     // should proceed only if Write returns nil.
    -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
    +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
    +	reader := data.Reader()
    +
     	if opts.Last {
     		// If it's the last message, update stream state.
     		if !s.compareAndSwapState(streamActive, streamWriteDone) {
    +			_ = reader.Close()
     			return errStreamDone
     		}
     	} else if s.getState() != streamActive {
    +		_ = reader.Close()
     		return errStreamDone
     	}
     	df := &dataFrame{
     		streamID:  s.id,
     		endStream: opts.Last,
     		h:         hdr,
    -		d:         data,
    +		reader:    reader,
     	}
    -	if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
    -		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
    +	if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
    +		if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
    +			_ = reader.Close()
     			return err
     		}
     	}
    -	return t.controlBuf.put(df)
    +	if err := t.controlBuf.put(df); err != nil {
    +		_ = reader.Close()
    +		return err
    +	}
    +	t.incrMsgSent()
    +	return nil
     }
     
    -func (t *http2Client) getStream(f http2.Frame) *Stream {
    +func (t *http2Client) getStream(f http2.Frame) *ClientStream {
     	t.mu.Lock()
     	s := t.activeStreams[f.Header().StreamID]
     	t.mu.Unlock()
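With mem.BufferSlice, the client write path above obtains a reader up front and must close it on every early-return path (stream already done, quota failure, control-buffer error), since the reader holds references to pooled buffers. A minimal sketch of that ownership rule with a plain stand-in closer; all names are invented and nothing here is the real mem API:

package main

import (
	"errors"
	"fmt"
)

type refReader struct{ closed bool }

func (r *refReader) Close() error { r.closed = true; return nil }

// send mirrors the shape of the write path: release the reader on every failure
// branch, and hand ownership to the writer goroutine only on success.
func send(r *refReader, streamDone, enqueueFails bool) error {
	if streamDone {
		_ = r.Close() // release buffer references before bailing out
		return errors.New("stream done")
	}
	if enqueueFails {
		_ = r.Close()
		return errors.New("control buffer closed")
	}
	// On success, the queued frame owns r and closes it after writing.
	return nil
}

func main() {
	r := &refReader{}
	fmt.Println(send(r, true, false), r.closed) // stream done true
}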
    @@ -1065,7 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream {
     // adjustWindow sends out extra window update over the initial window size
     // of stream if the application is requesting data larger in size than
     // the window.
    -func (t *http2Client) adjustWindow(s *Stream, n uint32) {
    +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
     	if w := s.fc.maybeAdjust(n); w > 0 {
     		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
     	}
    @@ -1074,7 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) {
     // updateWindow adjusts the inbound quota for the stream.
     // Window updates will be sent out when the cumulative quota
     // exceeds the corresponding threshold.
    -func (t *http2Client) updateWindow(s *Stream, n uint32) {
    +func (t *http2Client) updateWindow(s *ClientStream, n uint32) {
     	if w := s.fc.onRead(n); w > 0 {
     		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
     	}
    @@ -1084,7 +1129,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
     // for the transport and the stream based on the current bdp
     // estimation.
     func (t *http2Client) updateFlowControl(n uint32) {
    -	updateIWS := func(any) bool {
    +	updateIWS := func() bool {
     		t.initialWindowSize = int32(n)
     		t.mu.Lock()
     		for _, s := range t.activeStreams {
    @@ -1157,10 +1202,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
     		// guarantee f.Data() is consumed before the arrival of next frame.
     		// Can this copy be eliminated?
     		if len(f.Data()) > 0 {
    -			buffer := t.bufferPool.get()
    -			buffer.Reset()
    -			buffer.Write(f.Data())
    -			s.write(recvMsg{buffer: buffer})
    +			pool := t.bufferPool
    +			if pool == nil {
    +				// Note that this is only supposed to be nil in tests. Otherwise, stream is
    +				// always initialized with a BufferPool.
    +				pool = mem.DefaultBufferPool()
    +			}
    +			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
     		}
     	}
     	// The server has closed the stream without sending trailers.  Record that
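handleData above copies each DATA frame payload into a pooled buffer via mem.Copy, falling back to mem.DefaultBufferPool() when the transport was constructed without a pool (which the comment notes only happens in tests). A minimal sketch of that fallback; mem.Copy, mem.DefaultBufferPool, mem.BufferPool and Free appear in this diff, while ReadOnlyData is assumed from the mem.Buffer interface:

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

// copyFrame copies data into a buffer owned by pool, defaulting the pool if unset.
func copyFrame(data []byte, pool mem.BufferPool) mem.Buffer {
	if pool == nil {
		pool = mem.DefaultBufferPool() // same fallback as the transport uses in tests
	}
	return mem.Copy(data, pool)
}

func main() {
	buf := copyFrame([]byte("payload"), nil)
	fmt.Println(len(buf.ReadOnlyData())) // 7; ReadOnlyData is assumed, not shown in the diff
	buf.Free()                           // return the backing memory to the pool
}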
    @@ -1177,7 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
     	}
     	if f.ErrCode == http2.ErrCodeRefusedStream {
     		// The stream was unprocessed by the server.
    -		atomic.StoreUint32(&s.unprocessed, 1)
    +		s.unprocessed.Store(true)
     	}
     	statusCode, ok := http2ErrConvTab[f.ErrCode]
     	if !ok {
    @@ -1189,7 +1237,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
     	if statusCode == codes.Canceled {
     		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
     			// Our deadline was already exceeded, and that was likely the cause
    -			// of this cancelation.  Alter the status code accordingly.
    +			// of this cancellation.  Alter the status code accordingly.
     			statusCode = codes.DeadlineExceeded
     		}
     	}
    @@ -1237,7 +1285,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
     		}
     		updateFuncs = append(updateFuncs, updateStreamQuota)
     	}
    -	t.controlBuf.executeAndPut(func(any) bool {
    +	t.controlBuf.executeAndPut(func() bool {
     		for _, f := range updateFuncs {
     			f()
     		}
    @@ -1258,11 +1306,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
     	t.controlBuf.put(pingAck)
     }
     
    -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
    +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
     	t.mu.Lock()
     	if t.state == closing {
     		t.mu.Unlock()
    -		return
    +		return nil
     	}
     	if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
     		// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
    @@ -1274,8 +1322,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
     	id := f.LastStreamID
     	if id > 0 && id%2 == 0 {
     		t.mu.Unlock()
    -		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
    -		return
    +		return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
     	}
     	// A client can receive multiple GoAways from the server (see
     	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
    @@ -1292,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
     		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
     		if id > t.prevGoAwayID {
     			t.mu.Unlock()
    -			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
    -			return
    +			return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
     		}
     	default:
     		t.setGoAwayReason(f)
    @@ -1317,15 +1363,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
     	t.prevGoAwayID = id
     	if len(t.activeStreams) == 0 {
     		t.mu.Unlock()
    -		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
    -		return
    +		return connectionErrorf(true, nil, "received goaway and there are no active streams")
     	}
     
    -	streamsToClose := make([]*Stream, 0)
    +	streamsToClose := make([]*ClientStream, 0)
     	for streamID, stream := range t.activeStreams {
     		if streamID > id && streamID <= upperLimit {
     			// The stream was unprocessed by the server.
    -			atomic.StoreUint32(&stream.unprocessed, 1)
    +			stream.unprocessed.Store(true)
     			streamsToClose = append(streamsToClose, stream)
     		}
     	}
    @@ -1335,6 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
     	for _, stream := range streamsToClose {
     		t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
     	}
    +	return nil
     }
     
     // setGoAwayReason sets the value of t.goAwayReason based
    @@ -1376,7 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
     		return
     	}
     	endStream := frame.StreamEnded()
    -	atomic.StoreUint32(&s.bytesReceived, 1)
    +	s.bytesReceived.Store(true)
     	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
     
     	if !initialHeader && !endStream {
    @@ -1570,7 +1616,13 @@ func (t *http2Client) readServerPreface() error {
     // network connection.  If the server preface is not read successfully, an
     // error is pushed to errCh; otherwise errCh is closed with no error.
     func (t *http2Client) reader(errCh chan<- error) {
    -	defer close(t.readerDone)
    +	var errClose error
    +	defer func() {
    +		close(t.readerDone)
    +		if errClose != nil {
    +			t.Close(errClose)
    +		}
    +	}()
     
     	if err := t.readServerPreface(); err != nil {
     		errCh <- err
    @@ -1609,11 +1661,10 @@ func (t *http2Client) reader(errCh chan<- error) {
     					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
     				}
     				continue
    -			} else {
    -				// Transport error.
    -				t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
    -				return
     			}
    +			// Transport error.
    +			errClose = connectionErrorf(true, err, "error reading from server: %v", err)
    +			return
     		}
     		switch frame := frame.(type) {
     		case *http2.MetaHeadersFrame:
    @@ -1627,7 +1678,7 @@ func (t *http2Client) reader(errCh chan<- error) {
     		case *http2.PingFrame:
     			t.handlePing(frame)
     		case *http2.GoAwayFrame:
    -			t.handleGoAway(frame)
    +			errClose = t.handleGoAway(frame)
     		case *http2.WindowUpdateFrame:
     			t.handleWindowUpdate(frame)
     		default:
    @@ -1638,15 +1689,15 @@ func (t *http2Client) reader(errCh chan<- error) {
     	}
     }
     
    -func minTime(a, b time.Duration) time.Duration {
    -	if a < b {
    -		return a
    -	}
    -	return b
    -}
    -
     // keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
     func (t *http2Client) keepalive() {
    +	var err error
    +	defer func() {
    +		close(t.keepaliveDone)
    +		if err != nil {
    +			t.Close(err)
    +		}
    +	}()
     	p := &ping{data: [8]byte{}}
     	// True iff a ping has been sent, and no data has been received since then.
     	outstandingPing := false
    @@ -1670,7 +1721,7 @@ func (t *http2Client) keepalive() {
     				continue
     			}
     			if outstandingPing && timeoutLeft <= 0 {
    -				t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
    +				err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
     				return
     			}
     			t.mu.Lock()
    @@ -1702,7 +1753,7 @@ func (t *http2Client) keepalive() {
     			// keepalive timer expired. In both cases, we need to send a ping.
     			if !outstandingPing {
     				if channelz.IsOn() {
    -					atomic.AddInt64(&t.czData.kpCount, 1)
    +					t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
     				}
     				t.controlBuf.put(p)
     				timeoutLeft = t.kp.Timeout
    @@ -1712,7 +1763,7 @@ func (t *http2Client) keepalive() {
     			// timeoutLeft. This will ensure that we wait only for kp.Time
     			// before sending out the next ping (for cases where the ping is
     			// acked).
    -			sleepDuration := minTime(t.kp.Time, timeoutLeft)
    +			sleepDuration := min(t.kp.Time, timeoutLeft)
     			timeoutLeft -= sleepDuration
     			timer.Reset(sleepDuration)
     		case <-t.ctx.Done():
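The keepalive loop above drops the minTime helper in favor of Go 1.21's built-in min, which works directly on time.Duration. A small sketch of the sleep calculation on its own:

package main

import (
	"fmt"
	"time"
)

func main() {
	kpTime := 10 * time.Second
	timeoutLeft := 3 * time.Second
	sleepDuration := min(kpTime, timeoutLeft) // replaces minTime(a, b)
	timeoutLeft -= sleepDuration
	fmt.Println(sleepDuration, timeoutLeft) // 3s 0s
}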
    @@ -1732,40 +1783,27 @@ func (t *http2Client) GoAway() <-chan struct{} {
     	return t.goAway
     }
     
    -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
    -	s := channelz.SocketInternalMetric{
    -		StreamsStarted:                  atomic.LoadInt64(&t.czData.streamsStarted),
    -		StreamsSucceeded:                atomic.LoadInt64(&t.czData.streamsSucceeded),
    -		StreamsFailed:                   atomic.LoadInt64(&t.czData.streamsFailed),
    -		MessagesSent:                    atomic.LoadInt64(&t.czData.msgSent),
    -		MessagesReceived:                atomic.LoadInt64(&t.czData.msgRecv),
    -		KeepAlivesSent:                  atomic.LoadInt64(&t.czData.kpCount),
    -		LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
    -		LastMessageSentTimestamp:        time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
    -		LastMessageReceivedTimestamp:    time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
    -		LocalFlowControlWindow:          int64(t.fc.getSize()),
    -		SocketOptions:                   channelz.GetSocketOption(t.conn),
    -		LocalAddr:                       t.localAddr,
    -		RemoteAddr:                      t.remoteAddr,
    -		// RemoteName :
    -	}
    -	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
    -		s.Security = au.GetSecurityValue()
    -	}
    -	s.RemoteFlowControlWindow = t.getOutFlowWindow()
    -	return &s
    +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
    +	return &channelz.EphemeralSocketMetrics{
    +		LocalFlowControlWindow:  int64(t.fc.getSize()),
    +		RemoteFlowControlWindow: t.getOutFlowWindow(),
    +	}
     }
     
     func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
     
    -func (t *http2Client) IncrMsgSent() {
    -	atomic.AddInt64(&t.czData.msgSent, 1)
    -	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
    +func (t *http2Client) incrMsgSent() {
    +	if channelz.IsOn() {
    +		t.channelz.SocketMetrics.MessagesSent.Add(1)
    +		t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
    +	}
     }
     
    -func (t *http2Client) IncrMsgRecv() {
    -	atomic.AddInt64(&t.czData.msgRecv, 1)
    -	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
    +func (t *http2Client) incrMsgRecv() {
    +	if channelz.IsOn() {
    +		t.channelz.SocketMetrics.MessagesReceived.Add(1)
    +		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
    +	}
     }
     
     func (t *http2Client) getOutFlowWindow() int64 {
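The channelz changes above replace the old czData int64 fields updated with atomic.AddInt64/StoreInt64 by typed atomics on channelz.SocketMetrics, guarded by channelz.IsOn(). A minimal sketch of the same counter-plus-last-timestamp pattern using plain sync/atomic; the struct and field names are invented, not the channelz types:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type socketMetrics struct {
	MessagesSent             atomic.Int64
	LastMessageSentTimestamp atomic.Int64 // unix nanoseconds
}

// incrMsgSent bumps the counter and records when the last message went out.
func (m *socketMetrics) incrMsgSent() {
	m.MessagesSent.Add(1)
	m.LastMessageSentTimestamp.Store(time.Now().UnixNano())
}

func main() {
	var m socketMetrics
	m.incrMsgSent()
	fmt.Println(m.MessagesSent.Load(), m.LastMessageSentTimestamp.Load() > 0) // 1 true
}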
    diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
    index f6bac0e8a0..0055fddd7e 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
    @@ -25,6 +25,7 @@ import (
     	"fmt"
     	"io"
     	"math"
    +	rand "math/rand/v2"
     	"net"
     	"net/http"
     	"strconv"
    @@ -32,18 +33,18 @@ import (
     	"sync/atomic"
     	"time"
     
    -	"github.com/golang/protobuf/proto"
     	"golang.org/x/net/http2"
     	"golang.org/x/net/http2/hpack"
     	"google.golang.org/grpc/internal/grpclog"
     	"google.golang.org/grpc/internal/grpcutil"
     	"google.golang.org/grpc/internal/pretty"
     	"google.golang.org/grpc/internal/syscall"
    +	"google.golang.org/grpc/mem"
    +	"google.golang.org/protobuf/proto"
     
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/credentials"
     	"google.golang.org/grpc/internal/channelz"
    -	"google.golang.org/grpc/internal/grpcrand"
     	"google.golang.org/grpc/internal/grpcsync"
     	"google.golang.org/grpc/keepalive"
     	"google.golang.org/grpc/metadata"
    @@ -110,7 +111,7 @@ type http2Server struct {
     	// already initialized since draining is already underway.
     	drainEvent    *grpcsync.Event
     	state         transportState
    -	activeStreams map[uint32]*Stream
    +	activeStreams map[uint32]*ServerStream
     	// idle is the time instant when the connection went idle.
     	// This is either the beginning of the connection or when the number of
     	// RPCs go down to 0.
    @@ -118,9 +119,8 @@ type http2Server struct {
     	idle time.Time
     
     	// Fields below are for channelz metric collection.
    -	channelzID *channelz.Identifier
    -	czData     *channelzData
    -	bufferPool *bufferPool
    +	channelz   *channelz.Socket
    +	bufferPool mem.BufferPool
     
     	connectionID uint64
     
    @@ -256,15 +256,30 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     		inTapHandle:       config.InTapHandle,
     		fc:                &trInFlow{limit: uint32(icwz)},
     		state:             reachable,
    -		activeStreams:     make(map[uint32]*Stream),
    +		activeStreams:     make(map[uint32]*ServerStream),
     		stats:             config.StatsHandlers,
     		kp:                kp,
     		idle:              time.Now(),
     		kep:               kep,
     		initialWindowSize: iwz,
    -		czData:            new(channelzData),
    -		bufferPool:        newBufferPool(),
    -	}
    +		bufferPool:        config.BufferPool,
    +	}
    +	var czSecurity credentials.ChannelzSecurityValue
    +	if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
    +		czSecurity = au.GetSecurityValue()
    +	}
    +	t.channelz = channelz.RegisterSocket(
    +		&channelz.Socket{
    +			SocketType:       channelz.SocketTypeNormal,
    +			Parent:           config.ChannelzParent,
    +			SocketMetrics:    channelz.SocketMetrics{},
    +			EphemeralMetrics: t.socketMetrics,
    +			LocalAddr:        t.peer.LocalAddr,
    +			RemoteAddr:       t.peer.Addr,
    +			SocketOptions:    channelz.GetSocketOption(t.conn),
    +			Security:         czSecurity,
    +		},
    +	)
     	t.logger = prefixLoggerForServerTransport(t)
     
     	t.controlBuf = newControlBuffer(t.done)
    @@ -274,10 +289,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     			updateFlowControl: t.updateFlowControl,
     		}
     	}
    -	t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr))
    -	if err != nil {
    -		return nil, err
    -	}
     
     	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
     	t.framer.writer.Flush()
    @@ -320,10 +331,27 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     	t.handleSettings(sf)
     
     	go func() {
    -		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
    -		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
    -		t.loopy.run()
    +		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
    +		err := t.loopy.run()
     		close(t.loopyWriterDone)
    +		if !isIOError(err) {
    +			// Close the connection if a non-I/O error occurs (for I/O errors
    +			// the reader will also encounter the error and close).  Wait 1
    +			// second before closing the connection, or when the reader is done
    +			// (i.e. the client already closed the connection or a connection
    +			// error occurred).  This avoids the potential problem where there
    +			// is unread data on the receive side of the connection, which, if
    +			// closed, would lead to a TCP RST instead of FIN, and the client
    +			// encountering errors.  For more info:
    +			// https://github.com/grpc/grpc-go/issues/5358
    +			timer := time.NewTimer(time.Second)
    +			defer timer.Stop()
    +			select {
    +			case <-t.readerDone:
    +			case <-timer.C:
    +			}
    +			t.conn.Close()
    +		}
     	}()
     	go t.keepalive()
     	return t, nil
    @@ -331,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     
     // operateHeaders takes action on the decoded headers. Returns an error if fatal
     // error encountered and transport needs to close, otherwise returns nil.
    -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
    +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error {
     	// Acquire max stream ID lock for entire duration
     	t.maxStreamMu.Lock()
     	defer t.maxStreamMu.Unlock()
    @@ -357,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
     	t.maxStreamID = streamID
     
     	buf := newRecvBuffer()
    -	s := &Stream{
    -		id:               streamID,
    +	s := &ServerStream{
    +		Stream: &Stream{
    +			id:  streamID,
    +			buf: buf,
    +			fc:  &inFlow{limit: uint32(t.initialWindowSize)},
    +		},
     		st:               t,
    -		buf:              buf,
    -		fc:               &inFlow{limit: uint32(t.initialWindowSize)},
     		headerWireLength: int(frame.Header().Length),
     	}
     	var (
    @@ -509,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
     	// Attach the received metadata to the context.
     	if len(mdata) > 0 {
     		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
    -		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
    -			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
    -		}
    -		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
    -			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
    -		}
     	}
     	t.mu.Lock()
     	if t.state != reachable {
    @@ -576,8 +600,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
     	}
     	t.mu.Unlock()
     	if channelz.IsOn() {
    -		atomic.AddInt64(&t.czData.streamsStarted, 1)
    -		atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
    +		t.channelz.SocketMetrics.StreamsStarted.Add(1)
    +		t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
     	}
     	s.requestRead = func(n int) {
     		t.adjustWindow(s, uint32(n))
    @@ -586,10 +610,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
     	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
     	s.trReader = &transportReader{
     		reader: &recvBufferReader{
    -			ctx:        s.ctx,
    -			ctxDone:    s.ctxDone,
    -			recv:       s.buf,
    -			freeBuffer: t.bufferPool.put,
    +			ctx:     s.ctx,
    +			ctxDone: s.ctxDone,
    +			recv:    s.buf,
     		},
     		windowHandler: func(n int) {
     			t.updateWindow(s, uint32(n))
    @@ -607,10 +630,10 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
     // HandleStreams receives incoming streams using the given handler. This is
     // typically run in a separate goroutine.
     // traceCtx attaches trace to ctx and returns the new context.
    -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
    +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) {
     	defer func() {
    -		<-t.loopyWriterDone
     		close(t.readerDone)
    +		<-t.loopyWriterDone
     	}()
     	for {
     		t.controlBuf.throttle()
    @@ -636,18 +659,20 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
     				}
     				continue
     			}
    -			if err == io.EOF || err == io.ErrUnexpectedEOF {
    -				t.Close(err)
    -				return
    -			}
     			t.Close(err)
     			return
     		}
     		switch frame := frame.(type) {
     		case *http2.MetaHeadersFrame:
     			if err := t.operateHeaders(ctx, frame, handle); err != nil {
    -				t.Close(err)
    -				break
    +				// Any error processing client headers, e.g. invalid stream ID,
    +				// is considered a protocol violation.
    +				t.controlBuf.put(&goAway{
    +					code:      http2.ErrCodeProtocol,
    +					debugData: []byte(err.Error()),
    +					closeConn: err,
    +				})
    +				continue
     			}
     		case *http2.DataFrame:
     			t.handleData(frame)
    @@ -669,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
     	}
     }
     
    -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
    +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
     	t.mu.Lock()
     	defer t.mu.Unlock()
     	if t.activeStreams == nil {
    @@ -687,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
     // adjustWindow sends out extra window update over the initial window size
     // of stream if the application is requesting data larger in size than
     // the window.
    -func (t *http2Server) adjustWindow(s *Stream, n uint32) {
    +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
     	if w := s.fc.maybeAdjust(n); w > 0 {
     		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
     	}
    @@ -697,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) {
     // updateWindow adjusts the inbound quota for the stream and the transport.
     // Window updates will deliver to the controller for sending when
     // the cumulative quota exceeds the corresponding threshold.
    -func (t *http2Server) updateWindow(s *Stream, n uint32) {
    +func (t *http2Server) updateWindow(s *ServerStream, n uint32) {
     	if w := s.fc.onRead(n); w > 0 {
     		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
     			increment: w,
    @@ -784,10 +809,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
     		// guarantee f.Data() is consumed before the arrival of next frame.
     		// Can this copy be eliminated?
     		if len(f.Data()) > 0 {
    -			buffer := t.bufferPool.get()
    -			buffer.Reset()
    -			buffer.Write(f.Data())
    -			s.write(recvMsg{buffer: buffer})
    +			pool := t.bufferPool
    +			if pool == nil {
    +				// Note that this is only supposed to be nil in tests. Otherwise, stream is
    +				// always initialized with a BufferPool.
    +				pool = mem.DefaultBufferPool()
    +			}
    +			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
     		}
     	}
     	if f.StreamEnded() {
    @@ -830,7 +858,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
     		}
     		return nil
     	})
    -	t.controlBuf.executeAndPut(func(any) bool {
    +	t.controlBuf.executeAndPut(func() bool {
     		for _, f := range updateFuncs {
     			f()
     		}
    @@ -931,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool {
     	return true
     }
     
    -func (t *http2Server) streamContextErr(s *Stream) error {
    +func (t *http2Server) streamContextErr(s *ServerStream) error {
     	select {
     	case <-t.done:
     		return ErrConnClosing
    @@ -941,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error {
     }
     
     // WriteHeader sends the header metadata md back to the client.
    -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
    +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
     	s.hdrMu.Lock()
     	defer s.hdrMu.Unlock()
     	if s.getState() == streamDone {
    @@ -974,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() {
     	atomic.StoreUint32(&t.resetPingStrikes, 1)
     }
     
    -func (t *http2Server) writeHeaderLocked(s *Stream) error {
    +func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
     	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
     	// first and create a slice of that exact size.
     	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
    @@ -984,12 +1012,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
     		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
     	}
     	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
    -	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
    +	hf := &headerFrame{
     		streamID:  s.id,
     		hf:        headerFields,
     		endStream: false,
     		onWrite:   t.setResetPingStrikes,
    -	})
    +	}
    +	success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
     	if !success {
     		if err != nil {
     			return err
    @@ -1013,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
     // There is no further I/O operations being able to perform on this stream.
     // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
     // OK is adopted.
    -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
    +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
     	s.hdrMu.Lock()
     	defer s.hdrMu.Unlock()
     
    @@ -1059,7 +1088,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
     		onWrite:   t.setResetPingStrikes,
     	}
     
    -	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
    +	success, err := t.controlBuf.executeAndPut(func() bool {
    +		return t.checkForHeaderListSize(trailingHeader)
    +	}, nil)
     	if !success {
     		if err != nil {
     			return err
    @@ -1082,27 +1113,38 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
     
     // Write converts the data into HTTP2 data frame and sends it out. Non-nil error
     // is returns if it fails (e.g., framing error, transport error).
    -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
    +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
    +	reader := data.Reader()
    +
     	if !s.isHeaderSent() { // Headers haven't been written yet.
    -		if err := t.WriteHeader(s, nil); err != nil {
    +		if err := t.writeHeader(s, nil); err != nil {
    +			_ = reader.Close()
     			return err
     		}
     	} else {
     		// Writing headers checks for this condition.
     		if s.getState() == streamDone {
    +			_ = reader.Close()
     			return t.streamContextErr(s)
     		}
     	}
    +
     	df := &dataFrame{
     		streamID:    s.id,
     		h:           hdr,
    -		d:           data,
    +		reader:      reader,
     		onEachWrite: t.setResetPingStrikes,
     	}
    -	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
    +	if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
    +		_ = reader.Close()
     		return t.streamContextErr(s)
     	}
    -	return t.controlBuf.put(df)
    +	if err := t.controlBuf.put(df); err != nil {
    +		_ = reader.Close()
    +		return err
    +	}
    +	t.incrMsgSent()
    +	return nil
     }
     
     // keepalive running in a separate goroutine does the following:
    @@ -1178,12 +1220,12 @@ func (t *http2Server) keepalive() {
     				continue
     			}
     			if outstandingPing && kpTimeoutLeft <= 0 {
    -				t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
    +				t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
     				return
     			}
     			if !outstandingPing {
     				if channelz.IsOn() {
    -					atomic.AddInt64(&t.czData.kpCount, 1)
    +					t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
     				}
     				t.controlBuf.put(p)
     				kpTimeoutLeft = t.kp.Timeout
    @@ -1193,7 +1235,7 @@ func (t *http2Server) keepalive() {
     			// timeoutLeft. This will ensure that we wait only for kp.Time
     			// before sending out the next ping (for cases where the ping is
     			// acked).
    -			sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
    +			sleepDuration := min(t.kp.Time, kpTimeoutLeft)
     			kpTimeoutLeft -= sleepDuration
     			kpTimer.Reset(sleepDuration)
     		case <-t.done:
    @@ -1223,7 +1265,7 @@ func (t *http2Server) Close(err error) {
     	if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
     		t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
     	}
    -	channelz.RemoveEntry(t.channelzID)
    +	channelz.RemoveEntry(t.channelz.ID)
     	// Cancel all active streams.
     	for _, s := range streams {
     		s.cancel()
    @@ -1231,7 +1273,7 @@ func (t *http2Server) Close(err error) {
     }
     
     // deleteStream deletes the stream s from transport's active streams.
    -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
    +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
     
     	t.mu.Lock()
     	if _, ok := t.activeStreams[s.id]; ok {
    @@ -1244,15 +1286,15 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
     
     	if channelz.IsOn() {
     		if eosReceived {
    -			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
    +			t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
     		} else {
    -			atomic.AddInt64(&t.czData.streamsFailed, 1)
    +			t.channelz.SocketMetrics.StreamsFailed.Add(1)
     		}
     	}
     }
     
     // finishStream closes the stream and puts the trailing headerFrame into controlbuf.
    -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
    +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
     	// In case stream sending and receiving are invoked in separate
     	// goroutines (e.g., bi-directional streaming), cancel needs to be
     	// called to interrupt the potential blocking on other goroutines.
    @@ -1276,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
     }
     
     // closeStream clears the footprint of a stream when the stream is not needed any more.
    -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
    +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
     	// In case stream sending and receiving are invoked in separate
     	// goroutines (e.g., bi-directional streaming), cancel needs to be
     	// called to interrupt the potential blocking on other goroutines.
    @@ -1329,6 +1371,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
     		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
     			return false, err
     		}
    +		t.framer.writer.Flush()
     		if retErr != nil {
     			return false, retErr
     		}
    @@ -1349,7 +1392,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
     		return false, err
     	}
     	go func() {
    -		timer := time.NewTimer(time.Minute)
    +		timer := time.NewTimer(5 * time.Second)
     		defer timer.Stop()
     		select {
     		case <-t.drainEvent.Done():
    @@ -1362,38 +1405,25 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
     	return false, nil
     }
     
    -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
    -	s := channelz.SocketInternalMetric{
    -		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
    -		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
    -		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
    -		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
    -		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
    -		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
    -		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
    -		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
    -		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
    -		LocalFlowControlWindow:           int64(t.fc.getSize()),
    -		SocketOptions:                    channelz.GetSocketOption(t.conn),
    -		LocalAddr:                        t.peer.LocalAddr,
    -		RemoteAddr:                       t.peer.Addr,
    -		// RemoteName :
    -	}
    -	if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok {
    -		s.Security = au.GetSecurityValue()
    -	}
    -	s.RemoteFlowControlWindow = t.getOutFlowWindow()
    -	return &s
    +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
    +	return &channelz.EphemeralSocketMetrics{
    +		LocalFlowControlWindow:  int64(t.fc.getSize()),
    +		RemoteFlowControlWindow: t.getOutFlowWindow(),
    +	}
     }
     
    -func (t *http2Server) IncrMsgSent() {
    -	atomic.AddInt64(&t.czData.msgSent, 1)
    -	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
    +func (t *http2Server) incrMsgSent() {
    +	if channelz.IsOn() {
    +		t.channelz.SocketMetrics.MessagesSent.Add(1)
    +		t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
    +	}
     }
     
    -func (t *http2Server) IncrMsgRecv() {
    -	atomic.AddInt64(&t.czData.msgRecv, 1)
    -	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
    +func (t *http2Server) incrMsgRecv() {
    +	if channelz.IsOn() {
    +		t.channelz.SocketMetrics.MessagesReceived.Add(1)
    +		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
    +	}
     }
     
     func (t *http2Server) getOutFlowWindow() int64 {
    @@ -1426,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration {
     	}
     	// Generate a jitter between +/- 10% of the value.
     	r := int64(v / 10)
    -	j := grpcrand.Int63n(2*r) - r
    +	j := rand.Int64N(2*r) - r
     	return time.Duration(j)
     }
     
    diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
    index dc29d590e9..3613d7b648 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
    @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
     	return w
     }
     
    -func (w *bufWriter) Write(b []byte) (n int, err error) {
    +func (w *bufWriter) Write(b []byte) (int, error) {
     	if w.err != nil {
     		return 0, w.err
     	}
     	if w.batchSize == 0 { // Buffer has been disabled.
    -		n, err = w.conn.Write(b)
    +		n, err := w.conn.Write(b)
     		return n, toIOError(err)
     	}
     	if w.buf == nil {
     		b := w.pool.Get().(*[]byte)
     		w.buf = *b
     	}
    +	written := 0
     	for len(b) > 0 {
    -		nn := copy(w.buf[w.offset:], b)
    -		b = b[nn:]
    -		w.offset += nn
    -		n += nn
    -		if w.offset >= w.batchSize {
    -			err = w.flushKeepBuffer()
    +		copied := copy(w.buf[w.offset:], b)
    +		b = b[copied:]
    +		written += copied
    +		w.offset += copied
    +		if w.offset < w.batchSize {
    +			continue
    +		}
    +		if err := w.flushKeepBuffer(); err != nil {
    +			return written, err
     		}
     	}
    -	return n, err
    +	return written, nil
     }
     
     func (w *bufWriter) Flush() error {
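The rewritten bufWriter.Write above copies input into a fixed batch buffer, flushes whenever the buffer fills, and reports how many bytes were accepted before a flush error. A minimal standalone sketch of the same copy/flush loop; the batchWriter type is invented for illustration:

package main

import (
	"bytes"
	"fmt"
)

type batchWriter struct {
	out       *bytes.Buffer
	buf       []byte
	offset    int
	batchSize int
}

func (w *batchWriter) Write(b []byte) (int, error) {
	written := 0
	for len(b) > 0 {
		copied := copy(w.buf[w.offset:], b)
		b = b[copied:]
		written += copied
		w.offset += copied
		if w.offset < w.batchSize {
			continue // keep buffering until the batch is full
		}
		if err := w.flush(); err != nil {
			return written, err
		}
	}
	return written, nil
}

func (w *batchWriter) flush() error {
	_, err := w.out.Write(w.buf[:w.offset])
	w.offset = 0
	return err
}

func main() {
	w := &batchWriter{out: &bytes.Buffer{}, buf: make([]byte, 4), batchSize: 4}
	n, err := w.Write([]byte("hello world"))
	fmt.Println(n, err, w.out.String()) // 11 <nil> "hello wo"; "rld" is still buffered
}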
    @@ -389,7 +393,7 @@ type framer struct {
     	fr     *http2.Framer
     }
     
    -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
    +var writeBufferPoolMap = make(map[int]*sync.Pool)
     var writeBufferMutex sync.Mutex
     
     func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
    @@ -418,10 +422,9 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
     	return f
     }
     
    -func getWriteBufferPool(writeBufferSize int) *sync.Pool {
    +func getWriteBufferPool(size int) *sync.Pool {
     	writeBufferMutex.Lock()
     	defer writeBufferMutex.Unlock()
    -	size := writeBufferSize * 2
     	pool, ok := writeBufferPoolMap[size]
     	if ok {
     		return pool
    diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
    index 24fa103257..54b2244365 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
    @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
     		}
     		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
     	}
    -
    -	return &bufConn{Conn: conn, r: r}, nil
    +	// The buffer could contain extra bytes from the target server, so we can't
    +	// discard it. However, in many cases where the server waits for the client
    +	// to send the first message (e.g. when TLS is being used), the buffer will
    +	// be empty, so we can avoid the overhead of reading through this buffer.
    +	if r.Buffered() != 0 {
    +		return &bufConn{Conn: conn, r: r}, nil
    +	}
    +	return conn, nil
     }
     
     // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
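The proxy handshake above now returns the raw connection unless the bufio.Reader still holds bytes read past the CONNECT response, in which case a wrapping conn that drains the buffer first is required. A minimal sketch of that decision; bufConnSketch and maybeWrap are invented stand-ins for the transport's bufConn:

package main

import (
	"bufio"
	"io"
	"net"
)

type bufConnSketch struct {
	net.Conn
	r io.Reader
}

// Read drains the buffered reader, which itself falls through to the conn.
func (c *bufConnSketch) Read(b []byte) (int, error) { return c.r.Read(b) }

// maybeWrap keeps the zero-overhead path when nothing was over-read.
func maybeWrap(conn net.Conn, r *bufio.Reader) net.Conn {
	if r.Buffered() != 0 {
		return &bufConnSketch{Conn: conn, r: r}
	}
	return conn
}

func main() {
	c1, c2 := net.Pipe()
	defer c1.Close()
	defer c2.Close()
	_ = maybeWrap(c1, bufio.NewReader(c1)) // nothing buffered, so c1 is returned as-is
}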
    diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
    new file mode 100644
    index 0000000000..a22a901514
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
    @@ -0,0 +1,178 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package transport
    +
    +import (
    +	"context"
    +	"errors"
    +	"strings"
    +	"sync"
    +	"sync/atomic"
    +
    +	"google.golang.org/grpc/mem"
    +	"google.golang.org/grpc/metadata"
    +	"google.golang.org/grpc/status"
    +)
    +
    +// ServerStream implements streaming functionality for a gRPC server.
    +type ServerStream struct {
    +	*Stream // Embed for common stream functionality.
    +
    +	st      internalServerTransport
    +	ctxDone <-chan struct{}    // closed at the end of stream.  Cache of ctx.Done() (for performance)
    +	cancel  context.CancelFunc // invoked at the end of stream to cancel ctx.
    +
    +	// Holds compressor names passed in grpc-accept-encoding metadata from the
    +	// client.
    +	clientAdvertisedCompressors string
    +	headerWireLength            int
    +
    +	// hdrMu protects outgoing header and trailer metadata.
    +	hdrMu      sync.Mutex
    +	header     metadata.MD // the outgoing header metadata.  Updated by WriteHeader.
    +	headerSent atomic.Bool // atomically set when the headers are sent out.
    +}
    +
    +// Read reads an n byte message from the input stream.
    +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) {
    +	b, err := s.Stream.read(n)
    +	if err == nil {
    +		s.st.incrMsgRecv()
    +	}
    +	return b, err
    +}
    +
    +// SendHeader sends the header metadata for the given stream.
    +func (s *ServerStream) SendHeader(md metadata.MD) error {
    +	return s.st.writeHeader(s, md)
    +}
    +
    +// Write writes the hdr and data bytes to the output stream.
    +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
    +	return s.st.write(s, hdr, data, opts)
    +}
    +
    +// WriteStatus sends the status of a stream to the client.  WriteStatus is
    +// the final call made on a stream and always occurs.
    +func (s *ServerStream) WriteStatus(st *status.Status) error {
    +	return s.st.writeStatus(s, st)
    +}
    +
    +// isHeaderSent indicates whether headers have been sent.
    +func (s *ServerStream) isHeaderSent() bool {
    +	return s.headerSent.Load()
    +}
    +
    +// updateHeaderSent updates headerSent and returns true
    +// if it was already set.
    +func (s *ServerStream) updateHeaderSent() bool {
    +	return s.headerSent.Swap(true)
    +}
    +
    +// RecvCompress returns the compression algorithm applied to the inbound
    +// message. It is empty string if there is no compression applied.
    +func (s *ServerStream) RecvCompress() string {
    +	return s.recvCompress
    +}
    +
    +// SendCompress returns the send compressor name.
    +func (s *ServerStream) SendCompress() string {
    +	return s.sendCompress
    +}
    +
    +// ContentSubtype returns the content-subtype for a request. For example, a
    +// content-subtype of "proto" will result in a content-type of
    +// "application/grpc+proto". This will always be lowercase.  See
    +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
    +// more details.
    +func (s *ServerStream) ContentSubtype() string {
    +	return s.contentSubtype
    +}
    +
    +// SetSendCompress sets the compression algorithm to the stream.
    +func (s *ServerStream) SetSendCompress(name string) error {
    +	if s.isHeaderSent() || s.getState() == streamDone {
    +		return errors.New("transport: set send compressor called after headers sent or stream done")
    +	}
    +
    +	s.sendCompress = name
    +	return nil
    +}
    +
    +// SetContext sets the context of the stream. This will be deleted once the
    +// stats handler callouts all move to gRPC layer.
    +func (s *ServerStream) SetContext(ctx context.Context) {
    +	s.ctx = ctx
    +}
    +
    +// ClientAdvertisedCompressors returns the compressor names advertised by the
    +// client via grpc-accept-encoding header.
    +func (s *ServerStream) ClientAdvertisedCompressors() []string {
    +	values := strings.Split(s.clientAdvertisedCompressors, ",")
    +	for i, v := range values {
    +		values[i] = strings.TrimSpace(v)
    +	}
    +	return values
    +}
    +
    +// Header returns the header metadata of the stream.  It returns the out header
    +// after t.WriteHeader is called.  It does not block and must not be called
    +// until after WriteHeader.
    +func (s *ServerStream) Header() (metadata.MD, error) {
    +	// Return the header in stream. It will be the out
    +	// header after t.WriteHeader is called.
    +	return s.header.Copy(), nil
    +}
    +
    +// HeaderWireLength returns the size of the headers of the stream as received
    +// from the wire.
    +func (s *ServerStream) HeaderWireLength() int {
    +	return s.headerWireLength
    +}
    +
    +// SetHeader sets the header metadata. This can be called multiple times.
    +// This should not be called in parallel to other data writes.
    +func (s *ServerStream) SetHeader(md metadata.MD) error {
    +	if md.Len() == 0 {
    +		return nil
    +	}
    +	if s.isHeaderSent() || s.getState() == streamDone {
    +		return ErrIllegalHeaderWrite
    +	}
    +	s.hdrMu.Lock()
    +	s.header = metadata.Join(s.header, md)
    +	s.hdrMu.Unlock()
    +	return nil
    +}
    +
    +// SetTrailer sets the trailer metadata which will be sent with the RPC status
    +// by the server. This can be called multiple times.
    +// This should not be called parallel to other data writes.
    +func (s *ServerStream) SetTrailer(md metadata.MD) error {
    +	if md.Len() == 0 {
    +		return nil
    +	}
    +	if s.getState() == streamDone {
    +		return ErrIllegalHeaderWrite
    +	}
    +	s.hdrMu.Lock()
    +	s.trailer = metadata.Join(s.trailer, md)
    +	s.hdrMu.Unlock()
    +	return nil
    +}
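
For context, the SetHeader/SetTrailer checks above are what back the server-side
metadata helpers in the public grpc package. A minimal sketch of that flow,
assuming an ordinary unary handler (the handler and its message types are
illustrative, not part of this change):

        package example

        import (
            "context"

            "google.golang.org/grpc"
            "google.golang.org/grpc/metadata"
        )

        // echo is an illustrative unary handler. grpc.SetHeader and grpc.SetTrailer
        // eventually reach ServerStream.SetHeader and ServerStream.SetTrailer above:
        // headers are merged until they are sent, trailers are merged until the
        // final status is written.
        func echo(ctx context.Context, req string) (string, error) {
            if err := grpc.SetHeader(ctx, metadata.Pairs("x-stage", "start")); err != nil {
                return "", err
            }
            if err := grpc.SetTrailer(ctx, metadata.Pairs("x-stage", "done")); err != nil {
                return "", err
            }
            return req, nil
        }

Once headers go out (explicitly via grpc.SendHeader or implicitly with the first
response message), headerSent is set and later SetHeader calls fail with
ErrIllegalHeaderWrite, matching the checks above.
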
    diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
    index b7b8fec180..2859b87755 100644
    --- a/vendor/google.golang.org/grpc/internal/transport/transport.go
    +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
    @@ -22,7 +22,6 @@
     package transport
     
     import (
    -	"bytes"
     	"context"
     	"errors"
     	"fmt"
    @@ -36,9 +35,9 @@ import (
     	"google.golang.org/grpc/credentials"
     	"google.golang.org/grpc/internal/channelz"
     	"google.golang.org/grpc/keepalive"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/peer"
    -	"google.golang.org/grpc/resolver"
     	"google.golang.org/grpc/stats"
     	"google.golang.org/grpc/status"
     	"google.golang.org/grpc/tap"
    @@ -46,32 +45,10 @@ import (
     
     const logLevel = 2
     
    -type bufferPool struct {
    -	pool sync.Pool
    -}
    -
    -func newBufferPool() *bufferPool {
    -	return &bufferPool{
    -		pool: sync.Pool{
    -			New: func() any {
    -				return new(bytes.Buffer)
    -			},
    -		},
    -	}
    -}
    -
    -func (p *bufferPool) get() *bytes.Buffer {
    -	return p.pool.Get().(*bytes.Buffer)
    -}
    -
    -func (p *bufferPool) put(b *bytes.Buffer) {
    -	p.pool.Put(b)
    -}
    -
     // recvMsg represents the received msg from the transport. All transport
     // protocol specific info has been removed.
     type recvMsg struct {
    -	buffer *bytes.Buffer
    +	buffer mem.Buffer
     	// nil: received some data
     	// io.EOF: stream is completed. data is nil.
     	// other non-nil error: transport failure. data is nil.
    @@ -101,6 +78,9 @@ func newRecvBuffer() *recvBuffer {
     func (b *recvBuffer) put(r recvMsg) {
     	b.mu.Lock()
     	if b.err != nil {
    +		// drop the buffer on the floor. Since b.err is not nil, any subsequent reads
    +		// will always return an error, making this buffer inaccessible.
    +		r.buffer.Free()
     		b.mu.Unlock()
     		// An error had occurred earlier, don't accept more
     		// data or errors.
    @@ -147,45 +127,97 @@ type recvBufferReader struct {
     	ctx         context.Context
     	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
     	recv        *recvBuffer
    -	last        *bytes.Buffer // Stores the remaining data in the previous calls.
    +	last        mem.Buffer // Stores the remaining data in the previous calls.
     	err         error
    -	freeBuffer  func(*bytes.Buffer)
     }
     
    -// Read reads the next len(p) bytes from last. If last is drained, it tries to
    -// read additional data from recv. It blocks if there no additional data available
    -// in recv. If Read returns any non-nil error, it will continue to return that error.
    -func (r *recvBufferReader) Read(p []byte) (n int, err error) {
    +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
     	if r.err != nil {
     		return 0, r.err
     	}
     	if r.last != nil {
    -		// Read remaining data left in last call.
    -		copied, _ := r.last.Read(p)
    -		if r.last.Len() == 0 {
    -			r.freeBuffer(r.last)
    +		n, r.last = mem.ReadUnsafe(header, r.last)
    +		return n, nil
    +	}
    +	if r.closeStream != nil {
    +		n, r.err = r.readMessageHeaderClient(header)
    +	} else {
    +		n, r.err = r.readMessageHeader(header)
    +	}
    +	return n, r.err
    +}
    +
    +// Read reads the next n bytes from last. If last is drained, it tries to read
    +// additional data from recv. It blocks if there is no additional data
    +// available in recv. If Read returns any non-nil error, it will continue to
    +// return that error.
    +func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
    +	if r.err != nil {
    +		return nil, r.err
    +	}
    +	if r.last != nil {
    +		buf = r.last
    +		if r.last.Len() > n {
    +			buf, r.last = mem.SplitUnsafe(buf, n)
    +		} else {
     			r.last = nil
     		}
    -		return copied, nil
    +		return buf, nil
     	}
     	if r.closeStream != nil {
    -		n, r.err = r.readClient(p)
    +		buf, r.err = r.readClient(n)
     	} else {
    -		n, r.err = r.read(p)
    +		buf, r.err = r.read(n)
     	}
    -	return n, r.err
    +	return buf, r.err
     }
     
    -func (r *recvBufferReader) read(p []byte) (n int, err error) {
    +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) {
     	select {
     	case <-r.ctxDone:
     		return 0, ContextErr(r.ctx.Err())
     	case m := <-r.recv.get():
    -		return r.readAdditional(m, p)
    +		return r.readMessageHeaderAdditional(m, header)
    +	}
    +}
    +
    +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
    +	select {
    +	case <-r.ctxDone:
    +		return nil, ContextErr(r.ctx.Err())
    +	case m := <-r.recv.get():
    +		return r.readAdditional(m, n)
    +	}
    +}
    +
    +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) {
    +	// If the context is canceled, then closes the stream with nil metadata.
    +	// closeStream writes its error parameter to r.recv as a recvMsg.
    +	// r.readAdditional acts on that message and returns the necessary error.
    +	select {
    +	case <-r.ctxDone:
    +		// Note that this adds the ctx error to the end of recv buffer, and
    +		// reads from the head. This will delay the error until recv buffer is
    +		// empty, thus will delay ctx cancellation in Recv().
    +		//
    +		// It's done this way to fix a race between ctx cancel and trailer. The
    +		// race was, stream.Recv() may return ctx error if ctxDone wins the
    +		// race, but stream.Trailer() may return a non-nil md because the stream
    +		// was not marked as done when trailer is received. This closeStream
    +		// call will mark stream as done, thus fix the race.
    +		//
    +		// TODO: delaying ctx error seems like an unnecessary side effect. What
    +		// we really want is to mark the stream as done, and return ctx error
    +		// faster.
    +		r.closeStream(ContextErr(r.ctx.Err()))
    +		m := <-r.recv.get()
    +		return r.readMessageHeaderAdditional(m, header)
    +	case m := <-r.recv.get():
    +		return r.readMessageHeaderAdditional(m, header)
     	}
     }
     
    -func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
    +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
     	// If the context is canceled, then closes the stream with nil metadata.
     	// closeStream writes its error parameter to r.recv as a recvMsg.
     	// r.readAdditional acts on that message and returns the necessary error.
    @@ -206,25 +238,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
     		// faster.
     		r.closeStream(ContextErr(r.ctx.Err()))
     		m := <-r.recv.get()
    -		return r.readAdditional(m, p)
    +		return r.readAdditional(m, n)
     	case m := <-r.recv.get():
    -		return r.readAdditional(m, p)
    +		return r.readAdditional(m, n)
     	}
     }
     
    -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
    +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
     	r.recv.load()
     	if m.err != nil {
    +		if m.buffer != nil {
    +			m.buffer.Free()
    +		}
     		return 0, m.err
     	}
    -	copied, _ := m.buffer.Read(p)
    -	if m.buffer.Len() == 0 {
    -		r.freeBuffer(m.buffer)
    -		r.last = nil
    -	} else {
    -		r.last = m.buffer
    +
    +	n, r.last = mem.ReadUnsafe(header, m.buffer)
    +
    +	return n, nil
    +}
    +
    +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
    +	r.recv.load()
    +	if m.err != nil {
    +		if m.buffer != nil {
    +			m.buffer.Free()
    +		}
    +		return nil, m.err
    +	}
    +
    +	if m.buffer.Len() > n {
    +		m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
     	}
    -	return copied, nil
    +
    +	return m.buffer, nil
     }
     
     type streamState uint32
    @@ -239,73 +286,26 @@ const (
     // Stream represents an RPC in the transport layer.
     type Stream struct {
     	id           uint32
    -	st           ServerTransport    // nil for client side Stream
    -	ct           *http2Client       // nil for server side Stream
    -	ctx          context.Context    // the associated context of the stream
    -	cancel       context.CancelFunc // always nil for client side Stream
    -	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
    -	doneFunc     func()             // invoked at the end of stream on client side.
    -	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
    -	method       string             // the associated RPC method of the stream
    +	ctx          context.Context // the associated context of the stream
    +	method       string          // the associated RPC method of the stream
     	recvCompress string
     	sendCompress string
     	buf          *recvBuffer
    -	trReader     io.Reader
    +	trReader     *transportReader
     	fc           *inFlow
     	wq           *writeQuota
     
    -	// Holds compressor names passed in grpc-accept-encoding metadata from the
    -	// client. This is empty for the client side stream.
    -	clientAdvertisedCompressors string
     	// Callback to state application's intentions to read data. This
     	// is used to adjust flow control, if needed.
     	requestRead func(int)
     
    -	headerChan       chan struct{} // closed to indicate the end of header metadata.
    -	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
    -	// headerValid indicates whether a valid header was received.  Only
    -	// meaningful after headerChan is closed (always call waitOnHeader() before
    -	// reading its value).  Not valid on server side.
    -	headerValid      bool
    -	headerWireLength int // Only set on server side.
    -
    -	// hdrMu protects header and trailer metadata on the server-side.
    -	hdrMu sync.Mutex
    -	// On client side, header keeps the received header metadata.
    -	//
    -	// On server side, header keeps the header set by SetHeader(). The complete
    -	// header will merged into this after t.WriteHeader() is called.
    -	header  metadata.MD
    -	trailer metadata.MD // the key-value map of trailer metadata.
    -
    -	noHeaders bool // set if the client never received headers (set only after the stream is done).
    -
    -	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
    -	headerSent uint32
    -
     	state streamState
     
    -	// On client-side it is the status error received from the server.
    -	// On server-side it is unused.
    -	status *status.Status
    -
    -	bytesReceived uint32 // indicates whether any bytes have been received on this stream
    -	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
    -
     	// contentSubtype is the content-subtype for requests.
     	// this must be lowercase or the behavior is undefined.
     	contentSubtype string
    -}
     
    -// isHeaderSent is only valid on the server-side.
    -func (s *Stream) isHeaderSent() bool {
    -	return atomic.LoadUint32(&s.headerSent) == 1
    -}
    -
    -// updateHeaderSent updates headerSent and returns true
    -// if it was alreay set. It is valid only on server-side.
    -func (s *Stream) updateHeaderSent() bool {
    -	return atomic.SwapUint32(&s.headerSent, 1) == 1
    +	trailer metadata.MD // the key-value map of trailer metadata.
     }
     
     func (s *Stream) swapState(st streamState) streamState {
    @@ -320,106 +320,12 @@ func (s *Stream) getState() streamState {
     	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
     }
     
    -func (s *Stream) waitOnHeader() {
    -	if s.headerChan == nil {
    -		// On the server headerChan is always nil since a stream originates
    -		// only after having received headers.
    -		return
    -	}
    -	select {
    -	case <-s.ctx.Done():
    -		// Close the stream to prevent headers/trailers from changing after
    -		// this function returns.
    -		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
    -		// headerChan could possibly not be closed yet if closeStream raced
    -		// with operateHeaders; wait until it is closed explicitly here.
    -		<-s.headerChan
    -	case <-s.headerChan:
    -	}
    -}
    -
    -// RecvCompress returns the compression algorithm applied to the inbound
    -// message. It is empty string if there is no compression applied.
    -func (s *Stream) RecvCompress() string {
    -	s.waitOnHeader()
    -	return s.recvCompress
    -}
    -
    -// SetSendCompress sets the compression algorithm to the stream.
    -func (s *Stream) SetSendCompress(name string) error {
    -	if s.isHeaderSent() || s.getState() == streamDone {
    -		return errors.New("transport: set send compressor called after headers sent or stream done")
    -	}
    -
    -	s.sendCompress = name
    -	return nil
    -}
    -
    -// SendCompress returns the send compressor name.
    -func (s *Stream) SendCompress() string {
    -	return s.sendCompress
    -}
    -
    -// ClientAdvertisedCompressors returns the compressor names advertised by the
    -// client via grpc-accept-encoding header.
    -func (s *Stream) ClientAdvertisedCompressors() string {
    -	return s.clientAdvertisedCompressors
    -}
    -
    -// Done returns a channel which is closed when it receives the final status
    -// from the server.
    -func (s *Stream) Done() <-chan struct{} {
    -	return s.done
    -}
    -
    -// Header returns the header metadata of the stream.
    -//
    -// On client side, it acquires the key-value pairs of header metadata once it is
    -// available. It blocks until i) the metadata is ready or ii) there is no header
    -// metadata or iii) the stream is canceled/expired.
    -//
    -// On server side, it returns the out header after t.WriteHeader is called.  It
    -// does not block and must not be called until after WriteHeader.
    -func (s *Stream) Header() (metadata.MD, error) {
    -	if s.headerChan == nil {
    -		// On server side, return the header in stream. It will be the out
    -		// header after t.WriteHeader is called.
    -		return s.header.Copy(), nil
    -	}
    -	s.waitOnHeader()
    -
    -	if !s.headerValid || s.noHeaders {
    -		return nil, s.status.Err()
    -	}
    -
    -	return s.header.Copy(), nil
    -}
    -
    -// TrailersOnly blocks until a header or trailers-only frame is received and
    -// then returns true if the stream was trailers-only.  If the stream ends
    -// before headers are received, returns true, nil.  Client-side only.
    -func (s *Stream) TrailersOnly() bool {
    -	s.waitOnHeader()
    -	return s.noHeaders
    -}
    -
    -// Trailer returns the cached trailer metedata. Note that if it is not called
    -// after the entire stream is done, it could return an empty MD. Client
    -// side only.
    +// Trailer returns the cached trailer metadata. Note that if it is not called
    +// after the entire stream is done, it could return an empty MD.
     // It can be safely read only after stream has ended that is either read
     // or write have returned io.EOF.
     func (s *Stream) Trailer() metadata.MD {
    -	c := s.trailer.Copy()
    -	return c
    -}
    -
    -// ContentSubtype returns the content-subtype for a request. For example, a
    -// content-subtype of "proto" will result in a content-type of
    -// "application/grpc+proto". This will always be lowercase.  See
    -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
    -// more details.
    -func (s *Stream) ContentSubtype() string {
    -	return s.contentSubtype
    +	return s.trailer.Copy()
     }
     
     // Context returns the context of the stream.
    @@ -427,114 +333,104 @@ func (s *Stream) Context() context.Context {
     	return s.ctx
     }
     
    -// SetContext sets the context of the stream. This will be deleted once the
    -// stats handler callouts all move to gRPC layer.
    -func (s *Stream) SetContext(ctx context.Context) {
    -	s.ctx = ctx
    -}
    -
     // Method returns the method for the stream.
     func (s *Stream) Method() string {
     	return s.method
     }
     
    -// Status returns the status received from the server.
    -// Status can be read safely only after the stream has ended,
    -// that is, after Done() is closed.
    -func (s *Stream) Status() *status.Status {
    -	return s.status
    -}
    -
    -// HeaderWireLength returns the size of the headers of the stream as received
    -// from the wire. Valid only on the server.
    -func (s *Stream) HeaderWireLength() int {
    -	return s.headerWireLength
    -}
    -
    -// SetHeader sets the header metadata. This can be called multiple times.
    -// Server side only.
    -// This should not be called in parallel to other data writes.
    -func (s *Stream) SetHeader(md metadata.MD) error {
    -	if md.Len() == 0 {
    -		return nil
    -	}
    -	if s.isHeaderSent() || s.getState() == streamDone {
    -		return ErrIllegalHeaderWrite
    -	}
    -	s.hdrMu.Lock()
    -	s.header = metadata.Join(s.header, md)
    -	s.hdrMu.Unlock()
    -	return nil
    -}
    -
    -// SendHeader sends the given header metadata. The given metadata is
    -// combined with any metadata set by previous calls to SetHeader and
    -// then written to the transport stream.
    -func (s *Stream) SendHeader(md metadata.MD) error {
    -	return s.st.WriteHeader(s, md)
    +func (s *Stream) write(m recvMsg) {
    +	s.buf.put(m)
     }
     
    -// SetTrailer sets the trailer metadata which will be sent with the RPC status
    -// by the server. This can be called multiple times. Server side only.
    -// This should not be called parallel to other data writes.
    -func (s *Stream) SetTrailer(md metadata.MD) error {
    -	if md.Len() == 0 {
    -		return nil
    +// ReadMessageHeader reads data into the provided header slice from the stream.
    +// It first checks if there was an error during a previous read operation and
    +// returns it if present. It then requests a read operation for the length of
    +// the header. It continues to read from the stream until the entire header
    +// slice is filled or an error occurs. If an `io.EOF` error is encountered with
    +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an
    +// unexpected end of the stream. The method returns any error encountered during
    +// the read process or nil if the header was successfully read.
    +func (s *Stream) ReadMessageHeader(header []byte) (err error) {
    +	// Don't request a read if there was an error earlier
    +	if er := s.trReader.er; er != nil {
    +		return er
     	}
    -	if s.getState() == streamDone {
    -		return ErrIllegalHeaderWrite
    +	s.requestRead(len(header))
    +	for len(header) != 0 {
    +		n, err := s.trReader.ReadMessageHeader(header)
    +		header = header[n:]
    +		if len(header) == 0 {
    +			err = nil
    +		}
    +		if err != nil {
    +			if n > 0 && err == io.EOF {
    +				err = io.ErrUnexpectedEOF
    +			}
    +			return err
    +		}
     	}
    -	s.hdrMu.Lock()
    -	s.trailer = metadata.Join(s.trailer, md)
    -	s.hdrMu.Unlock()
     	return nil
     }
     
    -func (s *Stream) write(m recvMsg) {
    -	s.buf.put(m)
    -}
    -
    -// Read reads all p bytes from the wire for this stream.
    -func (s *Stream) Read(p []byte) (n int, err error) {
    +// Read reads n bytes from the wire for this stream.
    +func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
     	// Don't request a read if there was an error earlier
    -	if er := s.trReader.(*transportReader).er; er != nil {
    -		return 0, er
    +	if er := s.trReader.er; er != nil {
    +		return nil, er
     	}
    -	s.requestRead(len(p))
    -	return io.ReadFull(s.trReader, p)
    +	s.requestRead(n)
    +	for n != 0 {
    +		buf, err := s.trReader.Read(n)
    +		var bufLen int
    +		if buf != nil {
    +			bufLen = buf.Len()
    +		}
    +		n -= bufLen
    +		if n == 0 {
    +			err = nil
    +		}
    +		if err != nil {
    +			if bufLen > 0 && err == io.EOF {
    +				err = io.ErrUnexpectedEOF
    +			}
    +			data.Free()
    +			return nil, err
    +		}
    +		data = append(data, buf)
    +	}
    +	return data, nil
     }
     
    -// tranportReader reads all the data available for this Stream from the transport and
    +// transportReader reads all the data available for this Stream from the transport and
     // passes them into the decoder, which converts them into a gRPC message stream.
     // The error is io.EOF when the stream is done or another non-nil error if
     // the stream broke.
     type transportReader struct {
    -	reader io.Reader
    +	reader *recvBufferReader
     	// The handler to control the window update procedure for both this
     	// particular stream and the associated transport.
     	windowHandler func(int)
     	er            error
     }
     
    -func (t *transportReader) Read(p []byte) (n int, err error) {
    -	n, err = t.reader.Read(p)
    +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
    +	n, err := t.reader.ReadMessageHeader(header)
     	if err != nil {
     		t.er = err
    -		return
    +		return 0, err
     	}
     	t.windowHandler(n)
    -	return
    +	return n, nil
     }
     
    -// BytesReceived indicates whether any bytes have been received on this stream.
    -func (s *Stream) BytesReceived() bool {
    -	return atomic.LoadUint32(&s.bytesReceived) == 1
    -}
    -
    -// Unprocessed indicates whether the server did not process this stream --
    -// i.e. it sent a refused stream or GOAWAY including this stream ID.
    -func (s *Stream) Unprocessed() bool {
    -	return atomic.LoadUint32(&s.unprocessed) == 1
    +func (t *transportReader) Read(n int) (mem.Buffer, error) {
    +	buf, err := t.reader.Read(n)
    +	if err != nil {
    +		t.er = err
    +		return buf, err
    +	}
    +	t.windowHandler(buf.Len())
    +	return buf, nil
     }
     
     // GoString is implemented by Stream so context.String() won't
    @@ -566,9 +462,10 @@ type ServerConfig struct {
     	WriteBufferSize       int
     	ReadBufferSize        int
     	SharedWriteBuffer     bool
    -	ChannelzParentID      *channelz.Identifier
    +	ChannelzParent        *channelz.Server
     	MaxHeaderListSize     *uint32
     	HeaderTableSize       *uint32
    +	BufferPool            mem.BufferPool
     }
     
     // ConnectOptions covers all relevant options for communicating with the server.
    @@ -601,23 +498,19 @@ type ConnectOptions struct {
     	ReadBufferSize int
     	// SharedWriteBuffer indicates whether connections should reuse write buffer
     	SharedWriteBuffer bool
    -	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
    -	ChannelzParentID *channelz.Identifier
    +	// ChannelzParent sets the addrConn id which initiated the creation of this client transport.
    +	ChannelzParent *channelz.SubChannel
     	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
     	MaxHeaderListSize *uint32
     	// UseProxy specifies if a proxy should be used.
     	UseProxy bool
    +	// The mem.BufferPool to use when reading/writing to the wire.
    +	BufferPool mem.BufferPool
     }
     
    -// NewClientTransport establishes the transport with the required ConnectOptions
    -// and returns it to the caller.
    -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
    -	return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
    -}
    -
    -// Options provides additional hints and information for message
    +// WriteOptions provides additional hints and information for message
     // transmission.
    -type Options struct {
    +type WriteOptions struct {
     	// Last indicates whether this write is the last piece for
     	// this stream.
     	Last bool
    @@ -666,18 +559,8 @@ type ClientTransport interface {
     	// It does not block.
     	GracefulClose()
     
    -	// Write sends the data for the given stream. A nil stream indicates
    -	// the write is to be performed on the transport as a whole.
    -	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
    -
     	// NewStream creates a Stream for an RPC.
    -	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
    -
    -	// CloseStream clears the footprint of a stream when the stream is
    -	// not needed any more. The err indicates the error incurred when
    -	// CloseStream is called. Must be called when a stream is finished
    -	// unless the associated transport is closing.
    -	CloseStream(stream *Stream, err error)
    +	NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error)
     
     	// Error returns a channel that is closed when some I/O error
     	// happens. Typically the caller should have a goroutine to monitor
    @@ -697,12 +580,6 @@ type ClientTransport interface {
     
     	// RemoteAddr returns the remote network address.
     	RemoteAddr() net.Addr
    -
    -	// IncrMsgSent increments the number of message sent through this transport.
    -	IncrMsgSent()
    -
    -	// IncrMsgRecv increments the number of message received through this transport.
    -	IncrMsgRecv()
     }
     
     // ServerTransport is the common interface for all gRPC server-side transport
    @@ -712,19 +589,7 @@ type ClientTransport interface {
     // Write methods for a given Stream will be called serially.
     type ServerTransport interface {
     	// HandleStreams receives incoming streams using the given handler.
    -	HandleStreams(context.Context, func(*Stream))
    -
    -	// WriteHeader sends the header metadata for the given stream.
    -	// WriteHeader may not be called on all streams.
    -	WriteHeader(s *Stream, md metadata.MD) error
    -
    -	// Write sends the data for the given stream.
    -	// Write may not be called on all streams.
    -	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
    -
    -	// WriteStatus sends the status of a stream to the client.  WriteStatus is
    -	// the final call made on a stream and always occurs.
    -	WriteStatus(s *Stream, st *status.Status) error
    +	HandleStreams(context.Context, func(*ServerStream))
     
     	// Close tears down the transport. Once it is called, the transport
     	// should not be accessed any more. All the pending streams and their
    @@ -736,12 +601,14 @@ type ServerTransport interface {
     
     	// Drain notifies the client this ServerTransport stops accepting new RPCs.
     	Drain(debugData string)
    +}
     
    -	// IncrMsgSent increments the number of message sent through this transport.
    -	IncrMsgSent()
    -
    -	// IncrMsgRecv increments the number of message received through this transport.
    -	IncrMsgRecv()
    +type internalServerTransport interface {
    +	ServerTransport
    +	writeHeader(s *ServerStream, md metadata.MD) error
    +	write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
    +	writeStatus(s *ServerStream, st *status.Status) error
    +	incrMsgRecv()
     }
     
     // connectionErrorf creates an ConnectionError with the specified error description.
    @@ -793,7 +660,7 @@ var (
     	// connection is draining. This could be caused by goaway or balancer
     	// removing the address.
     	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
    -	// errStreamDone is returned from write at the client side to indiacte application
    +	// errStreamDone is returned from write at the client side to indicate application
     	// layer of an error.
     	errStreamDone = errors.New("the stream is done")
     	// StatusGoAway indicates that the server sent a GOAWAY that included this
    @@ -815,30 +682,6 @@ const (
     	GoAwayTooManyPings GoAwayReason = 2
     )
     
    -// channelzData is used to store channelz related data for http2Client and http2Server.
    -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
    -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
    -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
    -type channelzData struct {
    -	kpCount int64
    -	// The number of streams that have started, including already finished ones.
    -	streamsStarted int64
    -	// Client side: The number of streams that have ended successfully by receiving
    -	// EoS bit set frame from server.
    -	// Server side: The number of streams that have ended successfully by sending
    -	// frame with EoS bit set.
    -	streamsSucceeded int64
    -	streamsFailed    int64
    -	// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
    -	// instead of time.Time since it's more costly to atomically update time.Time variable than int64
    -	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
    -	lastStreamCreatedTime int64
    -	msgSent               int64
    -	msgRecv               int64
    -	lastMsgSentTime       int64
    -	lastMsgRecvTime       int64
    -}
    -
     // ContextErr converts the error from context package into a status error.
     func ContextErr(err error) error {
     	switch err {
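
The refactor above splits message consumption into a fixed-size header read
(ReadMessageHeader) followed by a payload read that returns pooled buffers
(read). For reference, a small sketch of decoding the 5-byte gRPC length prefix
that the header read is used for: one compression-flag byte followed by a 4-byte
big-endian payload length, per the gRPC PROTOCOL-HTTP2 wire format (the helper
name is illustrative):

        package example

        import (
            "encoding/binary"
            "fmt"
        )

        // parsePrefix decodes the 5-byte gRPC length-prefixed message header.
        func parsePrefix(hdr []byte) (compressed bool, length uint32, err error) {
            if len(hdr) != 5 {
                return false, 0, fmt.Errorf("expected 5-byte prefix, got %d bytes", len(hdr))
            }
            switch hdr[0] {
            case 0:
                compressed = false
            case 1:
                compressed = true
            default:
                return false, 0, fmt.Errorf("invalid compression flag %d", hdr[0])
            }
            return compressed, binary.BigEndian.Uint32(hdr[1:5]), nil
        }
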
    diff --git a/vendor/google.golang.org/grpc/internal/xds/xds.go b/vendor/google.golang.org/grpc/internal/xds/xds.go
    new file mode 100644
    index 0000000000..024c388b7a
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/internal/xds/xds.go
    @@ -0,0 +1,42 @@
    +/*
    + * Copyright 2021 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +// Package xds contains methods to Get/Set handshake cluster names. It is separated
    +// out from the top level /internal package to avoid circular dependencies.
    +package xds
    +
    +import (
    +	"google.golang.org/grpc/attributes"
    +	"google.golang.org/grpc/resolver"
    +)
    +
    +// handshakeClusterNameKey is the type used as the key to store cluster name in
    +// the Attributes field of resolver.Address.
    +type handshakeClusterNameKey struct{}
    +
    +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
    +// is updated with the cluster name.
    +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
    +	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
    +	return addr
    +}
    +
    +// GetXDSHandshakeClusterName returns cluster name stored in attr.
    +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
    +	v := attr.Value(handshakeClusterNameKey{})
    +	name, ok := v.(string)
    +	return name, ok
    +}
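
The package stores the cluster name under an unexported key type, so no other
package can read or overwrite the attribute. A short sketch of the same pattern
against the public resolver API, with an illustrative key and value of its own
(not part of this package):

        package example

        import "google.golang.org/grpc/resolver"

        // regionKey is an illustrative unexported key type; because other packages
        // cannot name this type, their attribute keys can never collide with it.
        type regionKey struct{}

        // setRegion returns a copy of addr carrying an illustrative "region" attribute.
        func setRegion(addr resolver.Address, region string) resolver.Address {
            addr.Attributes = addr.Attributes.WithValue(regionKey{}, region)
            return addr
        }

        // getRegion reads the attribute back, reporting whether it was set.
        func getRegion(addr resolver.Address) (string, bool) {
            v, ok := addr.Attributes.Value(regionKey{}).(string)
            return v, ok
        }
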
    diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
    deleted file mode 100644
    index e8b492774d..0000000000
    --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
    +++ /dev/null
    @@ -1,40 +0,0 @@
    -/*
    - * Copyright 2021 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - */
    -
    -package internal
    -
    -import (
    -	"google.golang.org/grpc/attributes"
    -	"google.golang.org/grpc/resolver"
    -)
    -
    -// handshakeClusterNameKey is the type used as the key to store cluster name in
    -// the Attributes field of resolver.Address.
    -type handshakeClusterNameKey struct{}
    -
    -// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
    -// is updated with the cluster name.
    -func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
    -	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
    -	return addr
    -}
    -
    -// GetXDSHandshakeClusterName returns cluster name stored in attr.
    -func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
    -	v := attr.Value(handshakeClusterNameKey{})
    -	name, ok := v.(string)
    -	return name, ok
    -}
    diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
    index 34d31b5e7d..eb42b19fb9 100644
    --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
    +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
    @@ -34,15 +34,29 @@ type ClientParameters struct {
     	// After a duration of this time if the client doesn't see any activity it
     	// pings the server to see if the transport is still alive.
     	// If set below 10s, a minimum value of 10s will be used instead.
    -	Time time.Duration // The current default value is infinity.
    +	//
    +	// Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
    +	// minutes (which means the client shouldn't ping more frequently than every
    +	// 5 minutes).
    +	//
    +	// Though not ideal, it's not a strong requirement for Time to be less than
    +	// EnforcementPolicy.MinTime.  Time will automatically double if the server
    +	// disconnects due to its enforcement policy.
    +	//
    +	// For more details, see
    +	// https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
    +	Time time.Duration
     	// After having pinged for keepalive check, the client waits for a duration
     	// of Timeout and if no activity is seen even after that the connection is
     	// closed.
    -	Timeout time.Duration // The current default value is 20 seconds.
    +	//
    +	// If keepalive is enabled, and this value is not explicitly set, the default
    +	// is 20 seconds.
    +	Timeout time.Duration
     	// If true, client sends keepalive pings even with no active RPCs. If false,
     	// when there are no active RPCs, Time and Timeout will be ignored and no
     	// keepalive pings will be sent.
    -	PermitWithoutStream bool // false by default.
    +	PermitWithoutStream bool
     }
     
     // ServerParameters is used to set keepalive and max-age parameters on the
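
As a usage sketch, these parameters are normally supplied through a dial option.
The target and values below are illustrative and stay within the documented
limits (Time is clamped to a 10s minimum on the client, and the default server
EnforcementPolicy.MinTime is 5 minutes):

        package example

        import (
            "time"

            "google.golang.org/grpc"
            "google.golang.org/grpc/credentials/insecure"
            "google.golang.org/grpc/keepalive"
        )

        // newConn creates a client connection to an illustrative target with
        // conservative keepalive settings.
        func newConn(target string) (*grpc.ClientConn, error) {
            kp := keepalive.ClientParameters{
                Time:                5 * time.Minute,  // no more aggressive than the server's default MinTime
                Timeout:             20 * time.Second, // same as the documented default
                PermitWithoutStream: false,            // only ping while RPCs are in flight
            }
            return grpc.NewClient(target,
                grpc.WithTransportCredentials(insecure.NewCredentials()),
                grpc.WithKeepaliveParams(kp),
            )
        }
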
    diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
    new file mode 100644
    index 0000000000..c37c58c023
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
    @@ -0,0 +1,194 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package mem
    +
    +import (
    +	"sort"
    +	"sync"
    +
    +	"google.golang.org/grpc/internal"
    +)
    +
    +// BufferPool is a pool of buffers that can be shared and reused, resulting in
    +// decreased memory allocation.
    +type BufferPool interface {
    +	// Get returns a buffer with specified length from the pool.
    +	Get(length int) *[]byte
    +
    +	// Put returns a buffer to the pool.
    +	Put(*[]byte)
    +}
    +
    +var defaultBufferPoolSizes = []int{
    +	256,
    +	4 << 10,  // 4KB (go page size)
    +	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
    +	32 << 10, // 32KB (default buffer size for io.Copy)
    +	1 << 20,  // 1MB
    +}
    +
    +var defaultBufferPool BufferPool
    +
    +func init() {
    +	defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
    +
    +	internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
    +		defaultBufferPool = pool
    +	}
    +
    +	internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
    +		bufferPoolingThreshold = threshold
    +	}
    +}
    +
    +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
    +// created with NewTieredBufferPool that uses a set of default sizes optimized
    +// for expected workflows.
    +func DefaultBufferPool() BufferPool {
    +	return defaultBufferPool
    +}
    +
    +// NewTieredBufferPool returns a BufferPool implementation that uses multiple
    +// underlying pools of the given pool sizes.
    +func NewTieredBufferPool(poolSizes ...int) BufferPool {
    +	sort.Ints(poolSizes)
    +	pools := make([]*sizedBufferPool, len(poolSizes))
    +	for i, s := range poolSizes {
    +		pools[i] = newSizedBufferPool(s)
    +	}
    +	return &tieredBufferPool{
    +		sizedPools: pools,
    +	}
    +}
    +
    +// tieredBufferPool implements the BufferPool interface with multiple tiers of
    +// buffer pools for different sizes of buffers.
    +type tieredBufferPool struct {
    +	sizedPools   []*sizedBufferPool
    +	fallbackPool simpleBufferPool
    +}
    +
    +func (p *tieredBufferPool) Get(size int) *[]byte {
    +	return p.getPool(size).Get(size)
    +}
    +
    +func (p *tieredBufferPool) Put(buf *[]byte) {
    +	p.getPool(cap(*buf)).Put(buf)
    +}
    +
    +func (p *tieredBufferPool) getPool(size int) BufferPool {
    +	poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
    +		return p.sizedPools[i].defaultSize >= size
    +	})
    +
    +	if poolIdx == len(p.sizedPools) {
    +		return &p.fallbackPool
    +	}
    +
    +	return p.sizedPools[poolIdx]
    +}
    +
    +// sizedBufferPool is a BufferPool implementation that is optimized for specific
    +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
    +// of 16kb and a sizedBufferPool can be configured to only return buffers with a
    +// capacity of 16kb. Note, however, that it does not support returning larger
    +// buffers and in fact panics if such a buffer is requested. Because of this,
    +// this BufferPool implementation is not meant to be used on its own and rather
    +// is intended to be embedded in a tieredBufferPool such that Get is only
    +// invoked when the required size is smaller than or equal to defaultSize.
    +type sizedBufferPool struct {
    +	pool        sync.Pool
    +	defaultSize int
    +}
    +
    +func (p *sizedBufferPool) Get(size int) *[]byte {
    +	buf := p.pool.Get().(*[]byte)
    +	b := *buf
    +	clear(b[:cap(b)])
    +	*buf = b[:size]
    +	return buf
    +}
    +
    +func (p *sizedBufferPool) Put(buf *[]byte) {
    +	if cap(*buf) < p.defaultSize {
    +		// Ignore buffers that are too small to fit in the pool. Otherwise, when
    +		// Get is called it will panic as it tries to index outside the bounds
    +		// of the buffer.
    +		return
    +	}
    +	p.pool.Put(buf)
    +}
    +
    +func newSizedBufferPool(size int) *sizedBufferPool {
    +	return &sizedBufferPool{
    +		pool: sync.Pool{
    +			New: func() any {
    +				buf := make([]byte, size)
    +				return &buf
    +			},
    +		},
    +		defaultSize: size,
    +	}
    +}
    +
    +var _ BufferPool = (*simpleBufferPool)(nil)
    +
    +// simpleBufferPool is an implementation of the BufferPool interface that
    +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
    +// acquire a buffer from the pool but if that buffer is too small, it returns it
    +// to the pool and creates a new one.
    +type simpleBufferPool struct {
    +	pool sync.Pool
    +}
    +
    +func (p *simpleBufferPool) Get(size int) *[]byte {
    +	bs, ok := p.pool.Get().(*[]byte)
    +	if ok && cap(*bs) >= size {
    +		*bs = (*bs)[:size]
    +		return bs
    +	}
    +
    +	// A buffer was pulled from the pool, but it is too small. Put it back in
    +	// the pool and create one large enough.
    +	if ok {
    +		p.pool.Put(bs)
    +	}
    +
    +	b := make([]byte, size)
    +	return &b
    +}
    +
    +func (p *simpleBufferPool) Put(buf *[]byte) {
    +	p.pool.Put(buf)
    +}
    +
    +var _ BufferPool = NopBufferPool{}
    +
    +// NopBufferPool is a buffer pool that returns new buffers without pooling.
    +type NopBufferPool struct{}
    +
    +// Get returns a buffer with specified length from the pool.
    +func (NopBufferPool) Get(length int) *[]byte {
    +	b := make([]byte, length)
    +	return &b
    +}
    +
    +// Put returns a buffer to the pool.
    +func (NopBufferPool) Put(*[]byte) {
    +}
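
A brief usage sketch for the pool API above; the tier sizes and the helper are
illustrative, and any BufferPool implementation (including the one returned by
DefaultBufferPool) could be substituted:

        package example

        import (
            "io"

            "google.golang.org/grpc/mem"
        )

        // pool is tiered at illustrative sizes; requests larger than the biggest
        // tier fall back to the internal simple pool.
        var pool = mem.NewTieredBufferPool(256, 4<<10, 16<<10)

        // readChunk reads up to n bytes from r into a pooled buffer and returns an
        // owned copy, so the pooled buffer can be handed back immediately.
        func readChunk(r io.Reader, n int) ([]byte, error) {
            buf := pool.Get(n) // *[]byte of length n
            defer pool.Put(buf)

            read, err := r.Read(*buf)
            return append([]byte(nil), (*buf)[:read]...), err
        }
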
    diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
    new file mode 100644
    index 0000000000..65002e2cc8
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
    @@ -0,0 +1,281 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package mem
    +
    +import (
    +	"io"
    +)
    +
    +const (
    +	// 32 KiB is what io.Copy uses.
    +	readAllBufSize = 32 * 1024
    +)
    +
    +// BufferSlice offers a means to represent data that spans one or more Buffer
    +// instances. A BufferSlice is meant to be immutable after creation, and methods
    +// like Ref create and return copies of the slice. This is why all methods have
    +// value receivers rather than pointer receivers.
    +//
    +// Note that any of the methods that read the underlying buffers such as Ref,
    +// Len or CopyTo etc., will panic if any underlying buffers have already been
    +// freed. It is recommended to not directly interact with any of the underlying
    +// buffers directly, rather such interactions should be mediated through the
    +// various methods on this type.
    +//
    +// By convention, any APIs that return (mem.BufferSlice, error) should reduce
    +// the burden on the caller by never returning a mem.BufferSlice that needs to
    +// be freed if the error is non-nil, unless explicitly stated.
    +type BufferSlice []Buffer
    +
    +// Len returns the sum of the length of all the Buffers in this slice.
    +//
    +// # Warning
    +//
    +// Invoking the built-in len on a BufferSlice will return the number of buffers
    +// in the slice, and *not* the value returned by this function.
    +func (s BufferSlice) Len() int {
    +	var length int
    +	for _, b := range s {
    +		length += b.Len()
    +	}
    +	return length
    +}
    +
    +// Ref invokes Ref on each buffer in the slice.
    +func (s BufferSlice) Ref() {
    +	for _, b := range s {
    +		b.Ref()
    +	}
    +}
    +
    +// Free invokes Buffer.Free() on each Buffer in the slice.
    +func (s BufferSlice) Free() {
    +	for _, b := range s {
    +		b.Free()
    +	}
    +}
    +
    +// CopyTo copies each of the underlying Buffer's data into the given buffer,
    +// returning the number of bytes copied. Has the same semantics as the copy
    +// builtin in that it will copy as many bytes as it can, stopping when either dst
    +// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
    +func (s BufferSlice) CopyTo(dst []byte) int {
    +	off := 0
    +	for _, b := range s {
    +		off += copy(dst[off:], b.ReadOnlyData())
    +	}
    +	return off
    +}
    +
    +// Materialize concatenates all the underlying Buffer's data into a single
    +// contiguous buffer using CopyTo.
    +func (s BufferSlice) Materialize() []byte {
    +	l := s.Len()
    +	if l == 0 {
    +		return nil
    +	}
    +	out := make([]byte, l)
    +	s.CopyTo(out)
    +	return out
    +}
    +
    +// MaterializeToBuffer functions like Materialize except that it writes the data
    +// to a single Buffer pulled from the given BufferPool.
    +//
    +// As a special case, if the input BufferSlice only actually has one Buffer, this
    +// function simply increases the refcount before returning said Buffer. Freeing this
    +// buffer won't release it until the BufferSlice is itself released.
    +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
    +	if len(s) == 1 {
    +		s[0].Ref()
    +		return s[0]
    +	}
    +	sLen := s.Len()
    +	if sLen == 0 {
    +		return emptyBuffer{}
    +	}
    +	buf := pool.Get(sLen)
    +	s.CopyTo(*buf)
    +	return NewBuffer(buf, pool)
    +}
    +
    +// Reader returns a new Reader for the input slice after taking references to
    +// each underlying buffer.
    +func (s BufferSlice) Reader() Reader {
    +	s.Ref()
    +	return &sliceReader{
    +		data: s,
    +		len:  s.Len(),
    +	}
    +}
    +
    +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
    +// with other parts of the system. It also provides an additional convenience
    +// method Remaining(), which returns the number of unread bytes remaining in
    +// the slice. Buffers will be freed as they are read.
    +type Reader interface {
    +	io.Reader
    +	io.ByteReader
    +	// Close frees the underlying BufferSlice and never returns an error. Subsequent
    +	// calls to Read will return (0, io.EOF).
    +	Close() error
    +	// Remaining returns the number of unread bytes remaining in the slice.
    +	Remaining() int
    +}
    +
    +type sliceReader struct {
    +	data BufferSlice
    +	len  int
    +	// The index into data[0].ReadOnlyData().
    +	bufferIdx int
    +}
    +
    +func (r *sliceReader) Remaining() int {
    +	return r.len
    +}
    +
    +func (r *sliceReader) Close() error {
    +	r.data.Free()
    +	r.data = nil
    +	r.len = 0
    +	return nil
    +}
    +
    +func (r *sliceReader) freeFirstBufferIfEmpty() bool {
    +	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
    +		return false
    +	}
    +
    +	r.data[0].Free()
    +	r.data = r.data[1:]
    +	r.bufferIdx = 0
    +	return true
    +}
    +
    +func (r *sliceReader) Read(buf []byte) (n int, _ error) {
    +	if r.len == 0 {
    +		return 0, io.EOF
    +	}
    +
    +	for len(buf) != 0 && r.len != 0 {
    +		// Copy as much as possible from the first Buffer in the slice into the
    +		// given byte slice.
    +		data := r.data[0].ReadOnlyData()
    +		copied := copy(buf, data[r.bufferIdx:])
    +		r.len -= copied       // Reduce len by the number of bytes copied.
    +		r.bufferIdx += copied // Increment the buffer index.
    +		n += copied           // Increment the total number of bytes read.
    +		buf = buf[copied:]    // Shrink the given byte slice.
    +
    +		// If we have copied all the data from the first Buffer, free it and advance to
    +		// the next in the slice.
    +		r.freeFirstBufferIfEmpty()
    +	}
    +
    +	return n, nil
    +}
    +
    +func (r *sliceReader) ReadByte() (byte, error) {
    +	if r.len == 0 {
    +		return 0, io.EOF
    +	}
    +
    +	// There may be any number of empty buffers in the slice, clear them all until a
    +	// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
    +	for r.freeFirstBufferIfEmpty() {
    +	}
    +
    +	b := r.data[0].ReadOnlyData()[r.bufferIdx]
    +	r.len--
    +	r.bufferIdx++
    +	// Free the first buffer in the slice if the last byte was read
    +	r.freeFirstBufferIfEmpty()
    +	return b, nil
    +}
    +
    +var _ io.Writer = (*writer)(nil)
    +
    +type writer struct {
    +	buffers *BufferSlice
    +	pool    BufferPool
    +}
    +
    +func (w *writer) Write(p []byte) (n int, err error) {
    +	b := Copy(p, w.pool)
    +	*w.buffers = append(*w.buffers, b)
    +	return b.Len(), nil
    +}
    +
    +// NewWriter wraps the given BufferSlice and BufferPool to implement the
    +// io.Writer interface. Every call to Write copies the contents of the given
    +// buffer into a new Buffer pulled from the given pool and the Buffer is
    +// added to the given BufferSlice.
    +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
    +	return &writer{buffers: buffers, pool: pool}
    +}
    +
    +// ReadAll reads from r until an error or EOF and returns the data it read.
    +// A successful call returns err == nil, not err == EOF. Because ReadAll is
    +// defined to read from src until EOF, it does not treat an EOF from Read
    +// as an error to be reported.
    +//
    +// Important: A failed call returns a non-nil error and may also return
    +// partially read buffers. It is the responsibility of the caller to free the
    +// BufferSlice returned, or its memory will not be reused.
    +func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) {
    +	var result BufferSlice
    +	if wt, ok := r.(io.WriterTo); ok {
    +		// This is more optimal since wt knows the size of chunks it wants to
    +		// write and, hence, we can allocate buffers of an optimal size to fit
    +		// them. E.g. it might be a single big chunk, and we wouldn't chop it
    +		// into pieces.
    +		w := NewWriter(&result, pool)
    +		_, err := wt.WriteTo(w)
    +		return result, err
    +	}
    +nextBuffer:
    +	for {
    +		buf := pool.Get(readAllBufSize)
    +		// We asked for 32KiB but may have been given a bigger buffer.
    +		// Use all of it if that's the case.
    +		*buf = (*buf)[:cap(*buf)]
    +		usedCap := 0
    +		for {
    +			n, err := r.Read((*buf)[usedCap:])
    +			usedCap += n
    +			if err != nil {
    +				if usedCap == 0 {
    +					// Nothing in this buf, put it back
    +					pool.Put(buf)
    +				} else {
    +					*buf = (*buf)[:usedCap]
    +					result = append(result, NewBuffer(buf, pool))
    +				}
    +				if err == io.EOF {
    +					err = nil
    +				}
    +				return result, err
    +			}
    +			if len(*buf) == usedCap {
    +				result = append(result, NewBuffer(buf, pool))
    +				continue nextBuffer
    +			}
    +		}
    +	}
    +}
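
A short sketch of how ReadAll, Materialize and Free fit together; the helper is
illustrative and uses the default pool:

        package example

        import (
            "io"

            "google.golang.org/grpc/mem"
        )

        // readAllPooled drains r into pooled buffers and returns a contiguous copy.
        func readAllPooled(r io.Reader) ([]byte, error) {
            data, err := mem.ReadAll(r, mem.DefaultBufferPool())
            if err != nil {
                data.Free() // a failed ReadAll may still return partially read buffers
                return nil, err
            }
            out := data.Materialize() // single []byte of data.Len() bytes
            data.Free()               // drop the references so the buffers go back to the pool
            return out, nil
        }
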
    diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
    new file mode 100644
    index 0000000000..ecbf0b9a73
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/mem/buffers.go
    @@ -0,0 +1,268 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +// Package mem provides utilities that facilitate memory reuse in byte slices
    +// that are used as buffers.
    +//
    +// # Experimental
    +//
    +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
    +// removed in a later release.
    +package mem
    +
    +import (
    +	"fmt"
    +	"sync"
    +	"sync/atomic"
    +)
    +
    +// A Buffer represents a reference counted piece of data (in bytes) that can be
    +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
    +// released by calling Free(), which invokes the free function given at creation
    +// only after all references are released.
    +//
    +// Note that a Buffer is not safe for concurrent access and instead each
    +// goroutine should use its own reference to the data, which can be acquired via
    +// a call to Ref().
    +//
    +// Attempts to access the underlying data after releasing the reference to the
    +// Buffer will panic.
    +type Buffer interface {
    +	// ReadOnlyData returns the underlying byte slice. Note that it is undefined
    +	// behavior to modify the contents of this slice in any way.
    +	ReadOnlyData() []byte
    +	// Ref increases the reference counter for this Buffer.
    +	Ref()
    +	// Free decrements this Buffer's reference counter and frees the underlying
    +	// byte slice if the counter reaches 0 as a result of this call.
    +	Free()
    +	// Len returns the Buffer's size.
    +	Len() int
    +
    +	split(n int) (left, right Buffer)
    +	read(buf []byte) (int, Buffer)
    +}
    +
    +var (
    +	bufferPoolingThreshold = 1 << 10
    +
    +	bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
    +	refObjectPool    = sync.Pool{New: func() any { return new(atomic.Int32) }}
    +)
    +
    +// IsBelowBufferPoolingThreshold returns true if the given size is less than or
    +// equal to the threshold for buffer pooling. This is used to determine whether
    +// to pool buffers or allocate them directly.
    +func IsBelowBufferPoolingThreshold(size int) bool {
    +	return size <= bufferPoolingThreshold
    +}
    +
    +type buffer struct {
    +	origData *[]byte
    +	data     []byte
    +	refs     *atomic.Int32
    +	pool     BufferPool
    +}
    +
    +func newBuffer() *buffer {
    +	return bufferObjectPool.Get().(*buffer)
    +}
    +
    +// NewBuffer creates a new Buffer from the given data, initializing the reference
    +// counter to 1. The data will then be returned to the given pool when all
    +// references to the returned Buffer are released. As a special case to avoid
    +// additional allocations, if the given buffer pool is nil, the returned buffer
    +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
    +// underlying data is never freed.
    +//
    +// Note that the backing array of the given data is not copied.
    +func NewBuffer(data *[]byte, pool BufferPool) Buffer {
    +	// Use the buffer's capacity instead of the length, otherwise buffers may
    +	// not be reused under certain conditions. For example, if a large buffer
    +	// is acquired from the pool, but fewer bytes than the buffering threshold
    +	// are written to it, the buffer will not be returned to the pool.
    +	if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
    +		return (SliceBuffer)(*data)
    +	}
    +	b := newBuffer()
    +	b.origData = data
    +	b.data = *data
    +	b.pool = pool
    +	b.refs = refObjectPool.Get().(*atomic.Int32)
    +	b.refs.Add(1)
    +	return b
    +}
    +
    +// Copy creates a new Buffer from the given data, initializing the reference
    +// counter to 1.
    +//
    +// It acquires a []byte from the given pool and copies over the backing array
    +// of the given data. The []byte acquired from the pool is returned to the
    +// pool when all references to the returned Buffer are released.
    +func Copy(data []byte, pool BufferPool) Buffer {
    +	if IsBelowBufferPoolingThreshold(len(data)) {
    +		buf := make(SliceBuffer, len(data))
    +		copy(buf, data)
    +		return buf
    +	}
    +
    +	buf := pool.Get(len(data))
    +	copy(*buf, data)
    +	return NewBuffer(buf, pool)
    +}
    +
    +func (b *buffer) ReadOnlyData() []byte {
    +	if b.refs == nil {
    +		panic("Cannot read freed buffer")
    +	}
    +	return b.data
    +}
    +
    +func (b *buffer) Ref() {
    +	if b.refs == nil {
    +		panic("Cannot ref freed buffer")
    +	}
    +	b.refs.Add(1)
    +}
    +
    +func (b *buffer) Free() {
    +	if b.refs == nil {
    +		panic("Cannot free freed buffer")
    +	}
    +
    +	refs := b.refs.Add(-1)
    +	switch {
    +	case refs > 0:
    +		return
    +	case refs == 0:
    +		if b.pool != nil {
    +			b.pool.Put(b.origData)
    +		}
    +
    +		refObjectPool.Put(b.refs)
    +		b.origData = nil
    +		b.data = nil
    +		b.refs = nil
    +		b.pool = nil
    +		bufferObjectPool.Put(b)
    +	default:
    +		panic("Cannot free freed buffer")
    +	}
    +}
    +
    +func (b *buffer) Len() int {
    +	return len(b.ReadOnlyData())
    +}
    +
    +func (b *buffer) split(n int) (Buffer, Buffer) {
    +	if b.refs == nil {
    +		panic("Cannot split freed buffer")
    +	}
    +
    +	b.refs.Add(1)
    +	split := newBuffer()
    +	split.origData = b.origData
    +	split.data = b.data[n:]
    +	split.refs = b.refs
    +	split.pool = b.pool
    +
    +	b.data = b.data[:n]
    +
    +	return b, split
    +}
    +
    +func (b *buffer) read(buf []byte) (int, Buffer) {
    +	if b.refs == nil {
    +		panic("Cannot read freed buffer")
    +	}
    +
    +	n := copy(buf, b.data)
    +	if n == len(b.data) {
    +		b.Free()
    +		return n, nil
    +	}
    +
    +	b.data = b.data[n:]
    +	return n, b
    +}
    +
    +func (b *buffer) String() string {
    +	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
    +}
    +
    +// ReadUnsafe reads bytes from the given Buffer into the provided slice.
    +// It does not perform safety checks.
    +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
    +	return buf.read(dst)
    +}
    +
    +// SplitUnsafe modifies the given Buffer to point to its first n bytes while
    +// returning a new reference to the remaining bytes. The returned Buffer
    +// functions just like a normal reference acquired using Ref().
    +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
    +	return buf.split(n)
    +}
    +
    +type emptyBuffer struct{}
    +
    +func (e emptyBuffer) ReadOnlyData() []byte {
    +	return nil
    +}
    +
    +func (e emptyBuffer) Ref()  {}
    +func (e emptyBuffer) Free() {}
    +
    +func (e emptyBuffer) Len() int {
    +	return 0
    +}
    +
    +func (e emptyBuffer) split(int) (left, right Buffer) {
    +	return e, e
    +}
    +
    +func (e emptyBuffer) read([]byte) (int, Buffer) {
    +	return 0, e
    +}
    +
    +// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
    +// methods for reading, splitting, and managing the byte slice.
    +type SliceBuffer []byte
    +
    +// ReadOnlyData returns the byte slice.
    +func (s SliceBuffer) ReadOnlyData() []byte { return s }
    +
    +// Ref is a noop implementation of Ref.
    +func (s SliceBuffer) Ref() {}
    +
    +// Free is a noop implementation of Free.
    +func (s SliceBuffer) Free() {}
    +
    +// Len returns the length of the underlying byte slice.
    +func (s SliceBuffer) Len() int { return len(s) }
    +
    +func (s SliceBuffer) split(n int) (left, right Buffer) {
    +	return s[:n], s[n:]
    +}
    +
    +func (s SliceBuffer) read(buf []byte) (int, Buffer) {
    +	n := copy(buf, s)
    +	if n == len(s) {
    +		return n, nil
    +	}
    +	return n, s[n:]
    +}
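    +
    +// The following sketch is illustrative only and not part of the upstream API:
    +// it shows how a caller might share a pooled buffer between goroutines,
    +// assuming src is large enough (above the pooling threshold) for Copy to
    +// return a reference counted Buffer rather than a plain SliceBuffer.
    +//
    +//	func example(pool BufferPool) {
    +//		src := make([]byte, 4096)
    +//		buf := Copy(src, pool) // reference count starts at 1
    +//		buf.Ref()              // second reference for another goroutine
    +//		go func() {
    +//			_ = buf.ReadOnlyData() // read-only view; must not be modified
    +//			buf.Free()             // release the goroutine's reference
    +//		}()
    +//		buf.Free() // release the original reference; the final Free returns the bytes to the pool
    +//	}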
    diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
    index 1e9485fd6e..d2e15253bb 100644
    --- a/vendor/google.golang.org/grpc/metadata/metadata.go
    +++ b/vendor/google.golang.org/grpc/metadata/metadata.go
    @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
     // ValueFromIncomingContext returns the metadata value corresponding to the metadata
     // key from the incoming metadata if it exists. Keys are matched in a case insensitive
     // manner.
    -//
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    -// later release.
     func ValueFromIncomingContext(ctx context.Context, key string) []string {
     	md, ok := ctx.Value(mdIncomingKey{}).(MD)
     	if !ok {
    @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
     		return copyOf(v)
     	}
     	for k, v := range md {
    -		// Case insenitive comparison: MD is a map, and there's no guarantee
    +		// Case insensitive comparison: MD is a map, and there's no guarantee
     		// that the MD attached to the context is created using our helper
     		// functions.
     		if strings.EqualFold(k, key) {
    diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
    index a821ff9b2b..499a49c8c1 100644
    --- a/vendor/google.golang.org/grpc/peer/peer.go
    +++ b/vendor/google.golang.org/grpc/peer/peer.go
    @@ -22,7 +22,9 @@ package peer
     
     import (
     	"context"
    +	"fmt"
     	"net"
    +	"strings"
     
     	"google.golang.org/grpc/credentials"
     )
    @@ -39,6 +41,34 @@ type Peer struct {
     	AuthInfo credentials.AuthInfo
     }
     
    +// String ensures the Peer type implements the Stringer interface so that a
    +// context carrying a peerKey value can be printed effectively.
    +func (p *Peer) String() string {
    +	if p == nil {
    +		return "Peer<nil>"
    +	}
    +	sb := &strings.Builder{}
    +	sb.WriteString("Peer{")
    +	if p.Addr != nil {
    +		fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
    +	} else {
    +		fmt.Fprintf(sb, "Addr: <nil>, ")
    +	}
    +	if p.LocalAddr != nil {
    +		fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
    +	} else {
    +		fmt.Fprintf(sb, "LocalAddr: <nil>, ")
    +	}
    +	if p.AuthInfo != nil {
    +		fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
    +	} else {
    +		fmt.Fprintf(sb, "AuthInfo: <nil>")
    +	}
    +	sb.WriteString("}")
    +
    +	return sb.String()
    +}
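    +
    +// As an illustrative example (addresses are made up), the method above renders
    +// a TCP peer that has no transport credentials roughly as:
    +//
    +//	Peer{Addr: '192.0.2.1:50051', LocalAddr: '192.0.2.2:43210', AuthInfo: <nil>}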
    +
     type peerKey struct{}
     
     // NewContext creates a new context with peer information attached.
    diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
    index bf56faa76d..bdaa2130e4 100644
    --- a/vendor/google.golang.org/grpc/picker_wrapper.go
    +++ b/vendor/google.golang.org/grpc/picker_wrapper.go
    @@ -20,8 +20,9 @@ package grpc
     
     import (
     	"context"
    +	"fmt"
     	"io"
    -	"sync"
    +	"sync/atomic"
     
     	"google.golang.org/grpc/balancer"
     	"google.golang.org/grpc/codes"
    @@ -32,35 +33,43 @@ import (
     	"google.golang.org/grpc/status"
     )
     
    +// pickerGeneration stores a picker and a channel used to signal that a picker
    +// newer than this one is available.
    +type pickerGeneration struct {
    +	// picker is the picker produced by the LB policy.  May be nil if a picker
    +	// has never been produced.
    +	picker balancer.Picker
    +	// blockingCh is closed when the picker has been invalidated because there
    +	// is a new one available.
    +	blockingCh chan struct{}
    +}
    +
     // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
     // actions and unblocks when there's a picker update.
     type pickerWrapper struct {
    -	mu            sync.Mutex
    -	done          bool
    -	blockingCh    chan struct{}
    -	picker        balancer.Picker
    +	// If pickerGen holds a nil pointer, the pickerWrapper is closed.
    +	pickerGen     atomic.Pointer[pickerGeneration]
     	statsHandlers []stats.Handler // to record blocking picker calls
     }
     
     func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
    -	return &pickerWrapper{
    -		blockingCh:    make(chan struct{}),
    +	pw := &pickerWrapper{
     		statsHandlers: statsHandlers,
     	}
    +	pw.pickerGen.Store(&pickerGeneration{
    +		blockingCh: make(chan struct{}),
    +	})
    +	return pw
     }
     
    -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
    +// updatePicker is called when the LB policy calls UpdateState. It unblocks
    +// all blocked picks.
     func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
    -	pw.mu.Lock()
    -	if pw.done {
    -		pw.mu.Unlock()
    -		return
    -	}
    -	pw.picker = p
    -	// pw.blockingCh should never be nil.
    -	close(pw.blockingCh)
    -	pw.blockingCh = make(chan struct{})
    -	pw.mu.Unlock()
    +	old := pw.pickerGen.Swap(&pickerGeneration{
    +		picker:     p,
    +		blockingCh: make(chan struct{}),
    +	})
    +	close(old.blockingCh)
     }
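    +
    +// Illustrative note (not part of the upstream sources): callers blocked in
    +// pick() wait on the previous generation's blockingCh, so closing it here
    +// wakes them and they re-Load pickerGen to pick with the picker published by
    +// the Swap above.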
     
     // doneChannelzWrapper performs the following:
    @@ -97,27 +106,24 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     	var lastPickErr error
     
     	for {
    -		pw.mu.Lock()
    -		if pw.done {
    -			pw.mu.Unlock()
    +		pg := pw.pickerGen.Load()
    +		if pg == nil {
     			return nil, balancer.PickResult{}, ErrClientConnClosing
     		}
    -
    -		if pw.picker == nil {
    -			ch = pw.blockingCh
    +		if pg.picker == nil {
    +			ch = pg.blockingCh
     		}
    -		if ch == pw.blockingCh {
    +		if ch == pg.blockingCh {
     			// This could happen when either:
     			// - pw.picker is nil (the previous if condition), or
    -			// - has called pick on the current picker.
    -			pw.mu.Unlock()
    +			// - we have already called pick on the current picker.
     			select {
     			case <-ctx.Done():
     				var errStr string
     				if lastPickErr != nil {
     					errStr = "latest balancer error: " + lastPickErr.Error()
     				} else {
    -					errStr = ctx.Err().Error()
    +					errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
     				}
     				switch ctx.Err() {
     				case context.DeadlineExceeded:
    @@ -144,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     			}
     		}
     
    -		ch = pw.blockingCh
    -		p := pw.picker
    -		pw.mu.Unlock()
    +		ch = pg.blockingCh
    +		p := pg.picker
     
     		pickResult, err := p.Pick(info)
     		if err != nil {
    @@ -196,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
     }
     
     func (pw *pickerWrapper) close() {
    -	pw.mu.Lock()
    -	defer pw.mu.Unlock()
    -	if pw.done {
    -		return
    -	}
    -	pw.done = true
    -	close(pw.blockingCh)
    +	old := pw.pickerGen.Swap(nil)
    +	close(old.blockingCh)
     }
     
     // reset clears the pickerWrapper and prepares it for being used again when idle
     // mode is exited.
     func (pw *pickerWrapper) reset() {
    -	pw.mu.Lock()
    -	defer pw.mu.Unlock()
    -	if pw.done {
    -		return
    -	}
    -	pw.blockingCh = make(chan struct{})
    +	old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
    +	close(old.blockingCh)
     }
     
     // dropError is a wrapper error that indicates the LB policy wishes to drop the
    diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
    deleted file mode 100644
    index 5128f9364d..0000000000
    --- a/vendor/google.golang.org/grpc/pickfirst.go
    +++ /dev/null
    @@ -1,249 +0,0 @@
    -/*
    - *
    - * Copyright 2017 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package grpc
    -
    -import (
    -	"encoding/json"
    -	"errors"
    -	"fmt"
    -
    -	"google.golang.org/grpc/balancer"
    -	"google.golang.org/grpc/connectivity"
    -	internalgrpclog "google.golang.org/grpc/internal/grpclog"
    -	"google.golang.org/grpc/internal/grpcrand"
    -	"google.golang.org/grpc/internal/pretty"
    -	"google.golang.org/grpc/resolver"
    -	"google.golang.org/grpc/serviceconfig"
    -)
    -
    -const (
    -	// PickFirstBalancerName is the name of the pick_first balancer.
    -	PickFirstBalancerName = "pick_first"
    -	logPrefix             = "[pick-first-lb %p] "
    -)
    -
    -func newPickfirstBuilder() balancer.Builder {
    -	return &pickfirstBuilder{}
    -}
    -
    -type pickfirstBuilder struct{}
    -
    -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
    -	b := &pickfirstBalancer{cc: cc}
    -	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
    -	return b
    -}
    -
    -func (*pickfirstBuilder) Name() string {
    -	return PickFirstBalancerName
    -}
    -
    -type pfConfig struct {
    -	serviceconfig.LoadBalancingConfig `json:"-"`
    -
    -	// If set to true, instructs the LB policy to shuffle the order of the list
    -	// of addresses received from the name resolver before attempting to
    -	// connect to them.
    -	ShuffleAddressList bool `json:"shuffleAddressList"`
    -}
    -
    -func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    -	var cfg pfConfig
    -	if err := json.Unmarshal(js, &cfg); err != nil {
    -		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
    -	}
    -	return cfg, nil
    -}
    -
    -type pickfirstBalancer struct {
    -	logger  *internalgrpclog.PrefixLogger
    -	state   connectivity.State
    -	cc      balancer.ClientConn
    -	subConn balancer.SubConn
    -}
    -
    -func (b *pickfirstBalancer) ResolverError(err error) {
    -	if b.logger.V(2) {
    -		b.logger.Infof("Received error from the name resolver: %v", err)
    -	}
    -	if b.subConn == nil {
    -		b.state = connectivity.TransientFailure
    -	}
    -
    -	if b.state != connectivity.TransientFailure {
    -		// The picker will not change since the balancer does not currently
    -		// report an error.
    -		return
    -	}
    -	b.cc.UpdateState(balancer.State{
    -		ConnectivityState: connectivity.TransientFailure,
    -		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
    -	})
    -}
    -
    -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
    -	addrs := state.ResolverState.Addresses
    -	if len(addrs) == 0 {
    -		// The resolver reported an empty address list. Treat it like an error by
    -		// calling b.ResolverError.
    -		if b.subConn != nil {
    -			// Shut down the old subConn. All addresses were removed, so it is
    -			// no longer valid.
    -			b.subConn.Shutdown()
    -			b.subConn = nil
    -		}
    -		b.ResolverError(errors.New("produced zero addresses"))
    -		return balancer.ErrBadResolverState
    -	}
    -
    -	// We don't have to guard this block with the env var because ParseConfig
    -	// already does so.
    -	cfg, ok := state.BalancerConfig.(pfConfig)
    -	if state.BalancerConfig != nil && !ok {
    -		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
    -	}
    -	if cfg.ShuffleAddressList {
    -		addrs = append([]resolver.Address{}, addrs...)
    -		grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
    -	}
    -
    -	if b.logger.V(2) {
    -		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
    -	}
    -
    -	if b.subConn != nil {
    -		b.cc.UpdateAddresses(b.subConn, addrs)
    -		return nil
    -	}
    -
    -	var subConn balancer.SubConn
    -	subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
    -		StateListener: func(state balancer.SubConnState) {
    -			b.updateSubConnState(subConn, state)
    -		},
    -	})
    -	if err != nil {
    -		if b.logger.V(2) {
    -			b.logger.Infof("Failed to create new SubConn: %v", err)
    -		}
    -		b.state = connectivity.TransientFailure
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: connectivity.TransientFailure,
    -			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)},
    -		})
    -		return balancer.ErrBadResolverState
    -	}
    -	b.subConn = subConn
    -	b.state = connectivity.Idle
    -	b.cc.UpdateState(balancer.State{
    -		ConnectivityState: connectivity.Connecting,
    -		Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    -	})
    -	b.subConn.Connect()
    -	return nil
    -}
    -
    -// UpdateSubConnState is unused as a StateListener is always registered when
    -// creating SubConns.
    -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
    -	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
    -}
    -
    -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
    -	if b.logger.V(2) {
    -		b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
    -	}
    -	if b.subConn != subConn {
    -		if b.logger.V(2) {
    -			b.logger.Infof("Ignored state change because subConn is not recognized")
    -		}
    -		return
    -	}
    -	if state.ConnectivityState == connectivity.Shutdown {
    -		b.subConn = nil
    -		return
    -	}
    -
    -	switch state.ConnectivityState {
    -	case connectivity.Ready:
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    -			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
    -		})
    -	case connectivity.Connecting:
    -		if b.state == connectivity.TransientFailure {
    -			// We stay in TransientFailure until we are Ready. See A62.
    -			return
    -		}
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    -			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
    -		})
    -	case connectivity.Idle:
    -		if b.state == connectivity.TransientFailure {
    -			// We stay in TransientFailure until we are Ready. Also kick the
    -			// subConn out of Idle into Connecting. See A62.
    -			b.subConn.Connect()
    -			return
    -		}
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    -			Picker:            &idlePicker{subConn: subConn},
    -		})
    -	case connectivity.TransientFailure:
    -		b.cc.UpdateState(balancer.State{
    -			ConnectivityState: state.ConnectivityState,
    -			Picker:            &picker{err: state.ConnectionError},
    -		})
    -	}
    -	b.state = state.ConnectivityState
    -}
    -
    -func (b *pickfirstBalancer) Close() {
    -}
    -
    -func (b *pickfirstBalancer) ExitIdle() {
    -	if b.subConn != nil && b.state == connectivity.Idle {
    -		b.subConn.Connect()
    -	}
    -}
    -
    -type picker struct {
    -	result balancer.PickResult
    -	err    error
    -}
    -
    -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    -	return p.result, p.err
    -}
    -
    -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
    -// CONNECTING when Pick is called.
    -type idlePicker struct {
    -	subConn balancer.SubConn
    -}
    -
    -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    -	i.subConn.Connect()
    -	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
    -}
    -
    -func init() {
    -	balancer.Register(newPickfirstBuilder())
    -}
    diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
    index 73bd633643..ee0ff969af 100644
    --- a/vendor/google.golang.org/grpc/preloader.go
    +++ b/vendor/google.golang.org/grpc/preloader.go
    @@ -20,6 +20,7 @@ package grpc
     
     import (
     	"google.golang.org/grpc/codes"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/status"
     )
     
    @@ -31,9 +32,10 @@ import (
     // later release.
     type PreparedMsg struct {
     	// Struct for preparing msg before sending them
    -	encodedData []byte
    +	encodedData mem.BufferSlice
     	hdr         []byte
    -	payload     []byte
    +	payload     mem.BufferSlice
    +	pf          payloadFormat
     }
     
     // Encode marshalls and compresses the message using the codec and compressor for the stream.
    @@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
     	if err != nil {
     		return err
     	}
    -	p.encodedData = data
    -	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
    +
    +	materializedData := data.Materialize()
    +	data.Free()
    +	p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)}
    +
    +	// TODO: it should be possible to grab the bufferPool from the underlying
    +	//  stream implementation with a type cast to its actual type (such as
    +	//  addrConnStream) and access the buffer pool directly.
    +	var compData mem.BufferSlice
    +	compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
     	if err != nil {
     		return err
     	}
    -	p.hdr, p.payload = msgHeader(data, compData)
    +
    +	if p.pf.isCompressed() {
    +		materializedCompData := compData.Materialize()
    +		compData.Free()
    +		compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)}
    +	}
    +
    +	p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
    +
     	return nil
     }
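    +
    +// Illustrative usage sketch (stream and msg are hypothetical placeholders):
    +// a PreparedMsg is encoded once and then sent on the stream it was prepared
    +// for, e.g.
    +//
    +//	p := &PreparedMsg{}
    +//	if err := p.Encode(stream, msg); err != nil {
    +//		return err
    +//	}
    +//	if err := stream.SendMsg(p); err != nil {
    +//		return err
    +//	}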
    diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
    deleted file mode 100644
    index a6f26c8ab0..0000000000
    --- a/vendor/google.golang.org/grpc/regenerate.sh
    +++ /dev/null
    @@ -1,123 +0,0 @@
    -#!/bin/bash
    -# Copyright 2020 gRPC authors.
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License");
    -# you may not use this file except in compliance with the License.
    -# You may obtain a copy of the License at
    -#
    -#      http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS,
    -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -# See the License for the specific language governing permissions and
    -# limitations under the License.
    -
    -set -eu -o pipefail
    -
    -WORKDIR=$(mktemp -d)
    -
    -function finish {
    -  rm -rf "$WORKDIR"
    -}
    -trap finish EXIT
    -
    -export GOBIN=${WORKDIR}/bin
    -export PATH=${GOBIN}:${PATH}
    -mkdir -p ${GOBIN}
    -
    -echo "remove existing generated files"
    -# grpc_testing_not_regenerate/*.pb.go is not re-generated,
    -# see grpc_testing_not_regenerate/README.md for details.
    -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
    -
    -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
    -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
    -
    -echo "go install cmd/protoc-gen-go-grpc"
    -(cd cmd/protoc-gen-go-grpc && go install .)
    -
    -echo "git clone https://github.com/grpc/grpc-proto"
    -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
    -
    -echo "git clone https://github.com/protocolbuffers/protobuf"
    -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
    -
    -# Pull in code.proto as a proto dependency
    -mkdir -p ${WORKDIR}/googleapis/google/rpc
    -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
    -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
    -
    -mkdir -p ${WORKDIR}/out
    -
    -# Generates sources without the embed requirement
    -LEGACY_SOURCES=(
    -  ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
    -  ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
    -  ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
    -  ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
    -  profiling/proto/service.proto
    -  ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
    -  ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
    -)
    -
    -# Generates only the new gRPC Service symbols
    -SOURCES=(
    -  $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
    -  ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
    -  ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
    -  ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
    -  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
    -  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
    -  ${WORKDIR}/grpc-proto/grpc/testing/*.proto
    -  ${WORKDIR}/grpc-proto/grpc/core/*.proto
    -)
    -
    -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
    -# import path of 'bar' in the generated code when 'foo.proto' is imported in
    -# one of the sources.
    -#
    -# Note that the protos listed here are all for testing purposes. All protos to
    -# be used externally should have a go_package option (and they don't need to be
    -# listed here).
    -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
    -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
    -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
    -
    -for src in ${SOURCES[@]}; do
    -  echo "protoc ${src}"
    -  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
    -    -I"." \
    -    -I${WORKDIR}/grpc-proto \
    -    -I${WORKDIR}/googleapis \
    -    -I${WORKDIR}/protobuf/src \
    -    ${src}
    -done
    -
    -for src in ${LEGACY_SOURCES[@]}; do
    -  echo "protoc ${src}"
    -  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
    -    -I"." \
    -    -I${WORKDIR}/grpc-proto \
    -    -I${WORKDIR}/googleapis \
    -    -I${WORKDIR}/protobuf/src \
    -    ${src}
    -done
    -
    -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
    -# current location. Move it into the right place.
    -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
    -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
    -
    -# grpc_testing_not_regenerate/*.pb.go are not re-generated,
    -# see grpc_testing_not_regenerate/README.md for details.
    -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
    -
    -cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
    diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
    index 14aa6f20ae..ef3d6ed6c4 100644
    --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
    +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
    @@ -18,19 +18,43 @@
     
     // Package dns implements a dns resolver to be installed as the default resolver
     // in grpc.
    -//
    -// Deprecated: this package is imported by grpc and should not need to be
    -// imported directly by users.
     package dns
     
     import (
    +	"time"
    +
     	"google.golang.org/grpc/internal/resolver/dns"
     	"google.golang.org/grpc/resolver"
     )
     
    +// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
    +//
    +// This function affects the global timeout used by all channels using the DNS
    +// name resolver scheme.
    +//
    +// It must be called only at application startup, before any gRPC calls are
    +// made. Modifying this value after initialization is not thread-safe.
    +//
    +// The default value is 30 seconds. Setting the timeout too low may result in
    +// premature timeouts during resolution, while setting it too high may lead to
    +// unnecessary delays in service discovery. Choose a value appropriate for your
    +// specific needs and network environment.
    +func SetResolvingTimeout(timeout time.Duration) {
    +	dns.ResolvingTimeout = timeout
    +}
    +
     // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
     //
     // Deprecated: import grpc and use resolver.Get("dns") instead.
     func NewBuilder() resolver.Builder {
     	return dns.NewBuilder()
     }
    +
    +// SetMinResolutionInterval sets the default minimum interval at which DNS
    +// re-resolutions are allowed. This helps to prevent excessive re-resolution.
    +//
    +// It must be called only at application startup, before any gRPC calls are
    +// made. Modifying this value after initialization is not thread-safe.
    +func SetMinResolutionInterval(d time.Duration) {
    +	dns.MinResolutionInterval = d
    +}
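    +
    +// Illustrative example (durations chosen arbitrarily): both knobs are meant to
    +// be set once from application code at startup, e.g.
    +//
    +//	func init() {
    +//		dns.SetResolvingTimeout(10 * time.Second)
    +//		dns.SetMinResolutionInterval(5 * time.Second)
    +//	}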
    diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
    index f2efa2a2cb..09e864a89d 100644
    --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go
    +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
    @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) {
     
     // Build returns itself for Resolver, because it's both a builder and a resolver.
     func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
    -	r.BuildCallback(target, cc, opts)
     	r.mu.Lock()
     	defer r.mu.Unlock()
    +	// Call BuildCallback after locking to avoid a race when UpdateState
    +	// or ReportError is called before Build returns.
    +	r.BuildCallback(target, cc, opts)
     	r.CC = cc
     	if r.lastSeenState != nil {
     		err := r.CC.UpdateState(*r.lastSeenState)
    diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
    index adf89dd9cf..8eb1cf3bcf 100644
    --- a/vendor/google.golang.org/grpc/resolver/resolver.go
    +++ b/vendor/google.golang.org/grpc/resolver/resolver.go
    @@ -22,6 +22,7 @@ package resolver
     
     import (
     	"context"
    +	"errors"
     	"fmt"
     	"net"
     	"net/url"
    @@ -29,6 +30,7 @@ import (
     
     	"google.golang.org/grpc/attributes"
     	"google.golang.org/grpc/credentials"
    +	"google.golang.org/grpc/internal"
     	"google.golang.org/grpc/serviceconfig"
     )
     
    @@ -63,16 +65,18 @@ func Get(scheme string) Builder {
     }
     
     // SetDefaultScheme sets the default scheme that will be used. The default
    -// default scheme is "passthrough".
    +// scheme is initially set to "passthrough".
     //
     // NOTE: this function must only be called during initialization time (i.e. in
     // an init() function), and is not thread-safe. The scheme set last overrides
     // previously set values.
     func SetDefaultScheme(scheme string) {
     	defaultScheme = scheme
    +	internal.UserSetDefaultScheme = true
     }
     
    -// GetDefaultScheme gets the default scheme that will be used.
    +// GetDefaultScheme gets the default scheme that will be used by grpc.Dial.  If
    +// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead.
     func GetDefaultScheme() string {
     	return defaultScheme
     }
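    +
    +// Illustrative example: an application that wants grpc.Dial to use the "dns"
    +// scheme by default can opt in during initialization, e.g.
    +//
    +//	func init() { resolver.SetDefaultScheme("dns") }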
    @@ -168,6 +172,9 @@ type BuildOptions struct {
     	// field. In most cases though, it is not appropriate, and this field may
     	// be ignored.
     	Dialer func(context.Context, string) (net.Conn, error)
    +	// Authority is the effective authority of the clientconn for which the
    +	// resolver is built.
    +	Authority string
     }
     
     // An Endpoint is one network endpoint, or server, which may have multiple
    @@ -231,8 +238,8 @@ type ClientConn interface {
     	// UpdateState can be omitted.
     	UpdateState(State) error
     	// ReportError notifies the ClientConn that the Resolver encountered an
    -	// error.  The ClientConn will notify the load balancer and begin calling
    -	// ResolveNow on the Resolver with exponential backoff.
    +	// error. The ClientConn then forwards this error to the load balancing
    +	// policy.
     	ReportError(error)
     	// NewAddress is called by resolver to notify ClientConn a new list
     	// of resolved addresses.
    @@ -281,9 +288,9 @@ func (t Target) Endpoint() string {
     	return strings.TrimPrefix(endpoint, "/")
     }
     
    -// String returns a string representation of Target.
    +// String returns the canonical string representation of Target.
     func (t Target) String() string {
    -	return t.URL.String()
    +	return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint()
     }
     
     // Builder creates a resolver that will be used to watch name resolution updates.
    @@ -324,3 +331,20 @@ type AuthorityOverrider interface {
     	// typically in line, and must keep it unchanged.
     	OverrideAuthority(Target) string
     }
    +
    +// ValidateEndpoints validates endpoints from a petiole policy's perspective.
    +// Petiole policies should call this before calling into their children. See
    +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md)
    +// for details.
    +func ValidateEndpoints(endpoints []Endpoint) error {
    +	if len(endpoints) == 0 {
    +		return errors.New("endpoints list is empty")
    +	}
    +
    +	for _, endpoint := range endpoints {
    +		for range endpoint.Addresses {
    +			return nil
    +		}
    +	}
    +	return errors.New("endpoints list contains no addresses")
    +}
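    +
    +// Illustrative behavior of the checks above (addresses are made up):
    +//
    +//	ValidateEndpoints(nil)            // error: endpoints list is empty
    +//	ValidateEndpoints([]Endpoint{{}}) // error: endpoints list contains no addresses
    +//	ValidateEndpoints([]Endpoint{{Addresses: []Address{{Addr: "10.0.0.1:443"}}}}) // nil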
    diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
    index c79bab1214..23bb3fb258 100644
    --- a/vendor/google.golang.org/grpc/resolver_wrapper.go
    +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
    @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
     // any newly created ccResolverWrapper, except that close may be called instead.
     func (ccr *ccResolverWrapper) start() error {
     	errCh := make(chan error)
    -	ccr.serializer.Schedule(func(ctx context.Context) {
    +	ccr.serializer.TrySchedule(func(ctx context.Context) {
     		if ctx.Err() != nil {
     			return
     		}
    @@ -75,6 +75,7 @@ func (ccr *ccResolverWrapper) start() error {
     			DialCreds:            ccr.cc.dopts.copts.TransportCredentials,
     			CredsBundle:          ccr.cc.dopts.copts.CredsBundle,
     			Dialer:               ccr.cc.dopts.copts.Dialer,
    +			Authority:            ccr.cc.authority,
     		}
     		var err error
     		ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
    @@ -84,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error {
     }
     
     func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
    -	ccr.serializer.Schedule(func(ctx context.Context) {
    +	ccr.serializer.TrySchedule(func(ctx context.Context) {
     		if ctx.Err() != nil || ccr.resolver == nil {
     			return
     		}
    @@ -96,12 +97,12 @@ func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
     // finished shutting down, the channel should block on ccr.serializer.Done()
     // without cc.mu held.
     func (ccr *ccResolverWrapper) close() {
    -	channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver")
    +	channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver")
     	ccr.mu.Lock()
     	ccr.closed = true
     	ccr.mu.Unlock()
     
    -	ccr.serializer.Schedule(func(context.Context) {
    +	ccr.serializer.TrySchedule(func(context.Context) {
     		if ccr.resolver == nil {
     			return
     		}
    @@ -146,7 +147,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) {
     		return
     	}
     	ccr.mu.Unlock()
    -	channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
    +	channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err)
     	ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
     }
     
    @@ -170,12 +171,15 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
     // ParseServiceConfig is called by resolver implementations to parse a JSON
     // representation of the service config.
     func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
    -	return parseServiceConfig(scJSON)
    +	return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
     }
     
     // addChannelzTraceEvent adds a channelz trace event containing the new
     // state received from resolver implementations.
     func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
    +	if !logger.V(0) && !channelz.IsOn() {
    +		return
    +	}
     	var updates []string
     	var oldSC, newSC *ServiceConfig
     	var oldOK, newOK bool
    @@ -193,5 +197,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
     	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
     		updates = append(updates, "resolver returned new addresses")
     	}
    -	channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
    +	channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
     }
    diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
    index a4b6bc6873..9fac2b08b4 100644
    --- a/vendor/google.golang.org/grpc/rpc_util.go
    +++ b/vendor/google.golang.org/grpc/rpc_util.go
    @@ -19,7 +19,6 @@
     package grpc
     
     import (
    -	"bytes"
     	"compress/gzip"
     	"context"
     	"encoding/binary"
    @@ -35,6 +34,7 @@ import (
     	"google.golang.org/grpc/encoding"
     	"google.golang.org/grpc/encoding/proto"
     	"google.golang.org/grpc/internal/transport"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/peer"
     	"google.golang.org/grpc/stats"
    @@ -189,6 +189,20 @@ type EmptyCallOption struct{}
     func (EmptyCallOption) before(*callInfo) error      { return nil }
     func (EmptyCallOption) after(*callInfo, *csAttempt) {}
     
    +// StaticMethod returns a CallOption which specifies that a call is being made
    +// to a method that is static, which means the method is known at compile time
    +// and doesn't change at runtime. This can be used as a signal to stats plugins
    +// that this method is safe to include as a key to a measurement.
    +func StaticMethod() CallOption {
    +	return StaticMethodCallOption{}
    +}
    +
    +// StaticMethodCallOption is a CallOption that specifies that a call comes
    +// from a static method.
    +type StaticMethodCallOption struct {
    +	EmptyCallOption
    +}
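    +
    +// Illustrative example (SayHello is a hypothetical generated method): calls
    +// whose method names are fixed at compile time can be annotated as
    +//
    +//	resp, err := client.SayHello(ctx, req, grpc.StaticMethod())
    +//
    +// so that stats plugins can safely key measurements by method name.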
    +
     // Header returns a CallOption that retrieves the header metadata
     // for a unary RPC.
     func Header(md *metadata.MD) CallOption {
    @@ -206,8 +220,8 @@ type HeaderCallOption struct {
     	HeaderAddr *metadata.MD
     }
     
    -func (o HeaderCallOption) before(c *callInfo) error { return nil }
    -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
    +func (o HeaderCallOption) before(*callInfo) error { return nil }
    +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
     	*o.HeaderAddr, _ = attempt.s.Header()
     }
     
    @@ -228,8 +242,8 @@ type TrailerCallOption struct {
     	TrailerAddr *metadata.MD
     }
     
    -func (o TrailerCallOption) before(c *callInfo) error { return nil }
    -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
    +func (o TrailerCallOption) before(*callInfo) error { return nil }
    +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
     	*o.TrailerAddr = attempt.s.Trailer()
     }
     
    @@ -250,24 +264,20 @@ type PeerCallOption struct {
     	PeerAddr *peer.Peer
     }
     
    -func (o PeerCallOption) before(c *callInfo) error { return nil }
    -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
    +func (o PeerCallOption) before(*callInfo) error { return nil }
    +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
     	if x, ok := peer.FromContext(attempt.s.Context()); ok {
     		*o.PeerAddr = *x
     	}
     }
     
    -// WaitForReady configures the action to take when an RPC is attempted on broken
    -// connections or unreachable servers. If waitForReady is false and the
    -// connection is in the TRANSIENT_FAILURE state, the RPC will fail
    -// immediately. Otherwise, the RPC client will block the call until a
    -// connection is available (or the call is canceled or times out) and will
    -// retry the call if it fails due to a transient error.  gRPC will not retry if
    -// data was written to the wire unless the server indicates it did not process
    -// the data.  Please refer to
    -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
    +// WaitForReady configures the RPC's behavior when the client is in
    +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect.  If
    +// waitForReady is false, the RPC will fail immediately.  Otherwise, the client
    +// will wait until a connection becomes available or the RPC's deadline is
    +// reached.
     //
    -// By default, RPCs don't "wait for ready".
    +// By default, RPCs do not "wait for ready".
     func WaitForReady(waitForReady bool) CallOption {
     	return FailFastCallOption{FailFast: !waitForReady}
     }
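    +
    +// Illustrative example (SomeRPC is a hypothetical generated method):
    +//
    +//	resp, err := client.SomeRPC(ctx, req, grpc.WaitForReady(true))
    +//
    +// waits for a usable connection (up to ctx's deadline) instead of failing fast
    +// while the channel is in TRANSIENT_FAILURE.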
    @@ -294,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
     	c.failFast = o.FailFast
     	return nil
     }
    -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
     
     // OnFinish returns a CallOption that configures a callback to be called when
     // the call completes. The error passed to the callback is the status of the
    @@ -329,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error {
     	return nil
     }
     
    -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
     
     // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
     // in bytes the client can receive. If this is not set, gRPC uses the default
    @@ -353,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
     	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
     	return nil
     }
    -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
     
     // MaxCallSendMsgSize returns a CallOption which sets the maximum message size
     // in bytes the client can send. If this is not set, gRPC uses the default
    @@ -377,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
     	c.maxSendMessageSize = &o.MaxSendMsgSize
     	return nil
     }
    -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
     
     // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
     // for a call.
    @@ -400,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
     	c.creds = o.Creds
     	return nil
     }
    -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
     
     // UseCompressor returns a CallOption which sets the compressor used when
     // sending the request.  If WithCompressor is also set, UseCompressor has
    @@ -428,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error {
     	c.compressorType = o.CompressorType
     	return nil
     }
    -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
     
     // CallContentSubtype returns a CallOption that will set the content-subtype
     // for a call. For example, if content-subtype is "json", the Content-Type over
    @@ -465,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
     	c.contentSubtype = o.ContentSubtype
     	return nil
     }
    -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
     
     // ForceCodec returns a CallOption that will set codec to be used for all
     // request and response messages for a call. The result of calling Name() will
    @@ -501,10 +511,50 @@ type ForceCodecCallOption struct {
     }
     
     func (o ForceCodecCallOption) before(c *callInfo) error {
    -	c.codec = o.Codec
    +	c.codec = newCodecV1Bridge(o.Codec)
     	return nil
     }
    -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
    +
    +// ForceCodecV2 returns a CallOption that will set codec to be used for all
    +// request and response messages for a call. The result of calling Name() will
    +// be used as the content-subtype after converting to lowercase, unless
    +// CallContentSubtype is also used.
    +//
    +// See Content-Type on
    +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
    +// more details. Also see the documentation on RegisterCodec and
    +// CallContentSubtype for more details on the interaction between Codec and
    +// content-subtype.
    +//
    +// This function is provided for advanced users; prefer to use only
    +// CallContentSubtype to select a registered codec instead.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +func ForceCodecV2(codec encoding.CodecV2) CallOption {
    +	return ForceCodecV2CallOption{CodecV2: codec}
    +}
    +
    +// ForceCodecV2CallOption is a CallOption that indicates the codec used for
    +// marshaling messages.
    +//
    +// # Experimental
    +//
    +// Notice: This type is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +type ForceCodecV2CallOption struct {
    +	CodecV2 encoding.CodecV2
    +}
    +
    +func (o ForceCodecV2CallOption) before(c *callInfo) error {
    +	c.codec = o.CodecV2
    +	return nil
    +}
    +
    +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
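    +
    +// Illustrative example (myCodec is a hypothetical encoding.CodecV2
    +// implementation provided by the caller):
    +//
    +//	err := cc.Invoke(ctx, "/pkg.Service/Method", req, resp, grpc.ForceCodecV2(myCodec))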
     
     // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
     // an encoding.Codec.
    @@ -526,10 +576,10 @@ type CustomCodecCallOption struct {
     }
     
     func (o CustomCodecCallOption) before(c *callInfo) error {
    -	c.codec = o.Codec
    +	c.codec = newCodecV0Bridge(o.Codec)
     	return nil
     }
    -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
     
     // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
     // used for buffering this RPC's requests for retry purposes.
    @@ -557,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
     	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
     	return nil
     }
    -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
    +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
     
     // The format of the payload: compressed or not?
     type payloadFormat uint8
    @@ -567,19 +617,28 @@ const (
     	compressionMade payloadFormat = 1 // compressed
     )
     
    +func (pf payloadFormat) isCompressed() bool {
    +	return pf == compressionMade
    +}
    +
    +type streamReader interface {
    +	ReadMessageHeader(header []byte) error
    +	Read(n int) (mem.BufferSlice, error)
    +}
    +
     // parser reads complete gRPC messages from the underlying reader.
     type parser struct {
     	// r is the underlying reader.
     	// See the comment on recvMsg for the permissible
     	// error types.
    -	r io.Reader
    +	r streamReader
     
     	// The header of a gRPC message. Find more detail at
     	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
     	header [5]byte
     
    -	// recvBufferPool is the pool of shared receive buffers.
    -	recvBufferPool SharedBufferPool
    +	// bufferPool is the pool of shared receive buffers.
    +	bufferPool mem.BufferPool
     }
     
     // recvMsg reads a complete gRPC message from the stream.
    @@ -594,39 +653,38 @@ type parser struct {
     //   - an error from the status package
     //
     // No other error values or types must be returned, which also means
    -// that the underlying io.Reader must not return an incompatible
    +// that the underlying streamReader must not return an incompatible
     // error.
    -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
    -	if _, err := p.r.Read(p.header[:]); err != nil {
    +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
    +	err := p.r.ReadMessageHeader(p.header[:])
    +	if err != nil {
     		return 0, nil, err
     	}
     
    -	pf = payloadFormat(p.header[0])
    +	pf := payloadFormat(p.header[0])
     	length := binary.BigEndian.Uint32(p.header[1:])
     
    -	if length == 0 {
    -		return pf, nil, nil
    -	}
     	if int64(length) > int64(maxInt) {
     		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
     	}
     	if int(length) > maxReceiveMessageSize {
     		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
     	}
    -	msg = p.recvBufferPool.Get(int(length))
    -	if _, err := p.r.Read(msg); err != nil {
    +
    +	data, err := p.r.Read(int(length))
    +	if err != nil {
     		if err == io.EOF {
     			err = io.ErrUnexpectedEOF
     		}
     		return 0, nil, err
     	}
    -	return pf, msg, nil
    +	return pf, data, nil
     }
     
     // encode serializes msg and returns a buffer containing the message, or an
     // error if it is too large to be transmitted by grpc.  If msg is nil, it
     // generates an empty message.
    -func encode(c baseCodec, msg any) ([]byte, error) {
    +func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
     	if msg == nil { // NOTE: typed nils will not be caught by this check
     		return nil, nil
     	}
    @@ -634,7 +692,8 @@ func encode(c baseCodec, msg any) ([]byte, error) {
     	if err != nil {
     		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
     	}
    -	if uint(len(b)) > math.MaxUint32 {
    +	if uint(b.Len()) > math.MaxUint32 {
    +		b.Free()
     		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
     	}
     	return b, nil
    @@ -645,34 +704,41 @@ func encode(c baseCodec, msg any) ([]byte, error) {
     // indicating no compression was done.
     //
     // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
    -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
    -	if compressor == nil && cp == nil {
    -		return nil, nil
    -	}
    -	if len(in) == 0 {
    -		return nil, nil
    +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
    +	if (compressor == nil && cp == nil) || in.Len() == 0 {
    +		return nil, compressionNone, nil
     	}
    +	var out mem.BufferSlice
    +	w := mem.NewWriter(&out, pool)
     	wrapErr := func(err error) error {
    +		out.Free()
     		return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
     	}
    -	cbuf := &bytes.Buffer{}
     	if compressor != nil {
    -		z, err := compressor.Compress(cbuf)
    +		z, err := compressor.Compress(w)
     		if err != nil {
    -			return nil, wrapErr(err)
    +			return nil, 0, wrapErr(err)
     		}
    -		if _, err := z.Write(in); err != nil {
    -			return nil, wrapErr(err)
    +		for _, b := range in {
    +			if _, err := z.Write(b.ReadOnlyData()); err != nil {
    +				return nil, 0, wrapErr(err)
    +			}
     		}
     		if err := z.Close(); err != nil {
    -			return nil, wrapErr(err)
    +			return nil, 0, wrapErr(err)
     		}
     	} else {
    -		if err := cp.Do(cbuf, in); err != nil {
    -			return nil, wrapErr(err)
    +		// This is obviously really inefficient since it fully materializes the data, but
    +		// there is no way around this with the old Compressor API. At least it attempts
    +		// to return the buffer to the provider, in the hopes it can be reused (maybe
    +		// even by a subsequent call to this very function).
    +		buf := in.MaterializeToBuffer(pool)
    +		defer buf.Free()
    +		if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
    +			return nil, 0, wrapErr(err)
     		}
     	}
    -	return cbuf.Bytes(), nil
    +	return out, compressionMade, nil
     }
     
     const (
    @@ -683,33 +749,36 @@ const (
     
     // msgHeader returns a 5-byte header for the message being transmitted and the
     // payload, which is compData if non-nil or data otherwise.
    -func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
    +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
     	hdr = make([]byte, headerLen)
    -	if compData != nil {
    -		hdr[0] = byte(compressionMade)
    -		data = compData
    +	hdr[0] = byte(pf)
    +
    +	var length uint32
    +	if pf.isCompressed() {
    +		length = uint32(compData.Len())
    +		payload = compData
     	} else {
    -		hdr[0] = byte(compressionNone)
    +		length = uint32(data.Len())
    +		payload = data
     	}
     
     	// Write length of payload into buf
    -	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
    -	return hdr, data
    +	binary.BigEndian.PutUint32(hdr[payloadLen:], length)
    +	return hdr, payload
     }
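    +
    +// Illustrative example of the framing built above: an uncompressed 5-byte
    +// payload yields the header bytes 0x00 0x00 0x00 0x00 0x05 (a one-byte
    +// compressed flag followed by the payload length as a big-endian uint32).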
     
    -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
    +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
     	return &stats.OutPayload{
     		Client:           client,
     		Payload:          msg,
    -		Data:             data,
    -		Length:           len(data),
    -		WireLength:       len(payload) + headerLen,
    -		CompressedLength: len(payload),
    +		Length:           dataLength,
    +		WireLength:       payloadLength + headerLen,
    +		CompressedLength: payloadLength,
     		SentTime:         t,
     	}
     }
     
    -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
    +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
     	switch pf {
     	case compressionNone:
     	case compressionMade:
    @@ -717,7 +786,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
     			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
     		}
     		if !haveCompressor {
    -			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
    +			if isServer {
    +				return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
    +			}
    +			return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
     		}
     	default:
     		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
    @@ -727,88 +799,110 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
     
     type payloadInfo struct {
     	compressedLength  int // The compressed length got from wire.
    -	uncompressedBytes []byte
    +	uncompressedBytes mem.BufferSlice
     }
     
    -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
    -	pf, buf, err := p.recvMsg(maxReceiveMessageSize)
    +func (p *payloadInfo) free() {
    +	if p != nil && p.uncompressedBytes != nil {
    +		p.uncompressedBytes.Free()
    +	}
    +}
    +
    +// recvAndDecompress reads a message from the stream, decompressing it if necessary.
    +//
+// The caller should Free the returned mem.BufferSlice as soon as it is no longer
+// needed so that its buffers can be returned to the pool.
    +// TODO: Refactor this function to reduce the number of arguments.
    +// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
    +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
    +) (out mem.BufferSlice, err error) {
    +	pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
     	if err != nil {
     		return nil, err
     	}
    -	if payInfo != nil {
    -		payInfo.compressedLength = len(buf)
    -	}
     
    -	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
    +	compressedLength := compressed.Len()
    +
    +	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
    +		compressed.Free()
     		return nil, st.Err()
     	}
     
     	var size int
    -	if pf == compressionMade {
    +	if pf.isCompressed() {
    +		defer compressed.Free()
    +
     		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
     		// use this decompressor as the default.
     		if dc != nil {
    -			buf, err = dc.Do(bytes.NewReader(buf))
    -			size = len(buf)
    +			var uncompressedBuf []byte
    +			uncompressedBuf, err = dc.Do(compressed.Reader())
    +			if err == nil {
    +				out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)}
    +			}
    +			size = len(uncompressedBuf)
     		} else {
    -			buf, size, err = decompress(compressor, buf, maxReceiveMessageSize)
    +			out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool)
     		}
     		if err != nil {
     			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
     		}
     		if size > maxReceiveMessageSize {
    +			out.Free()
     			// TODO: Revisit the error code. Currently keep it consistent with java
     			// implementation.
     			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
     		}
    +	} else {
    +		out = compressed
     	}
    -	return buf, nil
    +
    +	if payInfo != nil {
    +		payInfo.compressedLength = compressedLength
    +		out.Ref()
    +		payInfo.uncompressedBytes = out
    +	}
    +
    +	return out, nil
     }
     
     // Using compressor, decompress d, returning data and size.
     // Optionally, if data will be over maxReceiveMessageSize, just return the size.
    -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
    -	dcReader, err := compressor.Decompress(bytes.NewReader(d))
    +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
    +	dcReader, err := compressor.Decompress(d.Reader())
     	if err != nil {
     		return nil, 0, err
     	}
    -	if sizer, ok := compressor.(interface {
    -		DecompressedSize(compressedBytes []byte) int
    -	}); ok {
    -		if size := sizer.DecompressedSize(d); size >= 0 {
    -			if size > maxReceiveMessageSize {
    -				return nil, size, nil
    -			}
    -			// size is used as an estimate to size the buffer, but we
    -			// will read more data if available.
    -			// +MinRead so ReadFrom will not reallocate if size is correct.
    -			buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
    -			bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
    -			return buf.Bytes(), int(bytesRead), err
    -		}
    +
    +	out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool)
    +	if err != nil {
    +		out.Free()
    +		return nil, 0, err
     	}
    -	// Read from LimitReader with limit max+1. So if the underlying
    -	// reader is over limit, the result will be bigger than max.
    -	d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
    -	return d, len(d), err
    +	return out, out.Len(), nil
    +}
    +
    +type recvCompressor interface {
    +	RecvCompress() string
     }
     
     // For the two compressor parameters, both should not be set, but if they are,
     // dc takes precedence over compressor.
     // TODO(dfawley): wrap the old compressor/decompressor using the new API?
    -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
    -	buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
    +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
    +	data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
     	if err != nil {
     		return err
     	}
    -	if err := c.Unmarshal(buf, m); err != nil {
    +
    +	// If the codec wants its own reference to the data, it can get it. Otherwise, always
    +	// free the buffers.
    +	defer data.Free()
    +
    +	if err := c.Unmarshal(data, m); err != nil {
     		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
     	}
    -	if payInfo != nil {
    -		payInfo.uncompressedBytes = buf
    -	} else {
    -		p.recvBufferPool.Put(&buf)
    -	}
    +
     	return nil
     }
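
decompress above now streams through mem.ReadAll, but it keeps the long-standing guard: it reads through an io.LimitReader capped at maxReceiveMessageSize+1 bytes, so an oversized message shows up as size > max without buffering the whole thing, and recvAndDecompress converts that into a ResourceExhausted status. A standalone sketch of the same guard (the 4 MiB limit and the gzip payload are illustrative assumptions, not values from this diff):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// Illustrative limit; gRPC's default receive limit is 4 MiB.
const maxReceiveMessageSize = 4 * 1024 * 1024

// readBounded decompresses r but never buffers more than max+1 bytes, so the
// caller can distinguish "fits" from "too large" without reading the rest.
func readBounded(r io.Reader, max int) ([]byte, error) {
	data, err := io.ReadAll(io.LimitReader(r, int64(max)+1))
	if err != nil {
		return nil, err
	}
	if len(data) > max {
		return nil, fmt.Errorf("message after decompression larger than max (%d vs. %d)", len(data), max)
	}
	return data, nil
}

func main() {
	// Compress a small payload, then run it back through the bounded reader.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(bytes.Repeat([]byte("a"), 1024)); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := readBounded(zr, maxReceiveMessageSize)
	fmt.Println(len(out), err)
}
```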
     
    @@ -911,7 +1005,7 @@ func setCallInfoCodec(c *callInfo) error {
     			// encoding.Codec (Name vs. String method name).  We only support
     			// setting content subtype from encoding.Codec to avoid a behavior
     			// change with the deprecated version.
    -			if ec, ok := c.codec.(encoding.Codec); ok {
    +			if ec, ok := c.codec.(encoding.CodecV2); ok {
     				c.contentSubtype = strings.ToLower(ec.Name())
     			}
     		}
    @@ -920,34 +1014,21 @@ func setCallInfoCodec(c *callInfo) error {
     
     	if c.contentSubtype == "" {
     		// No codec specified in CallOptions; use proto by default.
    -		c.codec = encoding.GetCodec(proto.Name)
    +		c.codec = getCodec(proto.Name)
     		return nil
     	}
     
     	// c.contentSubtype is already lowercased in CallContentSubtype
    -	c.codec = encoding.GetCodec(c.contentSubtype)
    +	c.codec = getCodec(c.contentSubtype)
     	if c.codec == nil {
     		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
     	}
     	return nil
     }
     
    -// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
    -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
    -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
    -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
    -type channelzData struct {
    -	callsStarted   int64
    -	callsFailed    int64
    -	callsSucceeded int64
    -	// lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
    -	// time.Time since it's more costly to atomically update time.Time variable than int64 variable.
    -	lastCallStartedTime int64
    -}
    -
     // The SupportPackageIsVersion variables are referenced from generated protocol
     // buffer files to ensure compatibility with the gRPC version used.  The latest
    -// support package version is 7.
    +// support package version is 9.
     //
     // Older versions are kept for compatibility.
     //
    @@ -958,6 +1039,8 @@ const (
     	SupportPackageIsVersion5 = true
     	SupportPackageIsVersion6 = true
     	SupportPackageIsVersion7 = true
    +	SupportPackageIsVersion8 = true
    +	SupportPackageIsVersion9 = true
     )
     
     const grpcUA = "grpc-go/" + Version
    diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
    index e89c5ac613..16065a027a 100644
    --- a/vendor/google.golang.org/grpc/server.go
    +++ b/vendor/google.golang.org/grpc/server.go
    @@ -33,8 +33,6 @@ import (
     	"sync/atomic"
     	"time"
     
    -	"golang.org/x/net/trace"
    -
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/credentials"
     	"google.golang.org/grpc/encoding"
    @@ -47,6 +45,7 @@ import (
     	"google.golang.org/grpc/internal/grpcutil"
     	"google.golang.org/grpc/internal/transport"
     	"google.golang.org/grpc/keepalive"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/peer"
     	"google.golang.org/grpc/stats"
    @@ -82,18 +81,19 @@ func init() {
     	}
     	internal.BinaryLogger = binaryLogger
     	internal.JoinServerOptions = newJoinServerOption
    -	internal.RecvBufferPool = recvBufferPool
    +	internal.BufferPool = bufferPool
     }
     
     var statusOK = status.New(codes.OK, "")
     var logger = grpclog.Component("core")
     
    -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
    +// MethodHandler is a function type that processes a unary RPC method call.
    +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
     
     // MethodDesc represents an RPC service's method specification.
     type MethodDesc struct {
     	MethodName string
    -	Handler    methodHandler
    +	Handler    MethodHandler
     }
     
     // ServiceDesc represents an RPC service's specification.
    @@ -131,7 +131,7 @@ type Server struct {
     	drain    bool
     	cv       *sync.Cond              // signaled when connections close for GracefulStop
     	services map[string]*serviceInfo // service name -> service info
    -	events   trace.EventLog
    +	events   traceEventLog
     
     	quit               *grpcsync.Event
     	done               *grpcsync.Event
    @@ -139,8 +139,7 @@ type Server struct {
     	serveWG            sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
     	handlersWG         sync.WaitGroup // counts active method handler goroutines
     
    -	channelzID *channelz.Identifier
    -	czData     *channelzData
    +	channelz *channelz.Server
     
     	serverWorkerChannel      chan func()
     	serverWorkerChannelClose func()
    @@ -173,7 +172,7 @@ type serverOptions struct {
     	maxHeaderListSize     *uint32
     	headerTableSize       *uint32
     	numServerWorkers      uint32
    -	recvBufferPool        SharedBufferPool
    +	bufferPool            mem.BufferPool
     	waitForHandlers       bool
     }
     
    @@ -184,7 +183,7 @@ var defaultServerOptions = serverOptions{
     	connectionTimeout:     120 * time.Second,
     	writeBufferSize:       defaultWriteBufSize,
     	readBufferSize:        defaultReadBufSize,
    -	recvBufferPool:        nopBufferPool{},
    +	bufferPool:            mem.DefaultBufferPool(),
     }
     var globalServerOptions []ServerOption
     
    @@ -251,11 +250,9 @@ func SharedWriteBuffer(val bool) ServerOption {
     }
     
     // WriteBufferSize determines how much data can be batched before doing a write
    -// on the wire. The corresponding memory allocation for this buffer will be
    -// twice the size to keep syscalls low. The default value for this buffer is
    -// 32KB. Zero or negative values will disable the write buffer such that each
    -// write will be on underlying connection.
    -// Note: A Send call may not directly translate to a write.
+// on the wire. The default value for this buffer is 32KB. Zero or negative
+// values will disable the write buffer such that each write will be made
+// directly on the underlying connection. Note: A Send call may not directly
+// translate to a write.
     func WriteBufferSize(s int) ServerOption {
     	return newFuncServerOption(func(o *serverOptions) {
     		o.writeBufferSize = s
    @@ -318,7 +315,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
     // Will be supported throughout 1.x.
     func CustomCodec(codec Codec) ServerOption {
     	return newFuncServerOption(func(o *serverOptions) {
    -		o.codec = codec
    +		o.codec = newCodecV0Bridge(codec)
     	})
     }
     
    @@ -347,7 +344,22 @@ func CustomCodec(codec Codec) ServerOption {
     // later release.
     func ForceServerCodec(codec encoding.Codec) ServerOption {
     	return newFuncServerOption(func(o *serverOptions) {
    -		o.codec = codec
    +		o.codec = newCodecV1Bridge(codec)
    +	})
    +}
    +
    +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
    +// CodecV2 interface.
    +//
    +// Will be supported throughout 1.x.
    +//
    +// # Experimental
    +//
    +// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    +// later release.
    +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
    +	return newFuncServerOption(func(o *serverOptions) {
    +		o.codec = codecV2
     	})
     }
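
ForceServerCodecV2 accepts a codec that marshals directly to and from mem.BufferSlice. Below is a hedged sketch of wrapping a legacy byte-slice codec for it, assuming encoding.CodecV2 exposes Marshal(any) (mem.BufferSlice, error), Unmarshal(mem.BufferSlice, any) error, and Name() string, as the bridge and recv code elsewhere in this diff suggest; the jsonCodec and the bridge type are illustrative, not part of gRPC.

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// jsonCodec is a toy legacy (byte-slice based) encoding.Codec.
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

// codecV2Bridge adapts a legacy Codec to the BufferSlice-based CodecV2,
// roughly what the unexported newCodecV1Bridge in this diff is assumed to do.
type codecV2Bridge struct{ encoding.Codec }

func (c codecV2Bridge) Marshal(v any) (mem.BufferSlice, error) {
	data, err := c.Codec.Marshal(v)
	if err != nil {
		return nil, err
	}
	return mem.BufferSlice{mem.SliceBuffer(data)}, nil
}

func (c codecV2Bridge) Unmarshal(data mem.BufferSlice, v any) error {
	// Materialize copies the chunks into one contiguous []byte for the old API.
	return c.Codec.Unmarshal(data.Materialize(), v)
}

func main() {
	s := grpc.NewServer(grpc.ForceServerCodecV2(codecV2Bridge{Codec: jsonCodec{}}))
	fmt.Printf("%T configured with a CodecV2 bridge\n", s)
	s.Stop()
}
```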
     
    @@ -532,12 +544,22 @@ func ConnectionTimeout(d time.Duration) ServerOption {
     	})
     }
     
    +// MaxHeaderListSizeServerOption is a ServerOption that sets the max
    +// (uncompressed) size of header list that the server is prepared to accept.
    +type MaxHeaderListSizeServerOption struct {
    +	MaxHeaderListSize uint32
    +}
    +
    +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
    +	so.maxHeaderListSize = &o.MaxHeaderListSize
    +}
    +
     // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
     // of header list that the server is prepared to accept.
     func MaxHeaderListSize(s uint32) ServerOption {
    -	return newFuncServerOption(func(o *serverOptions) {
    -		o.maxHeaderListSize = &s
    -	})
    +	return MaxHeaderListSizeServerOption{
    +		MaxHeaderListSize: s,
    +	}
     }
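
MaxHeaderListSize now returns an exported option struct instead of a closure, so other packages can inspect it, but callers still treat it as an ordinary ServerOption. A usage sketch combining it with the other options touched in this file; the 1 MiB and 64 KiB values are arbitrary examples:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc"
)

func main() {
	s := grpc.NewServer(
		grpc.MaxHeaderListSize(1<<20),           // cap uncompressed header list size at 1 MiB
		grpc.WriteBufferSize(64*1024),           // batch up to 64 KiB before each write on the wire
		grpc.ConnectionTimeout(120*time.Second), // handshake timeout for new connections
	)
	fmt.Printf("configured %T\n", s)
	// Register services on s and call s.Serve(lis) as usual.
	s.Stop()
}
```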
     
     // HeaderTableSize returns a ServerOption that sets the size of dynamic
    @@ -587,26 +609,9 @@ func WaitForHandlers(w bool) ServerOption {
     	})
     }
     
    -// RecvBufferPool returns a ServerOption that configures the server
    -// to use the provided shared buffer pool for parsing incoming messages. Depending
    -// on the application's workload, this could result in reduced memory allocation.
    -//
    -// If you are unsure about how to implement a memory pool but want to utilize one,
    -// begin with grpc.NewSharedBufferPool.
    -//
    -// Note: The shared buffer pool feature will not be active if any of the following
    -// options are used: StatsHandler, EnableTracing, or binary logging. In such
    -// cases, the shared buffer pool will be ignored.
    -//
    -// Deprecated: use experimental.WithRecvBufferPool instead.  Will be deleted in
    -// v1.60.0 or later.
    -func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
    -	return recvBufferPool(bufferPool)
    -}
    -
    -func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
    +func bufferPool(bufferPool mem.BufferPool) ServerOption {
     	return newFuncServerOption(func(o *serverOptions) {
    -		o.recvBufferPool = bufferPool
    +		o.bufferPool = bufferPool
     	})
     }
     
    @@ -617,8 +622,8 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
     // workload (assuming a QPS of a few thousand requests/sec).
     const serverWorkerResetThreshold = 1 << 16
     
    -// serverWorkers blocks on a *transport.Stream channel forever and waits for
    -// data to be fed by serveStreams. This allows multiple requests to be
    +// serverWorker blocks on a *transport.ServerStream channel forever and waits
    +// for data to be fed by serveStreams. This allows multiple requests to be
     // processed by the same goroutine, removing the need for expensive stack
     // re-allocations (see the runtime.morestack problem [1]).
     //
    @@ -663,22 +668,21 @@ func NewServer(opt ...ServerOption) *Server {
     		services: make(map[string]*serviceInfo),
     		quit:     grpcsync.NewEvent(),
     		done:     grpcsync.NewEvent(),
    -		czData:   new(channelzData),
    +		channelz: channelz.RegisterServer(""),
     	}
     	chainUnaryServerInterceptors(s)
     	chainStreamServerInterceptors(s)
     	s.cv = sync.NewCond(&s.mu)
     	if EnableTracing {
     		_, file, line, _ := runtime.Caller(1)
    -		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
    +		s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
     	}
     
     	if s.opts.numServerWorkers > 0 {
     		s.initServerWorkers()
     	}
     
    -	s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
    -	channelz.Info(logger, s.channelzID, "Server created")
    +	channelz.Info(logger, s.channelz, "Server created")
     	return s
     }
     
    @@ -804,20 +808,13 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped")
     
     type listenSocket struct {
     	net.Listener
    -	channelzID *channelz.Identifier
    -}
    -
    -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
    -	return &channelz.SocketInternalMetric{
    -		SocketOptions: channelz.GetSocketOption(l.Listener),
    -		LocalAddr:     l.Listener.Addr(),
    -	}
    +	channelz *channelz.Socket
     }
     
     func (l *listenSocket) Close() error {
     	err := l.Listener.Close()
    -	channelz.RemoveEntry(l.channelzID)
    -	channelz.Info(logger, l.channelzID, "ListenSocket deleted")
    +	channelz.RemoveEntry(l.channelz.ID)
    +	channelz.Info(logger, l.channelz, "ListenSocket deleted")
     	return err
     }
     
    @@ -859,7 +856,16 @@ func (s *Server) Serve(lis net.Listener) error {
     		}
     	}()
     
    -	ls := &listenSocket{Listener: lis}
    +	ls := &listenSocket{
    +		Listener: lis,
    +		channelz: channelz.RegisterSocket(&channelz.Socket{
    +			SocketType:    channelz.SocketTypeListen,
    +			Parent:        s.channelz,
    +			RefName:       lis.Addr().String(),
    +			LocalAddr:     lis.Addr(),
    +			SocketOptions: channelz.GetSocketOption(lis)},
    +		),
    +	}
     	s.lis[ls] = true
     
     	defer func() {
    @@ -871,14 +877,8 @@ func (s *Server) Serve(lis net.Listener) error {
     		s.mu.Unlock()
     	}()
     
    -	var err error
    -	ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
    -	if err != nil {
    -		s.mu.Unlock()
    -		return err
    -	}
     	s.mu.Unlock()
    -	channelz.Info(logger, ls.channelzID, "ListenSocket created")
    +	channelz.Info(logger, ls.channelz, "ListenSocket created")
     
     	var tempDelay time.Duration // how long to sleep on accept failure
     	for {
    @@ -977,9 +977,10 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
     		WriteBufferSize:       s.opts.writeBufferSize,
     		ReadBufferSize:        s.opts.readBufferSize,
     		SharedWriteBuffer:     s.opts.sharedWriteBuffer,
    -		ChannelzParentID:      s.channelzID,
    +		ChannelzParent:        s.channelz,
     		MaxHeaderListSize:     s.opts.maxHeaderListSize,
     		HeaderTableSize:       s.opts.headerTableSize,
    +		BufferPool:            s.opts.bufferPool,
     	}
     	st, err := transport.NewServerTransport(c, config)
     	if err != nil {
    @@ -991,7 +992,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
     		if err != credentials.ErrConnDispatched {
     			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
     			if err != io.EOF {
    -				channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
    +				channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
     			}
     			c.Close()
     		}
    @@ -1020,7 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
     	}()
     
     	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
    -	st.HandleStreams(ctx, func(stream *transport.Stream) {
    +	st.HandleStreams(ctx, func(stream *transport.ServerStream) {
     		s.handlersWG.Add(1)
     		streamQuota.acquire()
     		f := func() {
    @@ -1072,7 +1073,7 @@ var _ http.Handler = (*Server)(nil)
     // Notice: This API is EXPERIMENTAL and may be changed or removed in a
     // later release.
     func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    -	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
    +	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
     	if err != nil {
     		// Errors returned from transport.NewServerHandlerTransport have
     		// already been written to w.
    @@ -1123,48 +1124,54 @@ func (s *Server) removeConn(addr string, st transport.ServerTransport) {
     	}
     }
     
    -func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
    -	return &channelz.ServerInternalMetric{
    -		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
    -		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
    -		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
    -		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
    -	}
    -}
    -
     func (s *Server) incrCallsStarted() {
    -	atomic.AddInt64(&s.czData.callsStarted, 1)
    -	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
    +	s.channelz.ServerMetrics.CallsStarted.Add(1)
    +	s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
     }
     
     func (s *Server) incrCallsSucceeded() {
    -	atomic.AddInt64(&s.czData.callsSucceeded, 1)
    +	s.channelz.ServerMetrics.CallsSucceeded.Add(1)
     }
     
     func (s *Server) incrCallsFailed() {
    -	atomic.AddInt64(&s.czData.callsFailed, 1)
    +	s.channelz.ServerMetrics.CallsFailed.Add(1)
     }
     
    -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
    +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
     	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
     	if err != nil {
    -		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
    +		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
     		return err
     	}
    -	compData, err := compress(data, cp, comp)
    +
    +	compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
     	if err != nil {
    -		channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
    +		data.Free()
    +		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
     		return err
     	}
    -	hdr, payload := msgHeader(data, compData)
    +
    +	hdr, payload := msgHeader(data, compData, pf)
    +
    +	defer func() {
    +		compData.Free()
    +		data.Free()
    +		// payload does not need to be freed here, it is either data or compData, both of
    +		// which are already freed.
    +	}()
    +
    +	dataLen := data.Len()
    +	payloadLen := payload.Len()
     	// TODO(dfawley): should we be checking len(data) instead?
    -	if len(payload) > s.opts.maxSendMessageSize {
    -		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
    +	if payloadLen > s.opts.maxSendMessageSize {
    +		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
     	}
    -	err = t.Write(stream, hdr, payload, opts)
    +	err = stream.Write(hdr, payload, opts)
     	if err == nil {
    -		for _, sh := range s.opts.statsHandlers {
    -			sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
    +		if len(s.opts.statsHandlers) != 0 {
    +			for _, sh := range s.opts.statsHandlers {
    +				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
    +			}
     		}
     	}
     	return err
    @@ -1206,7 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
     	}
     }
     
    -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
    +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
     	shs := s.opts.statsHandlers
     	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
     		if channelz.IsOn() {
    @@ -1314,7 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
     		decomp = encoding.GetCompressor(rc)
     		if decomp == nil {
     			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
    -			t.WriteStatus(stream, st)
    +			stream.WriteStatus(st)
     			return st.Err()
     		}
     	}
    @@ -1343,34 +1350,34 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
     	var payInfo *payloadInfo
     	if len(shs) != 0 || len(binlogs) != 0 {
     		payInfo = &payloadInfo{}
    +		defer payInfo.free()
     	}
    -	d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
    +
    +	d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
     	if err != nil {
    -		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
    -			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
    +		if e := stream.WriteStatus(status.Convert(err)); e != nil {
    +			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
     		}
     		return err
     	}
    -	if channelz.IsOn() {
    -		t.IncrMsgRecv()
    -	}
    +	defer d.Free()
     	df := func(v any) error {
     		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
     			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
     		}
    +
     		for _, sh := range shs {
     			sh.HandleRPC(ctx, &stats.InPayload{
     				RecvTime:         time.Now(),
     				Payload:          v,
    -				Length:           len(d),
    +				Length:           d.Len(),
     				WireLength:       payInfo.compressedLength + headerLen,
     				CompressedLength: payInfo.compressedLength,
    -				Data:             d,
     			})
     		}
     		if len(binlogs) != 0 {
     			cm := &binarylog.ClientMessage{
    -				Message: d,
    +				Message: d.Materialize(),
     			}
     			for _, binlog := range binlogs {
     				binlog.Log(ctx, cm)
    @@ -1395,8 +1402,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
     			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
     			trInfo.tr.SetError()
     		}
    -		if e := t.WriteStatus(stream, appStatus); e != nil {
    -			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
    +		if e := stream.WriteStatus(appStatus); e != nil {
    +			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
     		}
     		if len(binlogs) != 0 {
     			if h, _ := stream.Header(); h.Len() > 0 {
    @@ -1422,21 +1429,21 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
     	if trInfo != nil {
     		trInfo.tr.LazyLog(stringer("OK"), false)
     	}
    -	opts := &transport.Options{Last: true}
    +	opts := &transport.WriteOptions{Last: true}
     
     	// Server handler could have set new compressor by calling SetSendCompressor.
     	// In case it is set, we need to use it for compressing outbound message.
     	if stream.SendCompress() != sendCompressorName {
     		comp = encoding.GetCompressor(stream.SendCompress())
     	}
    -	if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
    +	if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
     		if err == io.EOF {
     			// The entire stream is done (for unary RPC only).
     			return err
     		}
     		if sts, ok := status.FromError(err); ok {
    -			if e := t.WriteStatus(stream, sts); e != nil {
    -				channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
    +			if e := stream.WriteStatus(sts); e != nil {
    +				channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
     			}
     		} else {
     			switch st := err.(type) {
    @@ -1475,9 +1482,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
     			binlog.Log(ctx, sm)
     		}
     	}
    -	if channelz.IsOn() {
    -		t.IncrMsgSent()
    -	}
     	if trInfo != nil {
     		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
     	}
    @@ -1493,7 +1497,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
     			binlog.Log(ctx, st)
     		}
     	}
    -	return t.WriteStatus(stream, statusOK)
    +	return stream.WriteStatus(statusOK)
     }
     
     // chainStreamServerInterceptors chains all stream server interceptors into one.
    @@ -1532,7 +1536,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
     	}
     }
     
    -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
    +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
     	if channelz.IsOn() {
     		s.incrCallsStarted()
     	}
    @@ -1552,9 +1556,8 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
     	ctx = NewContextWithServerTransportStream(ctx, stream)
     	ss := &serverStream{
     		ctx:                   ctx,
    -		t:                     t,
     		s:                     stream,
    -		p:                     &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
    +		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
     		codec:                 s.getCodec(stream.ContentSubtype()),
     		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
     		maxSendMessageSize:    s.opts.maxSendMessageSize,
    @@ -1639,7 +1642,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
     		ss.decomp = encoding.GetCompressor(rc)
     		if ss.decomp == nil {
     			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
    -			t.WriteStatus(ss.s, st)
    +			ss.s.WriteStatus(st)
     			return st.Err()
     		}
     	}
    @@ -1708,7 +1711,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
     				binlog.Log(ctx, st)
     			}
     		}
    -		t.WriteStatus(ss.s, appStatus)
    +		ss.s.WriteStatus(appStatus)
     		// TODO: Should we log an error from WriteStatus here and below?
     		return appErr
     	}
    @@ -1726,16 +1729,16 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
     			binlog.Log(ctx, st)
     		}
     	}
    -	return t.WriteStatus(ss.s, statusOK)
    +	return ss.s.WriteStatus(statusOK)
     }
     
    -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
    +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
     	ctx := stream.Context()
     	ctx = contextWithServer(ctx, s)
     	var ti *traceInfo
     	if EnableTracing {
    -		tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
    -		ctx = trace.NewContext(ctx, tr)
    +		tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
    +		ctx = newTraceContext(ctx, tr)
     		ti = &traceInfo{
     			tr: tr,
     			firstLine: firstLine{
    @@ -1759,12 +1762,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
     			ti.tr.SetError()
     		}
     		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
    -		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
    +		if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
     			if ti != nil {
     				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
     				ti.tr.SetError()
     			}
    -			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
    +			channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
     		}
     		if ti != nil {
     			ti.tr.Finish()
    @@ -1774,17 +1777,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
     	service := sm[:pos]
     	method := sm[pos+1:]
     
    -	md, _ := metadata.FromIncomingContext(ctx)
    -	for _, sh := range s.opts.statsHandlers {
    -		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
    -		sh.HandleRPC(ctx, &stats.InHeader{
    -			FullMethod:  stream.Method(),
    -			RemoteAddr:  t.Peer().Addr,
    -			LocalAddr:   t.Peer().LocalAddr,
    -			Compression: stream.RecvCompress(),
    -			WireLength:  stream.HeaderWireLength(),
    -			Header:      md,
    -		})
    +	// FromIncomingContext is expensive: skip if there are no statsHandlers
    +	if len(s.opts.statsHandlers) > 0 {
    +		md, _ := metadata.FromIncomingContext(ctx)
    +		for _, sh := range s.opts.statsHandlers {
    +			ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
    +			sh.HandleRPC(ctx, &stats.InHeader{
    +				FullMethod:  stream.Method(),
    +				RemoteAddr:  t.Peer().Addr,
    +				LocalAddr:   t.Peer().LocalAddr,
    +				Compression: stream.RecvCompress(),
    +				WireLength:  stream.HeaderWireLength(),
    +				Header:      md,
    +			})
    +		}
     	}
     	// To have calls in stream callouts work. Will delete once all stats handler
     	// calls come from the gRPC layer.
    @@ -1793,17 +1799,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
     	srv, knownService := s.services[service]
     	if knownService {
     		if md, ok := srv.methods[method]; ok {
    -			s.processUnaryRPC(ctx, t, stream, srv, md, ti)
    +			s.processUnaryRPC(ctx, stream, srv, md, ti)
     			return
     		}
     		if sd, ok := srv.streams[method]; ok {
    -			s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
    +			s.processStreamingRPC(ctx, stream, srv, sd, ti)
     			return
     		}
     	}
     	// Unknown service, or known server unknown method.
     	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
    -		s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
    +		s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
     		return
     	}
     	var errDesc string
    @@ -1816,12 +1822,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
     		ti.tr.LazyPrintf("%s", errDesc)
     		ti.tr.SetError()
     	}
    -	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
    +	if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
     		if ti != nil {
     			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
     			ti.tr.SetError()
     		}
    -		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
    +		channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
     	}
     	if ti != nil {
     		ti.tr.Finish()
    @@ -1893,8 +1899,7 @@ func (s *Server) stop(graceful bool) {
     	s.quit.Fire()
     	defer s.done.Fire()
     
    -	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
    -
    +	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
     	s.mu.Lock()
     	s.closeListenersLocked()
     	// Wait for serving threads to be ready to exit.  Only then can we be sure no
    @@ -1970,12 +1975,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
     		return s.opts.codec
     	}
     	if contentSubtype == "" {
    -		return encoding.GetCodec(proto.Name)
    +		return getCodec(proto.Name)
     	}
    -	codec := encoding.GetCodec(contentSubtype)
    +	codec := getCodec(contentSubtype)
     	if codec == nil {
     		logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
    -		return encoding.GetCodec(proto.Name)
    +		return getCodec(proto.Name)
     	}
     	return codec
     }
    @@ -2092,7 +2097,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
     // Notice: This function is EXPERIMENTAL and may be changed or removed in a
     // later release.
     func SetSendCompressor(ctx context.Context, name string) error {
    -	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
    +	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
     	if !ok || stream == nil {
     		return fmt.Errorf("failed to fetch the stream from the given context")
     	}
    @@ -2114,12 +2119,12 @@ func SetSendCompressor(ctx context.Context, name string) error {
     // Notice: This function is EXPERIMENTAL and may be changed or removed in a
     // later release.
     func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
    -	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
    +	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
     	if !ok || stream == nil {
     		return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
     	}
     
    -	return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil
    +	return stream.ClientAdvertisedCompressors(), nil
     }
     
     // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
    @@ -2149,17 +2154,9 @@ func Method(ctx context.Context) (string, bool) {
     	return s.Method(), true
     }
     
    -type channelzServer struct {
    -	s *Server
    -}
    -
    -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
    -	return c.s.channelzMetric()
    -}
    -
     // validateSendCompressor returns an error when given compressor name cannot be
     // handled by the server or the client based on the advertised compressors.
    -func validateSendCompressor(name, clientCompressors string) error {
    +func validateSendCompressor(name string, clientCompressors []string) error {
     	if name == encoding.Identity {
     		return nil
     	}
    @@ -2168,7 +2165,7 @@ func validateSendCompressor(name, clientCompressors string) error {
     		return fmt.Errorf("compressor not registered %q", name)
     	}
     
    -	for _, c := range strings.Split(clientCompressors, ",") {
    +	for _, c := range clientCompressors {
     		if c == name {
     			return nil // found match
     		}
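
ClientSupportedCompressors and validateSendCompressor now pass the client's advertised compressor names as a []string rather than re-splitting a comma-joined header on every call. A sketch of how a handler might use the public API shown above; calling it outside a handler, as the main below does, simply exercises the error path.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// maybeGzip enables gzip for the response only if the client advertised it in
// grpc-accept-encoding. Intended to be called from inside a server handler,
// where the context carries the server transport stream.
func maybeGzip(ctx context.Context) error {
	comps, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	for _, c := range comps {
		if c == "gzip" {
			return grpc.SetSendCompressor(ctx, "gzip")
		}
	}
	return nil
}

func main() {
	// No stream in a bare context, so this only demonstrates the error path.
	fmt.Println(maybeGzip(context.Background()))
}
```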
    diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
    index 0df11fc098..7e83027d19 100644
    --- a/vendor/google.golang.org/grpc/service_config.go
    +++ b/vendor/google.golang.org/grpc/service_config.go
    @@ -25,8 +25,11 @@ import (
     	"reflect"
     	"time"
     
    +	"google.golang.org/grpc/balancer"
    +	"google.golang.org/grpc/balancer/pickfirst"
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/internal"
    +	"google.golang.org/grpc/internal/balancer/gracefulswitch"
     	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
     	"google.golang.org/grpc/serviceconfig"
     )
    @@ -41,11 +44,6 @@ const maxInt = int(^uint(0) >> 1)
     // https://github.com/grpc/grpc/blob/master/doc/service_config.md
     type MethodConfig = internalserviceconfig.MethodConfig
     
    -type lbConfig struct {
    -	name string
    -	cfg  serviceconfig.LoadBalancingConfig
    -}
    -
     // ServiceConfig is provided by the service provider and contains parameters for how
     // clients that connect to the service should behave.
     //
    @@ -55,14 +53,9 @@ type lbConfig struct {
     type ServiceConfig struct {
     	serviceconfig.Config
     
    -	// LB is the load balancer the service providers recommends.  This is
    -	// deprecated; lbConfigs is preferred.  If lbConfig and LB are both present,
    -	// lbConfig will be used.
    -	LB *string
    -
     	// lbConfig is the service config's load balancing configuration.  If
     	// lbConfig and LB are both present, lbConfig will be used.
    -	lbConfig *lbConfig
    +	lbConfig serviceconfig.LoadBalancingConfig
     
     	// Methods contains a map for the methods in this service.  If there is an
     	// exact match for a method (i.e. /service/method) in the map, use the
    @@ -164,38 +157,56 @@ type jsonMC struct {
     // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
     type jsonSC struct {
     	LoadBalancingPolicy *string
    -	LoadBalancingConfig *internalserviceconfig.BalancerConfig
    +	LoadBalancingConfig *json.RawMessage
     	MethodConfig        *[]jsonMC
     	RetryThrottling     *retryThrottlingPolicy
     	HealthCheckConfig   *healthCheckConfig
     }
     
     func init() {
    -	internal.ParseServiceConfig = parseServiceConfig
    +	internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
    +		return parseServiceConfig(js, defaultMaxCallAttempts)
    +	}
     }
    -func parseServiceConfig(js string) *serviceconfig.ParseResult {
    +
    +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
     	if len(js) == 0 {
     		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
     	}
     	var rsc jsonSC
     	err := json.Unmarshal([]byte(js), &rsc)
     	if err != nil {
    -		logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
    +		logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
     		return &serviceconfig.ParseResult{Err: err}
     	}
     	sc := ServiceConfig{
    -		LB:                rsc.LoadBalancingPolicy,
     		Methods:           make(map[string]MethodConfig),
     		retryThrottling:   rsc.RetryThrottling,
     		healthCheckConfig: rsc.HealthCheckConfig,
     		rawJSONString:     js,
     	}
    -	if c := rsc.LoadBalancingConfig; c != nil {
    -		sc.lbConfig = &lbConfig{
    -			name: c.Name,
    -			cfg:  c.Config,
    +	c := rsc.LoadBalancingConfig
    +	if c == nil {
    +		name := pickfirst.Name
    +		if rsc.LoadBalancingPolicy != nil {
    +			name = *rsc.LoadBalancingPolicy
    +		}
    +		if balancer.Get(name) == nil {
    +			name = pickfirst.Name
     		}
    +		cfg := []map[string]any{{name: struct{}{}}}
    +		strCfg, err := json.Marshal(cfg)
    +		if err != nil {
    +			return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)}
    +		}
    +		r := json.RawMessage(strCfg)
    +		c = &r
    +	}
    +	cfg, err := gracefulswitch.ParseConfig(*c)
    +	if err != nil {
    +		return &serviceconfig.ParseResult{Err: err}
     	}
    +	sc.lbConfig = cfg
     
     	if rsc.MethodConfig == nil {
     		return &serviceconfig.ParseResult{Config: &sc}
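
parseServiceConfig now resolves everything through gracefulswitch.ParseConfig: when loadBalancingConfig is absent it synthesizes a one-entry config list from the legacy loadBalancingPolicy name, defaulting to pick_first. The sketch below only shows the JSON shapes involved and the synthesized fallback; it does not call any gRPC internals.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// What a service config can supply explicitly:
	explicit := `{"loadBalancingConfig": [{"round_robin": {}}]}`

	// What the fallback path above synthesizes when only the legacy
	// loadBalancingPolicy (or nothing) is present: a single-entry list keyed
	// by the policy name, defaulting to pick_first.
	name := "pick_first"
	cfg := []map[string]any{{name: struct{}{}}}
	synthesized, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}

	fmt.Println("explicit:   ", explicit)
	fmt.Println("synthesized:", string(synthesized)) // [{"pick_first":{}}]
}
```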
    @@ -211,8 +222,8 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
     			WaitForReady: m.WaitForReady,
     			Timeout:      (*time.Duration)(m.Timeout),
     		}
    -		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
    -			logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
    +		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
    +			logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
     			return &serviceconfig.ParseResult{Err: err}
     		}
     		if m.MaxRequestMessageBytes != nil {
    @@ -232,13 +243,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
     		for i, n := range *m.Name {
     			path, err := n.generatePath()
     			if err != nil {
    -				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
    +				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
     				return &serviceconfig.ParseResult{Err: err}
     			}
     
     			if _, ok := paths[path]; ok {
     				err = errDuplicatedName
    -				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
    +				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
     				return &serviceconfig.ParseResult{Err: err}
     			}
     			paths[path] = struct{}{}
    @@ -257,7 +268,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
     	return &serviceconfig.ParseResult{Config: &sc}
     }
     
    -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
    +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
     	if jrp == nil {
     		return nil, nil
     	}
    @@ -271,24 +282,23 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
     		return nil, nil
     	}
     
    +	if jrp.MaxAttempts < maxAttempts {
    +		maxAttempts = jrp.MaxAttempts
    +	}
     	rp := &internalserviceconfig.RetryPolicy{
    -		MaxAttempts:          jrp.MaxAttempts,
    +		MaxAttempts:          maxAttempts,
     		InitialBackoff:       time.Duration(jrp.InitialBackoff),
     		MaxBackoff:           time.Duration(jrp.MaxBackoff),
     		BackoffMultiplier:    jrp.BackoffMultiplier,
     		RetryableStatusCodes: make(map[codes.Code]bool),
     	}
    -	if rp.MaxAttempts > 5 {
    -		// TODO(retry): Make the max maxAttempts configurable.
    -		rp.MaxAttempts = 5
    -	}
     	for _, code := range jrp.RetryableStatusCodes {
     		rp.RetryableStatusCodes[code] = true
     	}
     	return rp, nil
     }
     
    -func min(a, b *int) *int {
    +func minPointers(a, b *int) *int {
     	if *a < *b {
     		return a
     	}
    @@ -300,7 +310,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
     		return &defaultVal
     	}
     	if mcMax != nil && doptMax != nil {
    -		return min(mcMax, doptMax)
    +		return minPointers(mcMax, doptMax)
     	}
     	if mcMax != nil {
     		return mcMax
    diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go
    deleted file mode 100644
    index 48a64cfe8e..0000000000
    --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go
    +++ /dev/null
    @@ -1,154 +0,0 @@
    -/*
    - *
    - * Copyright 2023 gRPC authors.
    - *
    - * Licensed under the Apache License, Version 2.0 (the "License");
    - * you may not use this file except in compliance with the License.
    - * You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - *
    - */
    -
    -package grpc
    -
    -import "sync"
    -
    -// SharedBufferPool is a pool of buffers that can be shared, resulting in
    -// decreased memory allocation. Currently, in gRPC-go, it is only utilized
    -// for parsing incoming messages.
    -//
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    -// later release.
    -type SharedBufferPool interface {
    -	// Get returns a buffer with specified length from the pool.
    -	//
    -	// The returned byte slice may be not zero initialized.
    -	Get(length int) []byte
    -
    -	// Put returns a buffer to the pool.
    -	Put(*[]byte)
    -}
    -
    -// NewSharedBufferPool creates a simple SharedBufferPool with buckets
    -// of different sizes to optimize memory usage. This prevents the pool from
    -// wasting large amounts of memory, even when handling messages of varying sizes.
    -//
    -// # Experimental
    -//
    -// Notice: This API is EXPERIMENTAL and may be changed or removed in a
    -// later release.
    -func NewSharedBufferPool() SharedBufferPool {
    -	return &simpleSharedBufferPool{
    -		pools: [poolArraySize]simpleSharedBufferChildPool{
    -			newBytesPool(level0PoolMaxSize),
    -			newBytesPool(level1PoolMaxSize),
    -			newBytesPool(level2PoolMaxSize),
    -			newBytesPool(level3PoolMaxSize),
    -			newBytesPool(level4PoolMaxSize),
    -			newBytesPool(0),
    -		},
    -	}
    -}
    -
    -// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
    -type simpleSharedBufferPool struct {
    -	pools [poolArraySize]simpleSharedBufferChildPool
    -}
    -
    -func (p *simpleSharedBufferPool) Get(size int) []byte {
    -	return p.pools[p.poolIdx(size)].Get(size)
    -}
    -
    -func (p *simpleSharedBufferPool) Put(bs *[]byte) {
    -	p.pools[p.poolIdx(cap(*bs))].Put(bs)
    -}
    -
    -func (p *simpleSharedBufferPool) poolIdx(size int) int {
    -	switch {
    -	case size <= level0PoolMaxSize:
    -		return level0PoolIdx
    -	case size <= level1PoolMaxSize:
    -		return level1PoolIdx
    -	case size <= level2PoolMaxSize:
    -		return level2PoolIdx
    -	case size <= level3PoolMaxSize:
    -		return level3PoolIdx
    -	case size <= level4PoolMaxSize:
    -		return level4PoolIdx
    -	default:
    -		return levelMaxPoolIdx
    -	}
    -}
    -
    -const (
    -	level0PoolMaxSize = 16                     //  16  B
    -	level1PoolMaxSize = level0PoolMaxSize * 16 // 256  B
    -	level2PoolMaxSize = level1PoolMaxSize * 16 //   4 KB
    -	level3PoolMaxSize = level2PoolMaxSize * 16 //  64 KB
    -	level4PoolMaxSize = level3PoolMaxSize * 16 //   1 MB
    -)
    -
    -const (
    -	level0PoolIdx = iota
    -	level1PoolIdx
    -	level2PoolIdx
    -	level3PoolIdx
    -	level4PoolIdx
    -	levelMaxPoolIdx
    -	poolArraySize
    -)
    -
    -type simpleSharedBufferChildPool interface {
    -	Get(size int) []byte
    -	Put(any)
    -}
    -
    -type bufferPool struct {
    -	sync.Pool
    -
    -	defaultSize int
    -}
    -
    -func (p *bufferPool) Get(size int) []byte {
    -	bs := p.Pool.Get().(*[]byte)
    -
    -	if cap(*bs) < size {
    -		p.Pool.Put(bs)
    -
    -		return make([]byte, size)
    -	}
    -
    -	return (*bs)[:size]
    -}
    -
    -func newBytesPool(size int) simpleSharedBufferChildPool {
    -	return &bufferPool{
    -		Pool: sync.Pool{
    -			New: func() any {
    -				bs := make([]byte, size)
    -				return &bs
    -			},
    -		},
    -		defaultSize: size,
    -	}
    -}
    -
    -// nopBufferPool is a buffer pool just makes new buffer without pooling.
    -type nopBufferPool struct {
    -}
    -
    -func (nopBufferPool) Get(length int) []byte {
    -	return make([]byte, length)
    -}
    -
    -func (nopBufferPool) Put(*[]byte) {
    -}
    diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go
    new file mode 100644
    index 0000000000..641c8e9794
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/stats/metrics.go
    @@ -0,0 +1,81 @@
    +/*
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package stats
    +
    +import "maps"
    +
    +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable,
    +// however Add and Remove can make copies with specific metrics added or
    +// removed, respectively.
    +//
    +// Do not construct directly; use NewMetricSet instead.
    +type MetricSet struct {
    +	// metrics are the set of metrics to initialize.
    +	metrics map[string]bool
    +}
    +
    +// NewMetricSet returns a MetricSet containing metricNames.
    +func NewMetricSet(metricNames ...string) *MetricSet {
    +	newMetrics := make(map[string]bool)
    +	for _, metric := range metricNames {
    +		newMetrics[metric] = true
    +	}
    +	return &MetricSet{metrics: newMetrics}
    +}
    +
    +// Metrics returns the metrics set. The returned map is read-only and must not
    +// be modified.
    +func (m *MetricSet) Metrics() map[string]bool {
    +	return m.metrics
    +}
    +
    +// Add adds the metricNames to the metrics set and returns a new copy with the
    +// additional metrics.
    +func (m *MetricSet) Add(metricNames ...string) *MetricSet {
    +	newMetrics := make(map[string]bool)
    +	for metric := range m.metrics {
    +		newMetrics[metric] = true
    +	}
    +
    +	for _, metric := range metricNames {
    +		newMetrics[metric] = true
    +	}
    +	return &MetricSet{metrics: newMetrics}
    +}
    +
    +// Join joins the metrics passed in with the metrics set, and returns a new copy
    +// with the merged metrics.
    +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet {
    +	newMetrics := make(map[string]bool)
    +	maps.Copy(newMetrics, m.metrics)
    +	maps.Copy(newMetrics, metrics.metrics)
    +	return &MetricSet{metrics: newMetrics}
    +}
    +
    +// Remove removes the metricNames from the metrics set and returns a new copy
    +// with the metrics removed.
    +func (m *MetricSet) Remove(metricNames ...string) *MetricSet {
    +	newMetrics := make(map[string]bool)
    +	for metric := range m.metrics {
    +		newMetrics[metric] = true
    +	}
    +
    +	for _, metric := range metricNames {
    +		delete(newMetrics, metric)
    +	}
    +	return &MetricSet{metrics: newMetrics}
    +}
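
The new MetricSet type above is a small immutable-set API: NewMetricSet builds a set, and Add, Remove, and Join each return a fresh copy rather than mutating the receiver. A minimal usage sketch, not part of this diff (the metric names are illustrative):

package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	// Build a base set; a MetricSet is never mutated after creation.
	base := stats.NewMetricSet(
		"grpc.client.attempt.duration",
		"grpc.client.call.duration",
	)

	// Add, Remove and Join all return new copies of the set.
	withServer := base.Add("grpc.server.call.duration")
	clientOnly := withServer.Remove("grpc.server.call.duration")
	merged := base.Join(stats.NewMetricSet("grpc.server.call.duration"))

	// Metrics exposes a read-only view of the underlying set.
	fmt.Println(len(base.Metrics()), len(withServer.Metrics()),
		len(clientOnly.Metrics()), len(merged.Metrics()))
}
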
    diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
    index 4ab70e2d46..6f20d2d548 100644
    --- a/vendor/google.golang.org/grpc/stats/stats.go
    +++ b/vendor/google.golang.org/grpc/stats/stats.go
    @@ -73,10 +73,10 @@ func (*PickerUpdated) isRPCStats() {}
     type InPayload struct {
     	// Client is true if this InPayload is from client side.
     	Client bool
    -	// Payload is the payload with original type.
+	// Payload is the payload with original type. It may be modified once the
+	// HandleRPC call that received this InPayload returns, so it must be
+	// copied if it is needed later.
     	Payload any
    -	// Data is the serialized message payload.
    -	Data []byte
     
     	// Length is the size of the uncompressed payload data. Does not include any
     	// framing (gRPC or HTTP/2).
    @@ -143,10 +143,10 @@ func (s *InTrailer) isRPCStats() {}
     type OutPayload struct {
     	// Client is true if this OutPayload is from client side.
     	Client bool
    -	// Payload is the payload with original type.
+	// Payload is the payload with original type. It may be modified once the
+	// HandleRPC call that received this OutPayload returns, so it must be
+	// copied if it is needed later.
     	Payload any
    -	// Data is the serialized message payload.
    -	Data []byte
     	// Length is the size of the uncompressed payload data. Does not include any
     	// framing (gRPC or HTTP/2).
     	Length int
    @@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client }
     
     func (s *ConnEnd) isConnStats() {}
     
    -type incomingTagsKey struct{}
    -type outgoingTagsKey struct{}
    -
     // SetTags attaches stats tagging data to the context, which will be sent in
     // the outgoing RPC with the header grpc-tags-bin.  Subsequent calls to
     // SetTags will overwrite the values from earlier calls.
     //
    -// NOTE: this is provided only for backward compatibility with existing clients
    -// and will likely be removed in an upcoming release.  New uses should transmit
    -// this type of data using metadata with a different, non-reserved (i.e. does
    -// not begin with "grpc-") header name.
    +// Deprecated: set the `grpc-tags-bin` header in the metadata instead.
     func SetTags(ctx context.Context, b []byte) context.Context {
    -	return context.WithValue(ctx, outgoingTagsKey{}, b)
    +	return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
     }
     
     // Tags returns the tags from the context for the inbound RPC.
     //
    -// NOTE: this is provided only for backward compatibility with existing clients
    -// and will likely be removed in an upcoming release.  New uses should transmit
    -// this type of data using metadata with a different, non-reserved (i.e. does
    -// not begin with "grpc-") header name.
    +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead.
     func Tags(ctx context.Context) []byte {
    -	b, _ := ctx.Value(incomingTagsKey{}).([]byte)
    -	return b
    -}
    -
    -// SetIncomingTags attaches stats tagging data to the context, to be read by
    -// the application (not sent in outgoing RPCs).
    -//
    -// This is intended for gRPC-internal use ONLY.
    -func SetIncomingTags(ctx context.Context, b []byte) context.Context {
    -	return context.WithValue(ctx, incomingTagsKey{}, b)
    -}
    -
    -// OutgoingTags returns the tags from the context for the outbound RPC.
    -//
    -// This is intended for gRPC-internal use ONLY.
    -func OutgoingTags(ctx context.Context) []byte {
    -	b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
    -	return b
    +	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
    +	if len(traceValues) == 0 {
    +		return nil
    +	}
    +	return []byte(traceValues[len(traceValues)-1])
     }
     
    -type incomingTraceKey struct{}
    -type outgoingTraceKey struct{}
    -
     // SetTrace attaches stats tagging data to the context, which will be sent in
     // the outgoing RPC with the header grpc-trace-bin.  Subsequent calls to
     // SetTrace will overwrite the values from earlier calls.
     //
    -// NOTE: this is provided only for backward compatibility with existing clients
    -// and will likely be removed in an upcoming release.  New uses should transmit
    -// this type of data using metadata with a different, non-reserved (i.e. does
    -// not begin with "grpc-") header name.
    +// Deprecated: set the `grpc-trace-bin` header in the metadata instead.
     func SetTrace(ctx context.Context, b []byte) context.Context {
    -	return context.WithValue(ctx, outgoingTraceKey{}, b)
    +	return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b))
     }
     
     // Trace returns the trace from the context for the inbound RPC.
     //
    -// NOTE: this is provided only for backward compatibility with existing clients
    -// and will likely be removed in an upcoming release.  New uses should transmit
    -// this type of data using metadata with a different, non-reserved (i.e. does
    -// not begin with "grpc-") header name.
    +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead.
     func Trace(ctx context.Context) []byte {
    -	b, _ := ctx.Value(incomingTraceKey{}).([]byte)
    -	return b
    -}
    -
    -// SetIncomingTrace attaches stats tagging data to the context, to be read by
    -// the application (not sent in outgoing RPCs).  It is intended for
    -// gRPC-internal use.
    -func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
    -	return context.WithValue(ctx, incomingTraceKey{}, b)
    -}
    -
    -// OutgoingTrace returns the trace from the context for the outbound RPC.  It is
    -// intended for gRPC-internal use.
    -func OutgoingTrace(ctx context.Context) []byte {
    -	b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
    -	return b
    +	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
    +	if len(traceValues) == 0 {
    +		return nil
    +	}
    +	return []byte(traceValues[len(traceValues)-1])
     }
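
Since SetTags/Tags and SetTrace/Trace are now deprecated thin wrappers over regular metadata, new code can set and read the grpc-tags-bin / grpc-trace-bin headers directly. A hedged sketch, not part of this diff; in a real server handler the value would be read from the incoming context of the RPC rather than echoed back from the outgoing metadata like this:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach the binary trace header directly, as the
	// deprecation notices above recommend, instead of stats.SetTrace.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"grpc-trace-bin", string([]byte{0x01, 0x02, 0x03}))

	// For illustration only: a real handler would use
	// metadata.FromIncomingContext on the RPC's context; here we read back
	// the outgoing metadata we just wrote.
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		if vals := md.Get("grpc-trace-bin"); len(vals) > 0 {
			fmt.Printf("trace payload: %x\n", []byte(vals[len(vals)-1]))
		}
	}
}
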
    diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
    index d621f52b1a..17e2267b33 100644
    --- a/vendor/google.golang.org/grpc/stream.go
    +++ b/vendor/google.golang.org/grpc/stream.go
    @@ -23,11 +23,11 @@ import (
     	"errors"
     	"io"
     	"math"
    +	rand "math/rand/v2"
     	"strconv"
     	"sync"
     	"time"
     
    -	"golang.org/x/net/trace"
     	"google.golang.org/grpc/balancer"
     	"google.golang.org/grpc/codes"
     	"google.golang.org/grpc/encoding"
    @@ -35,13 +35,13 @@ import (
     	"google.golang.org/grpc/internal/balancerload"
     	"google.golang.org/grpc/internal/binarylog"
     	"google.golang.org/grpc/internal/channelz"
    -	"google.golang.org/grpc/internal/grpcrand"
     	"google.golang.org/grpc/internal/grpcutil"
     	imetadata "google.golang.org/grpc/internal/metadata"
     	iresolver "google.golang.org/grpc/internal/resolver"
     	"google.golang.org/grpc/internal/serviceconfig"
     	istatus "google.golang.org/grpc/internal/status"
     	"google.golang.org/grpc/internal/transport"
    +	"google.golang.org/grpc/mem"
     	"google.golang.org/grpc/metadata"
     	"google.golang.org/grpc/peer"
     	"google.golang.org/grpc/stats"
    @@ -113,7 +113,9 @@ type ClientStream interface {
     	// SendMsg is generally called by generated code. On error, SendMsg aborts
     	// the stream. If the error was generated by the client, the status is
     	// returned directly; otherwise, io.EOF is returned and the status of
    -	// the stream may be discovered using RecvMsg.
    +	// the stream may be discovered using RecvMsg. For unary or server-streaming
    +	// RPCs (StreamDesc.ClientStreams is false), a nil error is returned
    +	// unconditionally.
     	//
     	// SendMsg blocks until:
     	//   - There is sufficient flow control to schedule m with the transport, or
    @@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
     
     	var mc serviceconfig.MethodConfig
     	var onCommit func()
    -	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
    +	newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
     		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
     	}
     
    @@ -360,7 +362,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
     		cs.attempt = a
     		return nil
     	}
    -	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
    +	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
     		return nil, err
     	}
     
    @@ -431,7 +433,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
     	var trInfo *traceInfo
     	if EnableTracing {
     		trInfo = &traceInfo{
    -			tr: trace.New("grpc.Sent."+methodFamily(method), method),
    +			tr: newTrace("grpc.Sent."+methodFamily(method), method),
     			firstLine: firstLine{
     				client: true,
     			},
    @@ -440,7 +442,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
     			trInfo.firstLine.deadline = time.Until(deadline)
     		}
     		trInfo.tr.LazyLog(&trInfo.firstLine, false)
    -		ctx = trace.NewContext(ctx, trInfo.tr)
    +		ctx = newTraceContext(ctx, trInfo.tr)
     	}
     
     	if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
    @@ -517,7 +519,8 @@ func (a *csAttempt) newStream() error {
     		return toRPCErr(nse.Err)
     	}
     	a.s = s
    -	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
    +	a.ctx = s.Context()
    +	a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
     	return nil
     }
     
    @@ -566,10 +569,15 @@ type clientStream struct {
     	// place where we need to check if the attempt is nil.
     	attempt *csAttempt
     	// TODO(hedging): hedging will have multiple attempts simultaneously.
    -	committed  bool // active attempt committed for retry?
    -	onCommit   func()
    -	buffer     []func(a *csAttempt) error // operations to replay on retry
    -	bufferSize int                        // current size of buffer
    +	committed        bool // active attempt committed for retry?
    +	onCommit         func()
    +	replayBuffer     []replayOp // operations to replay on retry
    +	replayBufferSize int        // current size of replayBuffer
    +}
    +
    +type replayOp struct {
    +	op      func(a *csAttempt) error
    +	cleanup func()
     }
     
     // csAttempt implements a single transport stream attempt within a
    @@ -578,7 +586,7 @@ type csAttempt struct {
     	ctx        context.Context
     	cs         *clientStream
     	t          transport.ClientTransport
    -	s          *transport.Stream
    +	s          *transport.ClientStream
     	p          *parser
     	pickResult balancer.PickResult
     
    @@ -607,7 +615,12 @@ func (cs *clientStream) commitAttemptLocked() {
     		cs.onCommit()
     	}
     	cs.committed = true
    -	cs.buffer = nil
    +	for _, op := range cs.replayBuffer {
    +		if op.cleanup != nil {
    +			op.cleanup()
    +		}
    +	}
    +	cs.replayBuffer = nil
     }
     
     func (cs *clientStream) commitAttempt() {
    @@ -656,13 +669,13 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
     		if len(sps) == 1 {
     			var e error
     			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
    -				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
    +				channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0])
     				cs.retryThrottler.throttle() // This counts as a failure for throttling.
     				return false, err
     			}
     			hasPushback = true
     		} else if len(sps) > 1 {
    -			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
    +			channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps)
     			cs.retryThrottler.throttle() // This counts as a failure for throttling.
     			return false, err
     		}
    @@ -695,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
     		cs.numRetriesSincePushback = 0
     	} else {
     		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
    -		cur := float64(rp.InitialBackoff) * fact
    -		if max := float64(rp.MaxBackoff); cur > max {
    -			cur = max
    -		}
    -		dur = time.Duration(grpcrand.Int63n(int64(cur)))
    +		cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff))
    +		// Apply jitter by multiplying with a random factor between 0.8 and 1.2
    +		cur *= 0.8 + 0.4*rand.Float64()
    +		dur = time.Duration(int64(cur))
     		cs.numRetriesSincePushback++
     	}
     
    @@ -732,7 +744,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
     			// the stream is canceled.
     			return err
     		}
    -		// Note that the first op in the replay buffer always sets cs.attempt
    +		// Note that the first op in replayBuffer always sets cs.attempt
     		// if it is able to pick a transport and create a stream.
     		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
     			return nil
    @@ -761,7 +773,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
     			// already be status errors.
     			return toRPCErr(op(cs.attempt))
     		}
    -		if len(cs.buffer) == 0 {
    +		if len(cs.replayBuffer) == 0 {
     			// For the first op, which controls creation of the stream and
     			// assigns cs.attempt, we need to create a new attempt inline
     			// before executing the first op.  On subsequent ops, the attempt
    @@ -851,25 +863,26 @@ func (cs *clientStream) Trailer() metadata.MD {
     }
     
     func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
    -	for _, f := range cs.buffer {
    -		if err := f(attempt); err != nil {
    +	for _, f := range cs.replayBuffer {
    +		if err := f.op(attempt); err != nil {
     			return err
     		}
     	}
     	return nil
     }
     
    -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
    +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
     	// Note: we still will buffer if retry is disabled (for transparent retries).
     	if cs.committed {
     		return
     	}
    -	cs.bufferSize += sz
    -	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
    +	cs.replayBufferSize += sz
    +	if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
     		cs.commitAttemptLocked()
    +		cleanup()
     		return
     	}
    -	cs.buffer = append(cs.buffer, op)
    +	cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
     }
     
     func (cs *clientStream) SendMsg(m any) (err error) {
    @@ -891,23 +904,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
     	}
     
     	// load hdr, payload, data
    -	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
    +	hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
     	if err != nil {
     		return err
     	}
     
    +	defer func() {
    +		data.Free()
+		// only free payload if compression was performed, in which case it is a
+		// different set of buffers from data.
    +		if pf.isCompressed() {
    +			payload.Free()
    +		}
    +	}()
    +
    +	dataLen := data.Len()
    +	payloadLen := payload.Len()
     	// TODO(dfawley): should we be checking len(data) instead?
    -	if len(payload) > *cs.callInfo.maxSendMessageSize {
    -		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
    +	if payloadLen > *cs.callInfo.maxSendMessageSize {
    +		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
     	}
    +
    +	// always take an extra ref in case data == payload (i.e. when the data isn't
    +	// compressed). The original ref will always be freed by the deferred free above.
    +	payload.Ref()
     	op := func(a *csAttempt) error {
    -		return a.sendMsg(m, hdr, payload, data)
    +		return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
    +	}
    +
    +	// onSuccess is invoked when the op is captured for a subsequent retry. If the
    +	// stream was established by a previous message and therefore retries are
    +	// disabled, onSuccess will not be invoked, and payloadRef can be freed
    +	// immediately.
    +	onSuccessCalled := false
    +	err = cs.withRetry(op, func() {
    +		cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
    +		onSuccessCalled = true
    +	})
    +	if !onSuccessCalled {
    +		payload.Free()
     	}
    -	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
     	if len(cs.binlogs) != 0 && err == nil {
     		cm := &binarylog.ClientMessage{
     			OnClientSide: true,
    -			Message:      data,
    +			Message:      data.Materialize(),
     		}
     		for _, binlog := range cs.binlogs {
     			binlog.Log(cs.ctx, cm)
    @@ -924,6 +964,7 @@ func (cs *clientStream) RecvMsg(m any) error {
     	var recvInfo *payloadInfo
     	if len(cs.binlogs) != 0 {
     		recvInfo = &payloadInfo{}
    +		defer recvInfo.free()
     	}
     	err := cs.withRetry(func(a *csAttempt) error {
     		return a.recvMsg(m, recvInfo)
    @@ -931,7 +972,7 @@ func (cs *clientStream) RecvMsg(m any) error {
     	if len(cs.binlogs) != 0 && err == nil {
     		sm := &binarylog.ServerMessage{
     			OnClientSide: true,
    -			Message:      recvInfo.uncompressedBytes,
    +			Message:      recvInfo.uncompressedBytes.Materialize(),
     		}
     		for _, binlog := range cs.binlogs {
     			binlog.Log(cs.ctx, sm)
    @@ -951,14 +992,14 @@ func (cs *clientStream) CloseSend() error {
     	}
     	cs.sentLast = true
     	op := func(a *csAttempt) error {
    -		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
    +		a.s.Write(nil, nil, &transport.WriteOptions{Last: true})
     		// Always return nil; io.EOF is the only error that might make sense
     		// instead, but there is no need to signal the client to call RecvMsg
     		// as the only use left for the stream after CloseSend is to call
     		// RecvMsg.  This also matches historical behavior.
     		return nil
     	}
    -	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
    +	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
     	if len(cs.binlogs) != 0 {
     		chc := &binarylog.ClientHalfClose{
     			OnClientSide: true,
    @@ -1034,7 +1075,7 @@ func (cs *clientStream) finish(err error) {
     	cs.cancel()
     }
     
    -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
    +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
     	cs := a.cs
     	if a.trInfo != nil {
     		a.mu.Lock()
    @@ -1043,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
     		}
     		a.mu.Unlock()
     	}
    -	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
    +	if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
     		if !cs.desc.ClientStreams {
     			// For non-client-streaming RPCs, we return nil instead of EOF on error
     			// because the generated code requires it.  finish is not called; RecvMsg()
    @@ -1052,11 +1093,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
     		}
     		return io.EOF
     	}
    -	for _, sh := range a.statsHandlers {
    -		sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
    -	}
    -	if channelz.IsOn() {
    -		a.t.IncrMsgSent()
    +	if len(a.statsHandlers) != 0 {
    +		for _, sh := range a.statsHandlers {
    +			sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
    +		}
     	}
     	return nil
     }
    @@ -1065,6 +1105,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     	cs := a.cs
     	if len(a.statsHandlers) != 0 && payInfo == nil {
     		payInfo = &payloadInfo{}
    +		defer payInfo.free()
     	}
     
     	if !a.decompSet {
    @@ -1083,8 +1124,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     		// Only initialize this state once per stream.
     		a.decompSet = true
     	}
    -	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
    -	if err != nil {
    +	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
     		if err == io.EOF {
     			if statusErr := a.s.Status().Err(); statusErr != nil {
     				return statusErr
    @@ -1103,33 +1143,26 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
     	}
     	for _, sh := range a.statsHandlers {
     		sh.HandleRPC(a.ctx, &stats.InPayload{
    -			Client:   true,
    -			RecvTime: time.Now(),
    -			Payload:  m,
    -			// TODO truncate large payload.
    -			Data:             payInfo.uncompressedBytes,
    +			Client:           true,
    +			RecvTime:         time.Now(),
    +			Payload:          m,
     			WireLength:       payInfo.compressedLength + headerLen,
     			CompressedLength: payInfo.compressedLength,
    -			Length:           len(payInfo.uncompressedBytes),
    +			Length:           payInfo.uncompressedBytes.Len(),
     		})
     	}
    -	if channelz.IsOn() {
    -		a.t.IncrMsgRecv()
    -	}
     	if cs.desc.ServerStreams {
     		// Subsequent messages should be received by subsequent RecvMsg calls.
     		return nil
     	}
     	// Special handling for non-server-stream rpcs.
     	// This recv expects EOF or errors, so we don't collect inPayload.
    -	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
    -	if err == nil {
    -		return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
    -	}
    -	if err == io.EOF {
    +	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
     		return a.s.Status().Err() // non-server streaming Recv returns nil on success
    +	} else if err != nil {
    +		return toRPCErr(err)
     	}
    -	return toRPCErr(err)
    +	return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
     }
     
     func (a *csAttempt) finish(err error) {
    @@ -1145,7 +1178,7 @@ func (a *csAttempt) finish(err error) {
     	}
     	var tr metadata.MD
     	if a.s != nil {
    -		a.t.CloseStream(a.s, err)
    +		a.s.Close(err)
     		tr = a.s.Trailer()
     	}
     
    @@ -1185,12 +1218,12 @@ func (a *csAttempt) finish(err error) {
     	a.mu.Unlock()
     }
     
    -// newClientStream creates a ClientStream with the specified transport, on the
    +// newNonRetryClientStream creates a ClientStream with the specified transport, on the
     // given addrConn.
     //
     // It's expected that the given transport is either the same one in addrConn, or
     // is already closed. To avoid race, transport is specified separately, instead
    -// of using ac.transpot.
    +// of using ac.transport.
     //
     // Main difference between this and ClientConn.NewStream:
     // - no retry
    @@ -1276,7 +1309,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
     		return nil, err
     	}
     	as.s = s
    -	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
    +	as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
     	ac.incrCallsStarted()
     	if desc != unaryStreamDesc {
     		// Listen on stream context to cleanup when the stream context is
    @@ -1302,7 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
     }
     
     type addrConnStream struct {
    -	s         *transport.Stream
    +	s         *transport.ClientStream
     	ac        *addrConn
     	callHdr   *transport.CallHdr
     	cancel    context.CancelFunc
    @@ -1342,7 +1375,7 @@ func (as *addrConnStream) CloseSend() error {
     	}
     	as.sentLast = true
     
    -	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
    +	as.s.Write(nil, nil, &transport.WriteOptions{Last: true})
     	// Always return nil; io.EOF is the only error that might make sense
     	// instead, but there is no need to signal the client to call RecvMsg
     	// as the only use left for the stream after CloseSend is to call
    @@ -1373,17 +1406,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
     	}
     
     	// load hdr, payload, data
    -	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
    +	hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
     	if err != nil {
     		return err
     	}
     
    +	defer func() {
    +		data.Free()
+		// only free payload if compression was performed, in which case it is a
+		// different set of buffers from data.
    +		if pf.isCompressed() {
    +			payload.Free()
    +		}
    +	}()
    +
     	// TODO(dfawley): should we be checking len(data) instead?
    -	if len(payld) > *as.callInfo.maxSendMessageSize {
    -		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
    +	if payload.Len() > *as.callInfo.maxSendMessageSize {
    +		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
     	}
     
    -	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
    +	if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
     		if !as.desc.ClientStreams {
     			// For non-client-streaming RPCs, we return nil instead of EOF on error
     			// because the generated code requires it.  finish is not called; RecvMsg()
    @@ -1393,9 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
     		return io.EOF
     	}
     
    -	if channelz.IsOn() {
    -		as.t.IncrMsgSent()
    -	}
     	return nil
     }
     
    @@ -1423,8 +1462,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
     		// Only initialize this state once per stream.
     		as.decompSet = true
     	}
    -	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
    -	if err != nil {
    +	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
     		if err == io.EOF {
     			if statusErr := as.s.Status().Err(); statusErr != nil {
     				return statusErr
    @@ -1434,9 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
     		return toRPCErr(err)
     	}
     
    -	if channelz.IsOn() {
    -		as.t.IncrMsgRecv()
    -	}
     	if as.desc.ServerStreams {
     		// Subsequent messages should be received by subsequent RecvMsg calls.
     		return nil
    @@ -1444,14 +1479,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
     
     	// Special handling for non-server-stream rpcs.
     	// This recv expects EOF or errors, so we don't collect inPayload.
    -	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
    -	if err == nil {
    -		return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
    -	}
    -	if err == io.EOF {
    +	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
     		return as.s.Status().Err() // non-server streaming Recv returns nil on success
    +	} else if err != nil {
    +		return toRPCErr(err)
     	}
    -	return toRPCErr(err)
    +	return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
     }
     
     func (as *addrConnStream) finish(err error) {
    @@ -1466,7 +1499,7 @@ func (as *addrConnStream) finish(err error) {
     		err = nil
     	}
     	if as.s != nil {
    -		as.t.CloseStream(as.s, err)
    +		as.s.Close(err)
     	}
     
     	if err != nil {
    @@ -1533,8 +1566,7 @@ type ServerStream interface {
     // serverStream implements a server side Stream.
     type serverStream struct {
     	ctx   context.Context
    -	t     transport.ServerTransport
    -	s     *transport.Stream
    +	s     *transport.ServerStream
     	p     *parser
     	codec baseCodec
     
    @@ -1584,7 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
     		return status.Error(codes.Internal, err.Error())
     	}
     
    -	err = ss.t.WriteHeader(ss.s, md)
    +	err = ss.s.SendHeader(md)
     	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
     		h, _ := ss.s.Header()
     		sh := &binarylog.ServerHeader{
    @@ -1624,7 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
     		}
     		if err != nil && err != io.EOF {
     			st, _ := status.FromError(toRPCErr(err))
    -			ss.t.WriteStatus(ss.s, st)
    +			ss.s.WriteStatus(st)
     			// Non-user specified status was sent out. This should be an error
     			// case (as a server side Cancel maybe).
     			//
    @@ -1632,9 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) {
     			// status from the service handler, we will log that error instead.
     			// This behavior is similar to an interceptor.
     		}
    -		if channelz.IsOn() && err == nil {
    -			ss.t.IncrMsgSent()
    -		}
     	}()
     
     	// Server handler could have set new compressor by calling SetSendCompressor.
    @@ -1645,18 +1674,31 @@ func (ss *serverStream) SendMsg(m any) (err error) {
     	}
     
     	// load hdr, payload, data
    -	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
    +	hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
     	if err != nil {
     		return err
     	}
     
    +	defer func() {
    +		data.Free()
+		// only free payload if compression was performed, in which case it is a
+		// different set of buffers from data.
    +		if pf.isCompressed() {
    +			payload.Free()
    +		}
    +	}()
    +
    +	dataLen := data.Len()
    +	payloadLen := payload.Len()
    +
     	// TODO(dfawley): should we be checking len(data) instead?
    -	if len(payload) > ss.maxSendMessageSize {
    -		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
    +	if payloadLen > ss.maxSendMessageSize {
    +		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
     	}
    -	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
    +	if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil {
     		return toRPCErr(err)
     	}
    +
     	if len(ss.binlogs) != 0 {
     		if !ss.serverHeaderBinlogged {
     			h, _ := ss.s.Header()
    @@ -1669,7 +1711,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
     			}
     		}
     		sm := &binarylog.ServerMessage{
    -			Message: data,
    +			Message: data.Materialize(),
     		}
     		for _, binlog := range ss.binlogs {
     			binlog.Log(ss.ctx, sm)
    @@ -1677,7 +1719,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
     	}
     	if len(ss.statsHandler) != 0 {
     		for _, sh := range ss.statsHandler {
    -			sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
    +			sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
     		}
     	}
     	return nil
    @@ -1699,7 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     		}
     		if err != nil && err != io.EOF {
     			st, _ := status.FromError(toRPCErr(err))
    -			ss.t.WriteStatus(ss.s, st)
    +			ss.s.WriteStatus(st)
     			// Non-user specified status was sent out. This should be an error
     			// case (as a server side Cancel maybe).
     			//
    @@ -1707,15 +1749,13 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     			// status from the service handler, we will log that error instead.
     			// This behavior is similar to an interceptor.
     		}
    -		if channelz.IsOn() && err == nil {
    -			ss.t.IncrMsgRecv()
    -		}
     	}()
     	var payInfo *payloadInfo
     	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
     		payInfo = &payloadInfo{}
    +		defer payInfo.free()
     	}
    -	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
    +	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
     		if err == io.EOF {
     			if len(ss.binlogs) != 0 {
     				chc := &binarylog.ClientHalfClose{}
    @@ -1733,11 +1773,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     	if len(ss.statsHandler) != 0 {
     		for _, sh := range ss.statsHandler {
     			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
    -				RecvTime: time.Now(),
    -				Payload:  m,
    -				// TODO truncate large payload.
    -				Data:             payInfo.uncompressedBytes,
    -				Length:           len(payInfo.uncompressedBytes),
    +				RecvTime:         time.Now(),
    +				Payload:          m,
    +				Length:           payInfo.uncompressedBytes.Len(),
     				WireLength:       payInfo.compressedLength + headerLen,
     				CompressedLength: payInfo.compressedLength,
     			})
    @@ -1745,7 +1783,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
     	}
     	if len(ss.binlogs) != 0 {
     		cm := &binarylog.ClientMessage{
    -			Message: payInfo.uncompressedBytes,
    +			Message: payInfo.uncompressedBytes.Materialize(),
     		}
     		for _, binlog := range ss.binlogs {
     			binlog.Log(ss.ctx, cm)
    @@ -1760,23 +1798,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
     	return Method(stream.Context())
     }
     
    -// prepareMsg returns the hdr, payload and data
    -// using the compressors passed or using the
    -// passed preparedmsg
    -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned payloadFormat indicates whether
+// compression was performed and therefore whether the payload needs to be freed
+// in addition to the returned data. Freeing the payload when no compression was
+// performed can lead to undefined behavior.
    +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
     	if preparedMsg, ok := m.(*PreparedMsg); ok {
    -		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
    +		return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
     	}
     	// The input interface is not a prepared msg.
     	// Marshal and Compress the data at this point
     	data, err = encode(codec, m)
     	if err != nil {
    -		return nil, nil, nil, err
    +		return nil, nil, nil, 0, err
     	}
    -	compData, err := compress(data, cp, comp)
    +	compData, pf, err := compress(data, cp, comp, pool)
     	if err != nil {
    -		return nil, nil, nil, err
    +		data.Free()
    +		return nil, nil, nil, 0, err
     	}
    -	hdr, payload = msgHeader(data, compData)
    -	return hdr, payload, data, nil
    +	hdr, payload = msgHeader(data, compData, pf)
    +	return hdr, data, payload, pf, nil
     }
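
The retry backoff hunk in stream.go above replaces grpcrand.Int63n with multiplicative jitter: the exponential delay is capped at MaxBackoff and then scaled by a random factor in [0.8, 1.2). A standalone sketch of the same computation, with illustrative parameter names rather than gRPC's own:

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// jitteredBackoff mirrors the shape of the retry delay computed above:
// exponential growth capped at maxBackoff, then scaled by a random factor
// in [0.8, 1.2).
func jitteredBackoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64()
	return time.Duration(int64(cur))
}

func main() {
	for retries := 0; retries < 5; retries++ {
		fmt.Println(jitteredBackoff(100*time.Millisecond, 2*time.Second, 2.0, retries))
	}
}
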
    diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
    new file mode 100644
    index 0000000000..0037fee0bd
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/stream_interfaces.go
    @@ -0,0 +1,238 @@
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package grpc
    +
    +// ServerStreamingClient represents the client side of a server-streaming (one
    +// request, many responses) RPC. It is generic over the type of the response
    +// message. It is used in generated code.
    +type ServerStreamingClient[Res any] interface {
    +	// Recv receives the next response message from the server. The client may
    +	// repeatedly call Recv to read messages from the response stream.  If
    +	// io.EOF is returned, the stream has terminated with an OK status.  Any
    +	// other error is compatible with the status package and indicates the
    +	// RPC's status code and message.
    +	Recv() (*Res, error)
    +
    +	// ClientStream is embedded to provide Context, Header, and Trailer
    +	// functionality.  No other methods in the ClientStream should be called
    +	// directly.
    +	ClientStream
    +}
    +
    +// ServerStreamingServer represents the server side of a server-streaming (one
    +// request, many responses) RPC. It is generic over the type of the response
    +// message. It is used in generated code.
    +//
    +// To terminate the response stream, return from the handler method and return
    +// an error from the status package, or use nil to indicate an OK status code.
    +type ServerStreamingServer[Res any] interface {
    +	// Send sends a response message to the client.  The server handler may
    +	// call Send multiple times to send multiple messages to the client.  An
    +	// error is returned if the stream was terminated unexpectedly, and the
    +	// handler method should return, as the stream is no longer usable.
    +	Send(*Res) error
    +
    +	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
    +	// SetTrailer functionality.  No other methods in the ServerStream should
    +	// be called directly.
    +	ServerStream
    +}
    +
    +// ClientStreamingClient represents the client side of a client-streaming (many
    +// requests, one response) RPC. It is generic over both the type of the request
    +// message stream and the type of the unary response message. It is used in
    +// generated code.
    +type ClientStreamingClient[Req any, Res any] interface {
    +	// Send sends a request message to the server.  The client may call Send
    +	// multiple times to send multiple messages to the server.  On error, Send
    +	// aborts the stream.  If the error was generated by the client, the status
    +	// is returned directly.  Otherwise, io.EOF is returned, and the status of
    +	// the stream may be discovered using CloseAndRecv().
    +	Send(*Req) error
    +
    +	// CloseAndRecv closes the request stream and waits for the server's
    +	// response.  This method must be called once and only once after sending
    +	// all request messages.  Any error returned is implemented by the status
    +	// package.
    +	CloseAndRecv() (*Res, error)
    +
    +	// ClientStream is embedded to provide Context, Header, and Trailer
    +	// functionality.  No other methods in the ClientStream should be called
    +	// directly.
    +	ClientStream
    +}
    +
    +// ClientStreamingServer represents the server side of a client-streaming (many
    +// requests, one response) RPC. It is generic over both the type of the request
    +// message stream and the type of the unary response message. It is used in
    +// generated code.
    +//
    +// To terminate the RPC, call SendAndClose and return nil from the method
    +// handler or do not call SendAndClose and return an error from the status
    +// package.
    +type ClientStreamingServer[Req any, Res any] interface {
    +	// Recv receives the next request message from the client.  The server may
    +	// repeatedly call Recv to read messages from the request stream.  If
    +	// io.EOF is returned, it indicates the client called CloseAndRecv on its
    +	// ClientStreamingClient.  Any other error indicates the stream was
    +	// terminated unexpectedly, and the handler method should return, as the
    +	// stream is no longer usable.
    +	Recv() (*Req, error)
    +
    +	// SendAndClose sends a single response message to the client and closes
    +	// the stream.  This method must be called once and only once after all
    +	// request messages have been processed.  Recv should not be called after
    +	// calling SendAndClose.
    +	SendAndClose(*Res) error
    +
    +	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
    +	// SetTrailer functionality.  No other methods in the ServerStream should
    +	// be called directly.
    +	ServerStream
    +}
    +
    +// BidiStreamingClient represents the client side of a bidirectional-streaming
    +// (many requests, many responses) RPC. It is generic over both the type of the
    +// request message stream and the type of the response message stream. It is
    +// used in generated code.
    +type BidiStreamingClient[Req any, Res any] interface {
    +	// Send sends a request message to the server.  The client may call Send
    +	// multiple times to send multiple messages to the server.  On error, Send
    +	// aborts the stream.  If the error was generated by the client, the status
    +	// is returned directly.  Otherwise, io.EOF is returned, and the status of
    +	// the stream may be discovered using Recv().
    +	Send(*Req) error
    +
    +	// Recv receives the next response message from the server. The client may
    +	// repeatedly call Recv to read messages from the response stream.  If
    +	// io.EOF is returned, the stream has terminated with an OK status.  Any
    +	// other error is compatible with the status package and indicates the
    +	// RPC's status code and message.
    +	Recv() (*Res, error)
    +
    +	// ClientStream is embedded to provide Context, Header, Trailer, and
    +	// CloseSend functionality.  No other methods in the ClientStream should be
    +	// called directly.
    +	ClientStream
    +}
    +
    +// BidiStreamingServer represents the server side of a bidirectional-streaming
    +// (many requests, many responses) RPC. It is generic over both the type of the
    +// request message stream and the type of the response message stream. It is
    +// used in generated code.
    +//
    +// To terminate the stream, return from the handler method and return
    +// an error from the status package, or use nil to indicate an OK status code.
    +type BidiStreamingServer[Req any, Res any] interface {
    +	// Recv receives the next request message from the client.  The server may
    +	// repeatedly call Recv to read messages from the request stream.  If
    +	// io.EOF is returned, it indicates the client called CloseSend on its
    +	// BidiStreamingClient.  Any other error indicates the stream was
    +	// terminated unexpectedly, and the handler method should return, as the
    +	// stream is no longer usable.
    +	Recv() (*Req, error)
    +
    +	// Send sends a response message to the client.  The server handler may
    +	// call Send multiple times to send multiple messages to the client.  An
    +	// error is returned if the stream was terminated unexpectedly, and the
    +	// handler method should return, as the stream is no longer usable.
    +	Send(*Res) error
    +
    +	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
    +	// SetTrailer functionality.  No other methods in the ServerStream should
    +	// be called directly.
    +	ServerStream
    +}
    +
    +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient,
    +// and BidiStreamingClient interfaces. It is used in generated code.
    +type GenericClientStream[Req any, Res any] struct {
    +	ClientStream
    +}
    +
    +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
    +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
    +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
    +
    +// Send pushes one message into the stream of requests to be consumed by the
    +// server. The type of message which can be sent is determined by the Req type
    +// parameter of the GenericClientStream receiver.
    +func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
    +	return x.ClientStream.SendMsg(m)
    +}
    +
    +// Recv reads one message from the stream of responses generated by the server.
    +// The type of the message returned is determined by the Res type parameter
    +// of the GenericClientStream receiver.
    +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
    +	m := new(Res)
    +	if err := x.ClientStream.RecvMsg(m); err != nil {
    +		return nil, err
    +	}
    +	return m, nil
    +}
    +
    +// CloseAndRecv closes the sending side of the stream, then receives the unary
    +// response from the server. The type of message which it returns is determined
    +// by the Res type parameter of the GenericClientStream receiver.
    +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
    +	if err := x.ClientStream.CloseSend(); err != nil {
    +		return nil, err
    +	}
    +	m := new(Res)
    +	if err := x.ClientStream.RecvMsg(m); err != nil {
    +		return nil, err
    +	}
    +	return m, nil
    +}
    +
    +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
    +// and BidiStreamingServer interfaces. It is used in generated code.
    +type GenericServerStream[Req any, Res any] struct {
    +	ServerStream
    +}
    +
    +var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
    +var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
    +var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
    +
    +// Send pushes one message into the stream of responses to be consumed by the
    +// client. The type of message which can be sent is determined by the Res
+// type parameter of the GenericServerStream receiver.
    +func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
    +	return x.ServerStream.SendMsg(m)
    +}
    +
    +// SendAndClose pushes the unary response to the client. The type of message
    +// which can be sent is determined by the Res type parameter of the
+// GenericServerStream receiver.
    +func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
    +	return x.ServerStream.SendMsg(m)
    +}
    +
    +// Recv reads one message from the stream of requests generated by the client.
    +// The type of the message returned is determined by the Req type parameter
+// of the GenericServerStream receiver.
    +func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
    +	m := new(Req)
    +	if err := x.ServerStream.RecvMsg(m); err != nil {
    +		return nil, err
    +	}
    +	return m, nil
    +}
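
stream_interfaces.go adds generic, generated-code-facing interfaces for each streaming shape. A hedged sketch of how the server-streaming pair is typically consumed; Reply is a hypothetical stand-in for a generated message type, and in practice the streams come from generated client and server code:

package main

import (
	"errors"
	"io"
	"log"

	"google.golang.org/grpc"
)

// Reply is a hypothetical stand-in for a generated protobuf response type.
type Reply struct{ N int }

// drainAll illustrates the client-side contract of ServerStreamingClient:
// call Recv until io.EOF, which signals the stream ended with an OK status;
// any other error carries the RPC's status.
func drainAll(stream grpc.ServerStreamingClient[Reply]) ([]*Reply, error) {
	var out []*Reply
	for {
		msg, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
		out = append(out, msg)
	}
}

// sendAll illustrates the server-side contract of ServerStreamingServer:
// Send as many responses as needed, then return nil (OK) or a status error.
func sendAll(stream grpc.ServerStreamingServer[Reply], n int) error {
	for i := 0; i < n; i++ {
		if err := stream.Send(&Reply{N: i}); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// These helpers are wired up by generated code in practice; this main
	// exists only so the sketch compiles as a standalone program.
	log.Println("sketch only; see drainAll and sendAll above")
}
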
    diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
    index 9ded79321b..10f4f798f5 100644
    --- a/vendor/google.golang.org/grpc/trace.go
    +++ b/vendor/google.golang.org/grpc/trace.go
    @@ -26,8 +26,6 @@ import (
     	"strings"
     	"sync"
     	"time"
    -
    -	"golang.org/x/net/trace"
     )
     
     // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
    @@ -44,9 +42,31 @@ func methodFamily(m string) string {
     	return m
     }
     
    +// traceEventLog mirrors golang.org/x/net/trace.EventLog.
    +//
    +// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
    +type traceEventLog interface {
    +	Printf(format string, a ...any)
    +	Errorf(format string, a ...any)
    +	Finish()
    +}
    +
    +// traceLog mirrors golang.org/x/net/trace.Trace.
    +//
    +// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
    +type traceLog interface {
    +	LazyLog(x fmt.Stringer, sensitive bool)
    +	LazyPrintf(format string, a ...any)
    +	SetError()
    +	SetRecycler(f func(any))
    +	SetTraceInfo(traceID, spanID uint64)
    +	SetMaxEvents(m int)
    +	Finish()
    +}
    +
     // traceInfo contains tracing information for an RPC.
     type traceInfo struct {
    -	tr        trace.Trace
    +	tr        traceLog
     	firstLine firstLine
     }
     
    diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go
    new file mode 100644
    index 0000000000..1da3a2308e
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/trace_notrace.go
    @@ -0,0 +1,52 @@
    +//go:build grpcnotrace
    +
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package grpc
    +
+// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in
+// turn enables dead code elimination in binaries that use gRPC-Go and can
+// yield a 10-15% reduction in binary size when tracing is not needed.
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +type notrace struct{}
    +
    +func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {}
    +func (notrace) LazyPrintf(format string, a ...any)     {}
    +func (notrace) SetError()                              {}
    +func (notrace) SetRecycler(f func(any))                {}
    +func (notrace) SetTraceInfo(traceID, spanID uint64)    {}
    +func (notrace) SetMaxEvents(m int)                     {}
    +func (notrace) Finish()                                {}
    +
    +func newTrace(family, title string) traceLog {
    +	return notrace{}
    +}
    +
    +func newTraceContext(ctx context.Context, tr traceLog) context.Context {
    +	return ctx
    +}
    +
    +func newTraceEventLog(family, title string) traceEventLog {
    +	return nil
    +}
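
As the comment in trace_notrace.go notes, grpcnotrace is an ordinary Go build tag, so opting in is just a matter of passing it at build time and letting the build constraints above select the no-op implementations. A hypothetical placeholder file illustrating the constraint and the build command (assumed usage, not part of gRPC):

//go:build grpcnotrace

// Package tracenote is a placeholder compiled only when the binary is built
// with the tag that selects the no-op tracing implementation above, e.g.:
//
//	go build -tags grpcnotrace ./...
//
// With the tag set, newTrace/newTraceContext are no-ops and
// golang.org/x/net/trace can be eliminated as dead code.
package tracenote
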
    diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go
    new file mode 100644
    index 0000000000..88d6e8571e
    --- /dev/null
    +++ b/vendor/google.golang.org/grpc/trace_withtrace.go
    @@ -0,0 +1,39 @@
    +//go:build !grpcnotrace
    +
    +/*
    + *
    + * Copyright 2024 gRPC authors.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + *
    + */
    +
    +package grpc
    +
    +import (
    +	"context"
    +
    +	t "golang.org/x/net/trace"
    +)
    +
    +func newTrace(family, title string) traceLog {
    +	return t.New(family, title)
    +}
    +
    +func newTraceContext(ctx context.Context, tr traceLog) context.Context {
    +	return t.NewContext(ctx, tr)
    +}
    +
    +func newTraceEventLog(family, title string) traceEventLog {
    +	return t.NewEventLog(family, title)
    +}
    diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
    index 1ad1ba2ad6..d2bba7f3d9 100644
    --- a/vendor/google.golang.org/grpc/version.go
    +++ b/vendor/google.golang.org/grpc/version.go
    @@ -19,4 +19,4 @@
     package grpc
     
     // Version is the current grpc version.
    -const Version = "1.61.0"
    +const Version = "1.69.4"
    diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
    deleted file mode 100644
    index 5da38a4099..0000000000
    --- a/vendor/google.golang.org/grpc/vet.sh
    +++ /dev/null
    @@ -1,190 +0,0 @@
    -#!/bin/bash
    -
    -set -ex  # Exit on error; debugging enabled.
    -set -o pipefail  # Fail a pipe if any sub-command fails.
    -
    -# not makes sure the command passed to it does not exit with a return code of 0.
    -not() {
    -  # This is required instead of the earlier (! $COMMAND) because subshells and
    -  # pipefail don't work the same on Darwin as in Linux.
    -  ! "$@"
    -}
    -
    -die() {
    -  echo "$@" >&2
    -  exit 1
    -}
    -
    -fail_on_output() {
    -  tee /dev/stderr | not read
    -}
    -
    -# Check to make sure it's safe to modify the user's git repo.
    -git status --porcelain | fail_on_output
    -
    -# Undo any edits made by this script.
    -cleanup() {
    -  git reset --hard HEAD
    -}
    -trap cleanup EXIT
    -
    -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}"
    -go version
    -
    -if [[ "$1" = "-install" ]]; then
    -  # Install the pinned versions as defined in module tools.
    -  pushd ./test/tools
    -  go install \
    -    golang.org/x/tools/cmd/goimports \
    -    honnef.co/go/tools/cmd/staticcheck \
    -    github.com/client9/misspell/cmd/misspell
    -  popd
    -  if [[ -z "${VET_SKIP_PROTO}" ]]; then
    -    if [[ "${GITHUB_ACTIONS}" = "true" ]]; then
    -      PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files.
    -      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
    -      pushd /home/runner/go
    -      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
    -      unzip ${PROTOC_FILENAME}
    -      bin/protoc --version
    -      popd
    -    elif not which protoc > /dev/null; then
    -      die "Please install protoc into your path"
    -    fi
    -  fi
    -  exit 0
    -elif [[ "$#" -ne 0 ]]; then
    -  die "Unknown argument(s): $*"
    -fi
    -
    -# - Check that generated proto files are up to date.
    -if [[ -z "${VET_SKIP_PROTO}" ]]; then
    -  make proto && git status --porcelain 2>&1 | fail_on_output || \
    -    (git status; git --no-pager diff; exit 1)
    -fi
    -
    -if [[ -n "${VET_ONLY_PROTO}" ]]; then
    -  exit 0
    -fi
    -
    -# - Ensure all source files contain a copyright message.
    -# (Done in two parts because Darwin "git grep" has broken support for compound
    -# exclusion matches.)
    -(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output
    -
    -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
    -not grep 'func Test[^(]' *_test.go
    -not grep 'func Test[^(]' test/*.go
    -
    -# - Check for typos in test function names
    -git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test'
    -git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example'
    -
    -# - Do not import x/net/context.
    -not git grep -l 'x/net/context' -- "*.go"
    -
    -# - Do not import math/rand for real library code.  Use internal/grpcrand for
    -#   thread safety.
    -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test'
    -
    -# - Do not use "interface{}"; use "any" instead.
    -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate'
    -
    -# - Do not call grpclog directly. Use grpclog.Component instead.
    -git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
    -
    -# - Ensure all ptypes proto packages are renamed when importing.
    -not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
    -
    -# - Ensure all usages of grpc_testing package are renamed when importing.
    -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
    -
    -# - Ensure all xds proto imports are renamed to *pb or *grpc.
    -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
    -
    -misspell -error .
    -
    -# - gofmt, goimports, go vet, go mod tidy.
    -# Perform these checks on each module inside gRPC.
    -for MOD_FILE in $(find . -name 'go.mod'); do
    -  MOD_DIR=$(dirname ${MOD_FILE})
    -  pushd ${MOD_DIR}
    -  go vet -all ./... | fail_on_output
    -  gofmt -s -d -l . 2>&1 | fail_on_output
    -  goimports -l . 2>&1 | not grep -vE "\.pb\.go"
    -
    -  go mod tidy -compat=1.19
    -  git status --porcelain 2>&1 | fail_on_output || \
    -    (git status; git --no-pager diff; exit 1)
    -  popd
    -done
    -
    -# - Collection of static analysis checks
    -SC_OUT="$(mktemp)"
    -staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
    -
    -# Error for anything other than checks that need exclusions.
    -grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
    -
    -# Exclude underscore checks for generated code.
    -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)'
    -
    -# Error for duplicate imports not including grpc protos.
    -grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
    -channelz/grpc_channelz_v1"
    -go-control-plane/envoy
    -grpclb/grpc_lb_v1"
    -health/grpc_health_v1"
    -interop/grpc_testing"
    -orca/v3"
    -proto/grpc_gcp"
    -proto/grpc_lookup_v1"
    -reflection/grpc_reflection_v1"
    -reflection/grpc_reflection_v1alpha"
    -XXXXX PleaseIgnoreUnused'
    -
    -# Error for any package comments not in generated code.
    -grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:"
    -
    -# Only ignore the following deprecated types/fields/functions and exclude
    -# generated code.
    -grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
    -XXXXX Protobuf related deprecation errors:
    -"github.com/golang/protobuf
    -.pb.go:
    -grpc_testing_not_regenerate
    -: ptypes.
    -proto.RegisterType
    -XXXXX gRPC internal usage deprecation errors:
    -"google.golang.org/grpc
    -: grpc.
    -: v1alpha.
    -: v1alphareflectionpb.
    -BalancerAttributes is deprecated:
    -CredsBundle is deprecated:
    -Metadata is deprecated: use Attributes instead.
    -NewSubConn is deprecated:
    -OverrideServerName is deprecated:
    -RemoveSubConn is deprecated:
    -SecurityVersion is deprecated:
    -Target is deprecated: Use the Target field in the BuildOptions instead.
    -UpdateAddresses is deprecated:
    -UpdateSubConnState is deprecated:
    -balancer.ErrTransientFailure is deprecated:
    -grpc/reflection/v1alpha/reflection.proto
    -XXXXX xDS deprecated fields we support
    -.ExactMatch
    -.PrefixMatch
    -.SafeRegexMatch
    -.SuffixMatch
    -GetContainsMatch
    -GetExactMatch
    -GetMatchSubjectAltNames
    -GetPrefixMatch
    -GetSafeRegexMatch
    -GetSuffixMatch
    -GetTlsCertificateCertificateProviderInstance
    -GetValidationContextCertificateProviderInstance
    -XXXXX PleaseIgnoreUnused'
    -
    -echo SUCCESS
    diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
    index f47902371a..cffdfda961 100644
    --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
    +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
    @@ -102,7 +102,7 @@ type decoder struct {
     }
     
     // newError returns an error object with position info.
    -func (d decoder) newError(pos int, f string, x ...interface{}) error {
    +func (d decoder) newError(pos int, f string, x ...any) error {
     	line, column := d.Position(pos)
     	head := fmt.Sprintf("(line %d:%d): ", line, column)
     	return errors.New(head+f, x...)
    @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error {
     }
     
     // syntaxError returns a syntax error for given position.
    -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
    +func (d decoder) syntaxError(pos int, f string, x ...any) error {
     	line, column := d.Position(pos)
     	head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
     	return errors.New(head+f, x...)
    @@ -192,7 +192,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro
     				fd = fieldDescs.ByTextName(name)
     			}
     		}
    -		if flags.ProtoLegacy {
    +		if flags.ProtoLegacyWeak {
     			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
     				fd = nil // reset since the weak reference is not linked in
     			}
    @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
     		panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
     	}
     
    -	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
    +	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
     }
     
     func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
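
The decode change above threads the field's JSON name into scalar errors, so a bad value now reports which field it hit. A hedged sketch of the user-visible effect via the public protojson API (the exact error text is illustrative, not guaranteed):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var fd descriptorpb.FileDescriptorProto
	// "name" is a string field, so a JSON number is rejected.
	err := protojson.Unmarshal([]byte(`{"name": 123}`), &fd)
	// With this version the error names the field, roughly:
	//   proto: (line 1:10): invalid value for string field name: 123
	fmt.Println(err)
}
```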
    diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
    index 3f75098b6f..0e72d85378 100644
    --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
    +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
    @@ -25,15 +25,17 @@ const defaultIndent = "  "
     
     // Format formats the message as a multiline string.
     // This function is only intended for human consumption and ignores errors.
    -// Do not depend on the output being stable. It may change over time across
    -// different versions of the program.
    +// Do not depend on the output being stable. Its output will change across
    +// different builds of your program, even when using the same version of the
    +// protobuf module.
     func Format(m proto.Message) string {
     	return MarshalOptions{Multiline: true}.Format(m)
     }
     
     // Marshal writes the given [proto.Message] in JSON format using default options.
    -// Do not depend on the output being stable. It may change over time across
    -// different versions of the program.
    +// Do not depend on the output being stable. Its output will change across
    +// different builds of your program, even when using the same version of the
    +// protobuf module.
     func Marshal(m proto.Message) ([]byte, error) {
     	return MarshalOptions{}.Marshal(m)
     }
    @@ -110,8 +112,9 @@ type MarshalOptions struct {
     
     // Format formats the message as a string.
     // This method is only intended for human consumption and ignores errors.
    -// Do not depend on the output being stable. It may change over time across
    -// different versions of the program.
    +// Do not depend on the output being stable. Its output will change across
    +// different builds of your program, even when using the same version of the
    +// protobuf module.
     func (o MarshalOptions) Format(m proto.Message) string {
     	if m == nil || !m.ProtoReflect().IsValid() {
     		return "" // invalid syntax, but okay since this is for debugging
    @@ -122,8 +125,9 @@ func (o MarshalOptions) Format(m proto.Message) string {
     }
     
     // Marshal marshals the given [proto.Message] in the JSON format using options in
    -// MarshalOptions. Do not depend on the output being stable. It may change over
    -// time across different versions of the program.
    +// Do not depend on the output being stable. Its output will change across
    +// different builds of your program, even when using the same version of the
    +// protobuf module.
     func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
     	return o.marshal(nil, m)
     }
    @@ -212,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
     		}
     
     		v := m.Get(fd)
    -		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
    -		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
    -		if isProto2Scalar || isSingularMessage {
    +		if fd.HasPresence() {
     			if m.skipNull {
     				continue
     			}
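
The marshaller now keys unpopulated-field emission off protoreflect's HasPresence, and the doc comments stress that the output bytes are not stable across builds. A small assumed-usage sketch of the public options, comparing messages after a round trip instead of comparing JSON strings:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	msg := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
	}

	out, err := protojson.MarshalOptions{
		Multiline:     true,
		UseProtoNames: true, // emit snake_case proto names instead of lowerCamelCase
	}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)

	// The exact bytes are deliberately unstable across builds, so compare
	// messages semantically after a round trip rather than comparing strings.
	var back descriptorpb.FileDescriptorProto
	if err := protojson.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println("round-trip equal:", proto.Equal(msg, &back))
}
```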
    diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
    index 4b177c8206..e9fe103943 100644
    --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
    +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
    @@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa
     		switch tok.Kind() {
     		case json.ObjectClose:
     			if !found {
    -				return d.newError(tok.Pos(), `missing "value" field`)
    +				// We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
    +				// for compatibility with other proto runtimes that have interpreted the spec differently.
    +				if m.Descriptor().FullName() != genid.Empty_message_fullname {
    +					return d.newError(tok.Pos(), `missing "value" field`)
    +				}
     			}
     			return nil
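
The Any decoder above now tolerates a missing "value" member when the packed type is google.protobuf.Empty. A hedged sketch of the user-visible effect (assumes emptypb is linked into the binary so the type URL can be resolved):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/emptypb" // registers google.protobuf.Empty
)

func main() {
	// Previously this input was rejected with `missing "value" field`;
	// for Empty it is now accepted.
	in := []byte(`{"@type": "type.googleapis.com/google.protobuf.Empty"}`)

	var a anypb.Any
	if err := protojson.Unmarshal(in, &a); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println("packed type:", a.GetTypeUrl())
}
```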
     
    diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
    index a45f112bce..d972a3d98e 100644
    --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
    +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
    @@ -84,7 +84,7 @@ type decoder struct {
     }
     
     // newError returns an error object with position info.
    -func (d decoder) newError(pos int, f string, x ...interface{}) error {
    +func (d decoder) newError(pos int, f string, x ...any) error {
     	line, column := d.Position(pos)
     	head := fmt.Sprintf("(line %d:%d): ", line, column)
     	return errors.New(head+f, x...)
    @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error {
     }
     
     // syntaxError returns a syntax error for given position.
    -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
    +func (d decoder) syntaxError(pos int, f string, x ...any) error {
     	line, column := d.Position(pos)
     	head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
     	return errors.New(head+f, x...)
    @@ -185,7 +185,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro
     		} else if xtErr != nil && xtErr != protoregistry.NotFound {
     			return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
     		}
    -		if flags.ProtoLegacy {
    +		if flags.ProtoLegacyWeak {
     			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
     				fd = nil // reset since the weak reference is not linked in
     			}
    diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
    index 95967e8112..1f57e6610a 100644
    --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
    +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
    @@ -27,15 +27,17 @@ const defaultIndent = "  "
     
     // Format formats the message as a multiline string.
     // This function is only intended for human consumption and ignores errors.
    -// Do not depend on the output being stable. It may change over time across
    -// different versions of the program.
    +// Do not depend on the output being stable. Its output will change across
    +// different builds of your program, even when using the same version of the
    +// protobuf module.
     func Format(m proto.Message) string {
     	return MarshalOptions{Multiline: true}.Format(m)
     }
     
     // Marshal writes the given [proto.Message] in textproto format using default
    -// options. Do not depend on the output being stable. It may change over time
    -// across different versions of the program.
    +// options. Do not depend on the output being stable. Its output will change
    +// across different builds of your program, even when using the same version of
    +// the protobuf module.
     func Marshal(m proto.Message) ([]byte, error) {
     	return MarshalOptions{}.Marshal(m)
     }
    @@ -84,8 +86,9 @@ type MarshalOptions struct {
     
     // Format formats the message as a string.
     // This method is only intended for human consumption and ignores errors.
    -// Do not depend on the output being stable. It may change over time across
    -// different versions of the program.
    +// Do not depend on the output being stable. Its output will change across
    +// different builds of your program, even when using the same version of the
    +// protobuf module.
     func (o MarshalOptions) Format(m proto.Message) string {
     	if m == nil || !m.ProtoReflect().IsValid() {
     		return "" // invalid syntax, but okay since this is for debugging
    @@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string {
     }
     
     // Marshal writes the given [proto.Message] in textproto format using options in
    -// MarshalOptions object. Do not depend on the output being stable. It may
    -// change over time across different versions of the program.
    +// MarshalOptions object. Do not depend on the output being stable. Its output
    +// will change across different builds of your program, even when using the
    +// same version of the protobuf module.
     func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
     	return o.marshal(nil, m)
     }
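
prototext gets the same stability caveat. A short assumed-usage sketch: Format for human-readable debug output, Unmarshal for reading text-format input, and proto.Equal for comparisons instead of string equality:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	msg := &descriptorpb.FileDescriptorProto{Name: proto.String("example.proto")}

	// For logs and debugging only; the formatting is deliberately made
	// unstable across builds.
	fmt.Println(prototext.Format(msg))

	var back descriptorpb.FileDescriptorProto
	if err := prototext.Unmarshal([]byte(`name: "example.proto"`), &back); err != nil {
		panic(err)
	}
	fmt.Println("equal:", proto.Equal(msg, &back))
}
```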
    diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
    index a45625c8d1..87e46bd4df 100644
    --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
    +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
    @@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu
     				{rv.MethodByName("Values"), "Values"},
     				{rv.MethodByName("ReservedNames"), "ReservedNames"},
     				{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
    +				{rv.MethodByName("IsClosed"), "IsClosed"},
     			}...)
     
     		case protoreflect.EnumValueDescriptor:
    diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
    index 8401be8c84..024ffebd3d 100644
    --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
    +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
    @@ -9,7 +9,7 @@
     // dependency on the descriptor proto package).
     package descopts
     
    -import pref "google.golang.org/protobuf/reflect/protoreflect"
    +import "google.golang.org/protobuf/reflect/protoreflect"
     
     // These variables are set by the init function in descriptor.pb.go via logic
     // in internal/filetype. In other words, so long as the descriptor proto package
    @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
     //
     // Each variable is populated with a nil pointer to the options struct.
     var (
    -	File           pref.ProtoMessage
    -	Enum           pref.ProtoMessage
    -	EnumValue      pref.ProtoMessage
    -	Message        pref.ProtoMessage
    -	Field          pref.ProtoMessage
    -	Oneof          pref.ProtoMessage
    -	ExtensionRange pref.ProtoMessage
    -	Service        pref.ProtoMessage
    -	Method         pref.ProtoMessage
    +	File           protoreflect.ProtoMessage
    +	Enum           protoreflect.ProtoMessage
    +	EnumValue      protoreflect.ProtoMessage
    +	Message        protoreflect.ProtoMessage
    +	Field          protoreflect.ProtoMessage
    +	Oneof          protoreflect.ProtoMessage
    +	ExtensionRange protoreflect.ProtoMessage
    +	Service        protoreflect.ProtoMessage
    +	Method         protoreflect.ProtoMessage
     )
    diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
    index 18f0756874..5a57ef6f3c 100644
    Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
    diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
    new file mode 100644
    index 0000000000..bf1aba0e85
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
    @@ -0,0 +1,18 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package editionssupport defines constants for editions that are supported.
    +package editionssupport
    +
    +import "google.golang.org/protobuf/types/descriptorpb"
    +
    +const (
    +	Minimum = descriptorpb.Edition_EDITION_PROTO2
    +	Maximum = descriptorpb.Edition_EDITION_2023
    +
    +	// MaximumKnown is the maximum edition that is known to Go Protobuf, but not
    +	// declared as supported. In other words: end users cannot use it, but
    +	// testprotos inside Go Protobuf can.
    +	MaximumKnown = descriptorpb.Edition_EDITION_2024
    +)
    diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
    index d2b3ac031e..ea1d3e65a5 100644
    --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
    +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
    @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) {
     
     // newSyntaxError returns an error with line and column information useful for
     // syntax errors.
    -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
    +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
     	e := errors.New(f, x...)
     	line, column := d.Position(pos)
     	return errors.New("syntax error (line %d:%d): %v", line, column, e)
    diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
    index 373d208374..7e87c76044 100644
    --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
    +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
    @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
     func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
     	f := new(filedesc.Field)
     	f.L0.ParentFile = filedesc.SurrogateProto2
    +	f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
     	for len(tag) > 0 {
     		i := strings.IndexByte(tag, ',')
     		if i < 0 {
    @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
     				f.L1.StringName.InitJSON(jsonName)
     			}
     		case s == "packed":
    -			f.L1.HasPacked = true
    -			f.L1.IsPacked = true
    +			f.L1.EditionFeatures.IsPacked = true
     		case strings.HasPrefix(s, "weak="):
     			f.L1.IsWeak = true
     			f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
    diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
    index 87853e786d..099b2bf451 100644
    --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
    +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
    @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
     
     // newSyntaxError returns a syntax error with line and column information for
     // current position.
    -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
    +func (d *Decoder) newSyntaxError(f string, x ...any) error {
     	e := errors.New(f, x...)
     	line, column := d.Position(len(d.orig) - len(d.in))
     	return errors.New("syntax error (line %d:%d): %v", line, column, e)
    diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go
    index 20c17b35e3..c2d6bd5265 100644
    --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go
    +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go
    @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error")
     
     // New formats a string according to the format specifier and arguments and
     // returns an error that has a "proto" prefix.
    -func New(f string, x ...interface{}) error {
    +func New(f string, x ...any) error {
     	return &prefixError{s: format(f, x...)}
     }
     
    @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error {
     
     // Wrap returns an error that has a "proto" prefix, the formatted string described
     // by the format specifier and arguments, and a suffix of err. The error wraps err.
    -func Wrap(err error, f string, x ...interface{}) error {
    +func Wrap(err error, f string, x ...any) error {
     	return &wrapError{
     		s:   format(f, x...),
     		err: err,
    @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool {
     	return target == Error
     }
     
    -func format(f string, x ...interface{}) string {
    +func format(f string, x ...any) string {
     	// avoid "proto: " prefix when chaining
     	for i := 0; i < len(x); i++ {
     		switch e := x[i].(type) {
    @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error {
     func RequiredNotSet(name string) error {
     	return New("required field %v not set", name)
     }
    +
    +type SizeMismatchError struct {
    +	Calculated, Measured int
    +}
    +
    +func (e *SizeMismatchError) Error() string {
    +	return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured)
    +}
    +
    +func MismatchedSizeCalculation(calculated, measured int) error {
    +	return &SizeMismatchError{
    +		Calculated: calculated,
    +		Measured:   measured,
    +	}
    +}
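
The error helpers keep the usual sentinel/wrap conventions, so callers can test them with the standard errors package. Since internal/errors is not importable, here is a standalone sketch of the same pattern under assumed names (not the real types):

```go
package main

import (
	"errors"
	"fmt"
)

// ErrProto plays the role of the package-level sentinel that every
// prefixed or wrapped error reports through its Is method.
var ErrProto = errors.New("proto error")

type wrapError struct {
	msg string
	err error
}

func (e *wrapError) Error() string        { return e.msg + ": " + e.err.Error() }
func (e *wrapError) Unwrap() error        { return e.err }
func (e *wrapError) Is(target error) bool { return target == ErrProto }

type sizeMismatchError struct{ calculated, measured int }

func (e *sizeMismatchError) Error() string {
	return fmt.Sprintf("size mismatch: calculated=%d, measured=%d", e.calculated, e.measured)
}

func main() {
	err := &wrapError{msg: "marshal failed", err: &sizeMismatchError{10, 12}}

	fmt.Println(errors.Is(err, ErrProto)) // true: matched via the Is method

	var sm *sizeMismatchError
	fmt.Println(errors.As(err, &sm), sm) // true: found by unwrapping
}
```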
    diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
    deleted file mode 100644
    index fbcd349207..0000000000
    --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
    +++ /dev/null
    @@ -1,40 +0,0 @@
    -// Copyright 2020 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build !go1.13
    -// +build !go1.13
    -
    -package errors
    -
    -import "reflect"
    -
    -// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
    -func Is(err, target error) bool {
    -	if target == nil {
    -		return err == target
    -	}
    -
    -	isComparable := reflect.TypeOf(target).Comparable()
    -	for {
    -		if isComparable && err == target {
    -			return true
    -		}
    -		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
    -			return true
    -		}
    -		if err = unwrap(err); err == nil {
    -			return false
    -		}
    -	}
    -}
    -
    -func unwrap(err error) error {
    -	u, ok := err.(interface {
    -		Unwrap() error
    -	})
    -	if !ok {
    -		return nil
    -	}
    -	return u.Unwrap()
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
    deleted file mode 100644
    index 5e72f1cde9..0000000000
    --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -// Copyright 2020 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.13
    -// +build go1.13
    -
    -package errors
    -
    -import "errors"
    -
    -// Is is errors.Is.
    -func Is(err, target error) bool { return errors.Is(err, target) }
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
    index 8826bcf402..378b826faa 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
    @@ -7,6 +7,7 @@ package filedesc
     import (
     	"bytes"
     	"fmt"
    +	"strings"
     	"sync"
     	"sync/atomic"
     
    @@ -31,6 +32,7 @@ const (
     	EditionProto2      Edition = 998
     	EditionProto3      Edition = 999
     	Edition2023        Edition = 1000
    +	Edition2024        Edition = 1001
     	EditionUnsupported Edition = 100000
     )
     
    @@ -76,31 +78,48 @@ type (
     		Locations SourceLocations
     	}
     
    +	// EditionFeatures is a frequently-instantiated struct, so please take care
    +	// to minimize padding when adding new fields to this struct (add them in
    +	// the right place/order).
     	EditionFeatures struct {
    +		// StripEnumPrefix determines if the plugin generates enum value
    +		// constants as-is, with their prefix stripped, or both variants.
    +		StripEnumPrefix int
    +
     		// IsFieldPresence is true if field_presence is EXPLICIT
     		// https://protobuf.dev/editions/features/#field_presence
     		IsFieldPresence bool
    +
     		// IsFieldPresence is true if field_presence is LEGACY_REQUIRED
     		// https://protobuf.dev/editions/features/#field_presence
     		IsLegacyRequired bool
    +
     		// IsOpenEnum is true if enum_type is OPEN
     		// https://protobuf.dev/editions/features/#enum_type
     		IsOpenEnum bool
    +
     		// IsPacked is true if repeated_field_encoding is PACKED
     		// https://protobuf.dev/editions/features/#repeated_field_encoding
     		IsPacked bool
    +
     		// IsUTF8Validated is true if utf_validation is VERIFY
     		// https://protobuf.dev/editions/features/#utf8_validation
     		IsUTF8Validated bool
    +
     		// IsDelimitedEncoded is true if message_encoding is DELIMITED
     		// https://protobuf.dev/editions/features/#message_encoding
     		IsDelimitedEncoded bool
    +
     		// IsJSONCompliant is true if json_format is ALLOW
     		// https://protobuf.dev/editions/features/#json_format
     		IsJSONCompliant bool
    +
     		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
     		// UnmarshalJSON([]byte) error method for enums.
     		GenerateLegacyUnmarshalJSON bool
    +		// APILevel controls which API (Open, Hybrid or Opaque) should be used
    +		// for generated code (.pb.go files).
    +		APILevel int
     	}
     )
     
    @@ -108,9 +127,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
     func (fd *File) Parent() protoreflect.Descriptor         { return nil }
     func (fd *File) Index() int                              { return 0 }
     func (fd *File) Syntax() protoreflect.Syntax             { return fd.L1.Syntax }
    -func (fd *File) Name() protoreflect.Name                 { return fd.L1.Package.Name() }
    -func (fd *File) FullName() protoreflect.FullName         { return fd.L1.Package }
    -func (fd *File) IsPlaceholder() bool                     { return false }
    +
    +// Not exported and just used to reconstruct the original FileDescriptor proto
    +func (fd *File) Edition() int32                  { return int32(fd.L1.Edition) }
    +func (fd *File) Name() protoreflect.Name         { return fd.L1.Package.Name() }
    +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
    +func (fd *File) IsPlaceholder() bool             { return false }
     func (fd *File) Options() protoreflect.ProtoMessage {
     	if f := fd.lazyInit().Options; f != nil {
     		return f()
    @@ -202,6 +224,9 @@ func (ed *Enum) lazyInit() *EnumL2 {
     	ed.L0.ParentFile.lazyInit() // implicitly initializes L2
     	return ed.L2
     }
    +func (ed *Enum) IsClosed() bool {
    +	return !ed.L1.EditionFeatures.IsOpenEnum
    +}
     
     func (ed *EnumValue) Options() protoreflect.ProtoMessage {
     	if f := ed.L1.Options; f != nil {
    @@ -251,10 +276,7 @@ type (
     		StringName       stringName
     		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
     		IsWeak           bool // promoted from google.protobuf.FieldOptions
    -		HasPacked        bool // promoted from google.protobuf.FieldOptions
    -		IsPacked         bool // promoted from google.protobuf.FieldOptions
    -		HasEnforceUTF8   bool // promoted from google.protobuf.FieldOptions
    -		EnforceUTF8      bool // promoted from google.protobuf.FieldOptions
    +		IsLazy           bool // promoted from google.protobuf.FieldOptions
     		Default          defaultValue
     		ContainingOneof  protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
     		Enum             protoreflect.EnumDescriptor
    @@ -331,8 +353,7 @@ func (fd *Field) HasPresence() bool {
     	if fd.L1.Cardinality == protoreflect.Repeated {
     		return false
     	}
    -	explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence
    -	return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
    +	return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
     }
     func (fd *Field) HasOptionalKeyword() bool {
     	return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
    @@ -345,17 +366,11 @@ func (fd *Field) IsPacked() bool {
     	case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
     		return false
     	}
    -	if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
    -		return fd.L1.EditionFeatures.IsPacked
    -	}
    -	if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 {
    -		// proto3 repeated fields are packed by default.
    -		return !fd.L1.HasPacked || fd.L1.IsPacked
    -	}
    -	return fd.L1.IsPacked
    +	return fd.L1.EditionFeatures.IsPacked
     }
     func (fd *Field) IsExtension() bool { return false }
     func (fd *Field) IsWeak() bool      { return fd.L1.IsWeak }
    +func (fd *Field) IsLazy() bool      { return fd.L1.IsLazy }
     func (fd *Field) IsList() bool      { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
     func (fd *Field) IsMap() bool       { return fd.Message() != nil && fd.Message().IsMapEntry() }
     func (fd *Field) MapKey() protoreflect.FieldDescriptor {
    @@ -388,6 +403,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor {
     	}
     	return fd.L1.Message
     }
    +func (fd *Field) IsMapEntry() bool {
    +	parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor)
    +	return ok && parent.IsMapEntry()
    +}
     func (fd *Field) Format(s fmt.State, r rune)             { descfmt.FormatDesc(s, r, fd) }
     func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
     
    @@ -399,13 +418,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
     // WARNING: This method is exempt from the compatibility promise and may be
     // removed in the future without warning.
     func (fd *Field) EnforceUTF8() bool {
    -	if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
    -		return fd.L1.EditionFeatures.IsUTF8Validated
    -	}
    -	if fd.L1.HasEnforceUTF8 {
    -		return fd.L1.EnforceUTF8
    -	}
    -	return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
    +	return fd.L1.EditionFeatures.IsUTF8Validated
     }
     
     func (od *Oneof) IsSynthetic() bool {
    @@ -432,13 +445,13 @@ type (
     		Extendee        protoreflect.MessageDescriptor
     		Cardinality     protoreflect.Cardinality
     		Kind            protoreflect.Kind
    +		IsLazy          bool
     		EditionFeatures EditionFeatures
     	}
     	ExtensionL2 struct {
     		Options          func() protoreflect.ProtoMessage
     		StringName       stringName
     		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
    -		IsPacked         bool // promoted from google.protobuf.FieldOptions
     		Default          defaultValue
     		Enum             protoreflect.EnumDescriptor
     		Message          protoreflect.MessageDescriptor
    @@ -461,9 +474,19 @@ func (xd *Extension) HasPresence() bool                     { return xd.L1.Cardi
     func (xd *Extension) HasOptionalKeyword() bool {
     	return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
     }
    -func (xd *Extension) IsPacked() bool                         { return xd.lazyInit().IsPacked }
    +func (xd *Extension) IsPacked() bool {
    +	if xd.L1.Cardinality != protoreflect.Repeated {
    +		return false
    +	}
    +	switch xd.L1.Kind {
    +	case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
    +		return false
    +	}
    +	return xd.L1.EditionFeatures.IsPacked
    +}
     func (xd *Extension) IsExtension() bool                      { return true }
     func (xd *Extension) IsWeak() bool                           { return false }
    +func (xd *Extension) IsLazy() bool                           { return xd.L1.IsLazy }
     func (xd *Extension) IsList() bool                           { return xd.Cardinality() == protoreflect.Repeated }
     func (xd *Extension) IsMap() bool                            { return false }
     func (xd *Extension) MapKey() protoreflect.FieldDescriptor   { return nil }
    @@ -542,8 +565,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement)     {}
     // Surrogate files are can be used to create standalone descriptors
     // where the syntax is only information derived from the parent file.
     var (
    -	SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
    -	SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
    +	SurrogateProto2      = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
    +	SurrogateProto3      = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
    +	SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}}
     )
     
     type (
    @@ -585,6 +609,34 @@ func (s *stringName) InitJSON(name string) {
     	s.nameJSON = name
     }
     
    +// Returns true if this field is structured like the synthetic field of a proto2
    +// group. This allows us to expand our treatment of delimited fields without
    +// breaking proto2 files that have been upgraded to editions.
    +func isGroupLike(fd protoreflect.FieldDescriptor) bool {
    +	// Groups are always group types.
    +	if fd.Kind() != protoreflect.GroupKind {
    +		return false
    +	}
    +
    +	// Group fields are always the lowercase type name.
    +	if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) {
    +		return false
    +	}
    +
    +	// Groups could only be defined in the same file they're used.
    +	if fd.Message().ParentFile() != fd.ParentFile() {
    +		return false
    +	}
    +
    +	// Group messages are always defined in the same scope as the field.  File
    +	// level extensions will compare NULL == NULL here, which is why the file
    +	// comparison above is necessary to ensure both come from the same file.
    +	if fd.IsExtension() {
    +		return fd.Parent() == fd.Message().Parent()
    +	}
    +	return fd.ContainingMessage() == fd.Message().Parent()
    +}
    +
     func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
     	s.once.Do(func() {
     		if fd.IsExtension() {
    @@ -605,7 +657,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
     
     			// Format the text name.
     			s.nameText = string(fd.Name())
    -			if fd.Kind() == protoreflect.GroupKind {
    +			if isGroupLike(fd) {
     				s.nameText = string(fd.Message().Name())
     			}
     		}
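
Most of these descriptor changes (HasPresence, IsPacked, EnforceUTF8 now driven by EditionFeatures) are observable through the public protoreflect API. A hedged sketch that walks a known descriptor and prints those properties; any generated message works, FieldOptions is just a convenient example:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	md := (&descriptorpb.FieldOptions{}).ProtoReflect().Descriptor()

	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		fmt.Printf("%-22s kind=%-8v presence=%-5v packed=%v\n",
			fd.Name(), fd.Kind(), fd.HasPresence(), fd.IsPacked())
	}
}
```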
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
    index 237e64fd23..d2f549497e 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
    @@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) {
     				switch string(v) {
     				case "proto2":
     					fd.L1.Syntax = protoreflect.Proto2
    +					fd.L1.Edition = EditionProto2
     				case "proto3":
     					fd.L1.Syntax = protoreflect.Proto3
    +					fd.L1.Edition = EditionProto3
     				case "editions":
     					fd.L1.Syntax = protoreflect.Editions
     				default:
    @@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) {
     	// If syntax is missing, it is assumed to be proto2.
     	if fd.L1.Syntax == 0 {
     		fd.L1.Syntax = protoreflect.Proto2
    +		fd.L1.Edition = EditionProto2
     	}
     
    -	if fd.L1.Syntax == protoreflect.Editions {
    -		fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
    -	}
    +	fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
     
     	// Parse editions features from options if any
     	if options != nil {
    @@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
     	ed.L0.ParentFile = pf
     	ed.L0.Parent = pd
     	ed.L0.Index = i
    +	ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent())
     
     	var numValues int
     	for b := b; len(b) > 0; {
    @@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
     	xd.L0.ParentFile = pf
     	xd.L0.Parent = pd
     	xd.L0.Index = i
    +	xd.L1.EditionFeatures = featuresFromParentDesc(pd)
     
     	for len(b) > 0 {
     		num, typ, n := protowire.ConsumeTag(b)
    @@ -467,6 +470,40 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
     				xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
     			case genid.FieldDescriptorProto_Extendee_field_number:
     				xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
    +			case genid.FieldDescriptorProto_Options_field_number:
    +				xd.unmarshalOptions(v)
    +			}
    +		default:
    +			m := protowire.ConsumeFieldValue(num, typ, b)
    +			b = b[m:]
    +		}
    +	}
    +
    +	if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
    +		xd.L1.Kind = protoreflect.GroupKind
    +	}
    +}
    +
    +func (xd *Extension) unmarshalOptions(b []byte) {
    +	for len(b) > 0 {
    +		num, typ, n := protowire.ConsumeTag(b)
    +		b = b[n:]
    +		switch typ {
    +		case protowire.VarintType:
    +			v, m := protowire.ConsumeVarint(b)
    +			b = b[m:]
    +			switch num {
    +			case genid.FieldOptions_Packed_field_number:
    +				xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
    +			case genid.FieldOptions_Lazy_field_number:
    +				xd.L1.IsLazy = protowire.DecodeBool(v)
    +			}
    +		case protowire.BytesType:
    +			v, m := protowire.ConsumeBytes(b)
    +			b = b[m:]
    +			switch num {
    +			case genid.FieldOptions_Features_field_number:
    +				xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
     			}
     		default:
     			m := protowire.ConsumeFieldValue(num, typ, b)
    @@ -499,7 +536,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
     }
     
     var nameBuilderPool = sync.Pool{
    -	New: func() interface{} { return new(strs.Builder) },
    +	New: func() any { return new(strs.Builder) },
     }
     
     func getBuilder() *strs.Builder {
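
The seed and options parsers above walk raw descriptor bytes with encoding/protowire. A minimal standalone sketch of that decoding loop (field numbers and values here are illustrative, not tied to any real schema):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Wire bytes for: field 1 (varint) = 1, field 2 (bytes) = "hi".
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 1)
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendBytes(b, []byte("hi"))

	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			fmt.Printf("field %d: bool=%v\n", num, protowire.DecodeBool(v))
		case protowire.BytesType:
			v, m := protowire.ConsumeBytes(b)
			b = b[m:]
			fmt.Printf("field %d: %q\n", num, v)
		default:
			// Skip anything we do not care about, as the parsers above do.
			b = b[protowire.ConsumeFieldValue(num, typ, b):]
		}
	}
}
```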
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
    index 482a61cc10..67a51b327c 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
    @@ -45,6 +45,11 @@ func (file *File) resolveMessages() {
     			case protoreflect.MessageKind, protoreflect.GroupKind:
     				fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
     				depIdx++
    +				if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) {
    +					// A map field might inherit delimited encoding from a file-wide default feature.
    +					// But maps never actually use delimited encoding. (At least for now...)
    +					fd.L1.Kind = protoreflect.MessageKind
    +				}
     			}
     
     			// Default is resolved here since it depends on Enum being resolved.
    @@ -466,10 +471,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
     			b = b[m:]
     		}
     	}
    -	if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
    +	if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
     		fd.L1.Kind = protoreflect.GroupKind
     	}
    -	if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired {
    +	if fd.L1.EditionFeatures.IsLegacyRequired {
     		fd.L1.Cardinality = protoreflect.Required
     	}
     	if rawTypeName != nil {
    @@ -496,13 +501,13 @@ func (fd *Field) unmarshalOptions(b []byte) {
     			b = b[m:]
     			switch num {
     			case genid.FieldOptions_Packed_field_number:
    -				fd.L1.HasPacked = true
    -				fd.L1.IsPacked = protowire.DecodeBool(v)
    +				fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
     			case genid.FieldOptions_Weak_field_number:
     				fd.L1.IsWeak = protowire.DecodeBool(v)
    +			case genid.FieldOptions_Lazy_field_number:
    +				fd.L1.IsLazy = protowire.DecodeBool(v)
     			case FieldOptions_EnforceUTF8:
    -				fd.L1.HasEnforceUTF8 = true
    -				fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
    +				fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
     			}
     		case protowire.BytesType:
     			v, m := protowire.ConsumeBytes(b)
    @@ -548,7 +553,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
     func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
     	var rawTypeName []byte
     	var rawOptions []byte
    -	xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee)
     	xd.L2 = new(ExtensionL2)
     	for len(b) > 0 {
     		num, typ, n := protowire.ConsumeTag(b)
    @@ -572,7 +576,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
     			case genid.FieldDescriptorProto_TypeName_field_number:
     				rawTypeName = v
     			case genid.FieldDescriptorProto_Options_field_number:
    -				xd.unmarshalOptions(v)
     				rawOptions = appendOptions(rawOptions, v)
     			}
     		default:
    @@ -580,12 +583,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
     			b = b[m:]
     		}
     	}
    -	if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
    -		xd.L1.Kind = protoreflect.GroupKind
    -	}
    -	if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired {
    -		xd.L1.Cardinality = protoreflect.Required
    -	}
     	if rawTypeName != nil {
     		name := makeFullName(sb, rawTypeName)
     		switch xd.L1.Kind {
    @@ -598,32 +595,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
     	xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
     }
     
    -func (xd *Extension) unmarshalOptions(b []byte) {
    -	for len(b) > 0 {
    -		num, typ, n := protowire.ConsumeTag(b)
    -		b = b[n:]
    -		switch typ {
    -		case protowire.VarintType:
    -			v, m := protowire.ConsumeVarint(b)
    -			b = b[m:]
    -			switch num {
    -			case genid.FieldOptions_Packed_field_number:
    -				xd.L2.IsPacked = protowire.DecodeBool(v)
    -			}
    -		case protowire.BytesType:
    -			v, m := protowire.ConsumeBytes(b)
    -			b = b[m:]
    -			switch num {
    -			case genid.FieldOptions_Features_field_number:
    -				xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
    -			}
    -		default:
    -			m := protowire.ConsumeFieldValue(num, typ, b)
    -			b = b[m:]
    -		}
    -	}
    -}
    -
     func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
     	var rawMethods [][]byte
     	var rawOptions []byte
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
    index 30db19fdc7..f4107c05f4 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
    @@ -8,6 +8,7 @@ package filedesc
     
     import (
     	"fmt"
    +	"strings"
     	"sync"
     
     	"google.golang.org/protobuf/internal/descfmt"
    @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields {
     				if _, ok := p.byText[d.TextName()]; !ok {
     					p.byText[d.TextName()] = d
     				}
    +				if isGroupLike(d) {
    +					lowerJSONName := strings.ToLower(d.JSONName())
    +					if _, ok := p.byJSON[lowerJSONName]; !ok {
    +						p.byJSON[lowerJSONName] = d
    +					}
    +					lowerTextName := strings.ToLower(d.TextName())
    +					if _, ok := p.byText[lowerTextName]; !ok {
    +						p.byText[lowerTextName] = d
    +					}
    +				}
     				if _, ok := p.byNum[d.Number()]; !ok {
     					p.byNum[d.Number()] = d
     				}
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
    index 0375a49d40..10132c9b38 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
    @@ -14,9 +14,13 @@ import (
     )
     
     var defaultsCache = make(map[Edition]EditionFeatures)
    +var defaultsKeys = []Edition{}
     
     func init() {
     	unmarshalEditionDefaults(editiondefaults.Defaults)
    +	SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2)
    +	SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3)
    +	SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023)
     }
     
     func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
    @@ -28,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
     			v, m := protowire.ConsumeVarint(b)
     			b = b[m:]
     			parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
    +		case genid.GoFeatures_ApiLevel_field_number:
    +			v, m := protowire.ConsumeVarint(b)
    +			b = b[m:]
    +			parent.APILevel = int(v)
    +		case genid.GoFeatures_StripEnumPrefix_field_number:
    +			v, m := protowire.ConsumeVarint(b)
    +			b = b[m:]
    +			parent.StripEnumPrefix = int(v)
     		default:
     			panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
     		}
    @@ -64,7 +76,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
     			v, m := protowire.ConsumeBytes(b)
     			b = b[m:]
     			switch num {
    -			case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
    +			case genid.FeatureSet_Go_ext_number:
     				parent = unmarshalGoFeature(v, parent)
     			}
     		}
    @@ -104,12 +116,15 @@ func unmarshalEditionDefault(b []byte) {
     			v, m := protowire.ConsumeBytes(b)
     			b = b[m:]
     			switch num {
    -			case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
    +			case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number:
    +				fs = unmarshalFeatureSet(v, fs)
    +			case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number:
     				fs = unmarshalFeatureSet(v, fs)
     			}
     		}
     	}
     	defaultsCache[ed] = fs
    +	defaultsKeys = append(defaultsKeys, ed)
     }
     
     func unmarshalEditionDefaults(b []byte) {
    @@ -135,8 +150,15 @@ func unmarshalEditionDefaults(b []byte) {
     }
     
     func getFeaturesFor(ed Edition) EditionFeatures {
    -	if def, ok := defaultsCache[ed]; ok {
    -		return def
    +	match := EditionUnknown
    +	for _, key := range defaultsKeys {
    +		if key > ed {
    +			break
    +		}
    +		match = key
    +	}
    +	if match == EditionUnknown {
    +		panic(fmt.Sprintf("unsupported edition: %v", ed))
     	}
    -	panic(fmt.Sprintf("unsupported edition: %v", ed))
    +	return defaultsCache[match]
     }
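
getFeaturesFor now resolves a requested edition to the newest edition that has recorded defaults without exceeding the request (defaultsKeys is appended in ascending order while the defaults blob is parsed). A tiny standalone sketch of that lookup rule, using hypothetical edition numbers:

```go
package main

import "fmt"

// Hypothetical edition numbers with known defaults, kept in ascending order.
var defaultsKeys = []int{900, 998, 999, 1000}

// featuresFor returns the largest key that does not exceed ed, or -1 if none.
func featuresFor(ed int) int {
	match := -1
	for _, key := range defaultsKeys {
		if key > ed {
			break
		}
		match = key
	}
	return match
}

func main() {
	fmt.Println(featuresFor(1001)) // 1000: falls back to the nearest older edition
	fmt.Println(featuresFor(998))  // 998: exact match
	fmt.Println(featuresFor(1))    // -1: unsupported; the real code panics here
}
```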
    diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
    index 28240ebc5c..bfb3b84170 100644
    --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
    +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
    @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage        { return des
     func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
     func (e PlaceholderEnum) ReservedNames() protoreflect.Names         { return emptyNames }
     func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges   { return emptyEnumRanges }
    +func (e PlaceholderEnum) IsClosed() bool                            { return false }
     func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor)     { return }
     func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement)       { return }
     
    diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
    index f0e38c4ef4..ba83fea44c 100644
    --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
    +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
    @@ -68,7 +68,7 @@ type Builder struct {
     	// and for input and output messages referenced by service methods.
     	// Dependencies must come after declarations, but the ordering of
     	// dependencies themselves is unspecified.
    -	GoTypes []interface{}
    +	GoTypes []any
     
     	// DependencyIndexes is an ordered list of indexes into GoTypes for the
     	// dependencies of messages, extensions, or services.
    @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 {
     
     type (
     	resolverByIndex struct {
    -		goTypes []interface{}
    +		goTypes []any
     		depIdxs depIdxs
     		fileRegistry
     	}
    diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
    index 58372dd348..5cb3ee70f9 100644
    --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go
    +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
    @@ -22,3 +22,8 @@ const ProtoLegacy = protoLegacy
     // extension fields at unmarshal time, but defers creating the message
     // structure until the extension is first accessed.
     const LazyUnmarshalExtensions = ProtoLegacy
    +
    +// ProtoLegacyWeak specifies whether to enable support for weak fields.
    +// This flag was split out of ProtoLegacy in preparation for removing
    +// support for weak fields (independent of the other protolegacy features).
    +const ProtoLegacyWeak = ProtoLegacy
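
ProtoLegacyWeak currently just mirrors ProtoLegacy, and ProtoLegacy itself is a compile-time constant flipped by a build tag (upstream uses a `protolegacy` tag for this). The file below is an illustrative sketch of that build-tag-constant pattern, not a copy of the real file:

```go
// Default variant of the flag; a sibling file guarded by
// "//go:build protolegacy" declares the same constant as true, so building
// with -tags protolegacy flips ProtoLegacy and, with this change, ProtoLegacyWeak.
//
//go:build !protolegacy

package flags

const protoLegacy = false
```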
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
    index 40272c893f..f30ab6b586 100644
    --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
    +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
    @@ -21,6 +21,7 @@ const (
     // Enum values for google.protobuf.Edition.
     const (
     	Edition_EDITION_UNKNOWN_enum_value         = 0
    +	Edition_EDITION_LEGACY_enum_value          = 900
     	Edition_EDITION_PROTO2_enum_value          = 998
     	Edition_EDITION_PROTO3_enum_value          = 999
     	Edition_EDITION_2023_enum_value            = 1000
    @@ -653,6 +654,7 @@ const (
     	FieldOptions_Targets_field_name             protoreflect.Name = "targets"
     	FieldOptions_EditionDefaults_field_name     protoreflect.Name = "edition_defaults"
     	FieldOptions_Features_field_name            protoreflect.Name = "features"
    +	FieldOptions_FeatureSupport_field_name      protoreflect.Name = "feature_support"
     	FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
     
     	FieldOptions_Ctype_field_fullname               protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
    @@ -667,6 +669,7 @@ const (
     	FieldOptions_Targets_field_fullname             protoreflect.FullName = "google.protobuf.FieldOptions.targets"
     	FieldOptions_EditionDefaults_field_fullname     protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
     	FieldOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.FieldOptions.features"
    +	FieldOptions_FeatureSupport_field_fullname      protoreflect.FullName = "google.protobuf.FieldOptions.feature_support"
     	FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
     )
     
    @@ -684,6 +687,7 @@ const (
     	FieldOptions_Targets_field_number             protoreflect.FieldNumber = 19
     	FieldOptions_EditionDefaults_field_number     protoreflect.FieldNumber = 20
     	FieldOptions_Features_field_number            protoreflect.FieldNumber = 21
    +	FieldOptions_FeatureSupport_field_number      protoreflect.FieldNumber = 22
     	FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
     )
     
    @@ -767,6 +771,33 @@ const (
     	FieldOptions_EditionDefault_Value_field_number   protoreflect.FieldNumber = 2
     )
     
    +// Names for google.protobuf.FieldOptions.FeatureSupport.
    +const (
    +	FieldOptions_FeatureSupport_message_name     protoreflect.Name     = "FeatureSupport"
    +	FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport"
    +)
    +
    +// Field names for google.protobuf.FieldOptions.FeatureSupport.
    +const (
    +	FieldOptions_FeatureSupport_EditionIntroduced_field_name  protoreflect.Name = "edition_introduced"
    +	FieldOptions_FeatureSupport_EditionDeprecated_field_name  protoreflect.Name = "edition_deprecated"
    +	FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning"
    +	FieldOptions_FeatureSupport_EditionRemoved_field_name     protoreflect.Name = "edition_removed"
    +
    +	FieldOptions_FeatureSupport_EditionIntroduced_field_fullname  protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced"
    +	FieldOptions_FeatureSupport_EditionDeprecated_field_fullname  protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated"
    +	FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning"
    +	FieldOptions_FeatureSupport_EditionRemoved_field_fullname     protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed"
    +)
    +
    +// Field numbers for google.protobuf.FieldOptions.FeatureSupport.
    +const (
    +	FieldOptions_FeatureSupport_EditionIntroduced_field_number  protoreflect.FieldNumber = 1
    +	FieldOptions_FeatureSupport_EditionDeprecated_field_number  protoreflect.FieldNumber = 2
    +	FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3
    +	FieldOptions_FeatureSupport_EditionRemoved_field_number     protoreflect.FieldNumber = 4
    +)
    +
     // Names for google.protobuf.OneofOptions.
     const (
     	OneofOptions_message_name     protoreflect.Name     = "OneofOptions"
    @@ -829,11 +860,13 @@ const (
     	EnumValueOptions_Deprecated_field_name          protoreflect.Name = "deprecated"
     	EnumValueOptions_Features_field_name            protoreflect.Name = "features"
     	EnumValueOptions_DebugRedact_field_name         protoreflect.Name = "debug_redact"
    +	EnumValueOptions_FeatureSupport_field_name      protoreflect.Name = "feature_support"
     	EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
     
     	EnumValueOptions_Deprecated_field_fullname          protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
     	EnumValueOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
     	EnumValueOptions_DebugRedact_field_fullname         protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
    +	EnumValueOptions_FeatureSupport_field_fullname      protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support"
     	EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
     )
     
    @@ -842,6 +875,7 @@ const (
     	EnumValueOptions_Deprecated_field_number          protoreflect.FieldNumber = 1
     	EnumValueOptions_Features_field_number            protoreflect.FieldNumber = 2
     	EnumValueOptions_DebugRedact_field_number         protoreflect.FieldNumber = 3
    +	EnumValueOptions_FeatureSupport_field_number      protoreflect.FieldNumber = 4
     	EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
     )
     
    @@ -1110,17 +1144,20 @@ const (
     
     // Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
     const (
    -	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name  protoreflect.Name = "edition"
    -	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
    +	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name             protoreflect.Name = "edition"
    +	FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features"
    +	FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name       protoreflect.Name = "fixed_features"
     
    -	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname  protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
    -	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
    +	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname             protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
    +	FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features"
    +	FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features"
     )
     
     // Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
     const (
    -	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number  protoreflect.FieldNumber = 3
    -	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
    +	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number             protoreflect.FieldNumber = 3
    +	FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4
    +	FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number       protoreflect.FieldNumber = 5
     )
     
     // Names for google.protobuf.SourceCodeInfo.
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
    index 45ccd01211..d9b9d916a2 100644
    --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
    +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
    @@ -6,6 +6,6 @@
     // and the well-known types.
     package genid
     
    -import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +import "google.golang.org/protobuf/reflect/protoreflect"
     
     const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
    index fd9015e8ee..f5ee7f5c2b 100644
    --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
    +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
    @@ -10,22 +10,61 @@ import (
     	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
     )
     
    -const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto"
    +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
     
    -// Names for google.protobuf.GoFeatures.
    +// Names for pb.GoFeatures.
     const (
     	GoFeatures_message_name     protoreflect.Name     = "GoFeatures"
    -	GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
    +	GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
     )
     
    -// Field names for google.protobuf.GoFeatures.
    +// Field names for pb.GoFeatures.
     const (
     	GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
    +	GoFeatures_ApiLevel_field_name                protoreflect.Name = "api_level"
    +	GoFeatures_StripEnumPrefix_field_name         protoreflect.Name = "strip_enum_prefix"
     
    -	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
    +	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
    +	GoFeatures_ApiLevel_field_fullname                protoreflect.FullName = "pb.GoFeatures.api_level"
    +	GoFeatures_StripEnumPrefix_field_fullname         protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
     )
     
    -// Field numbers for google.protobuf.GoFeatures.
    +// Field numbers for pb.GoFeatures.
     const (
     	GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
    +	GoFeatures_ApiLevel_field_number                protoreflect.FieldNumber = 2
    +	GoFeatures_StripEnumPrefix_field_number         protoreflect.FieldNumber = 3
    +)
    +
    +// Full and short names for pb.GoFeatures.APILevel.
    +const (
    +	GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel"
    +	GoFeatures_APILevel_enum_name     = "APILevel"
    +)
    +
    +// Enum values for pb.GoFeatures.APILevel.
    +const (
    +	GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0
    +	GoFeatures_API_OPEN_enum_value              = 1
    +	GoFeatures_API_HYBRID_enum_value            = 2
    +	GoFeatures_API_OPAQUE_enum_value            = 3
    +)
    +
    +// Full and short names for pb.GoFeatures.StripEnumPrefix.
    +const (
    +	GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
    +	GoFeatures_StripEnumPrefix_enum_name     = "StripEnumPrefix"
    +)
    +
    +// Enum values for pb.GoFeatures.StripEnumPrefix.
    +const (
    +	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value   = 0
    +	GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value          = 1
    +	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
    +	GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value         = 3
    +)
    +
    +// Extension numbers
    +const (
    +	FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
     )
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
    index 8f9ea02ff2..bef5a25fbb 100644
    --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
    +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
    @@ -4,7 +4,7 @@
     
     package genid
     
    -import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +import "google.golang.org/protobuf/reflect/protoreflect"
     
     // Generic field names and numbers for synthetic map entry messages.
     const (
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/google.golang.org/protobuf/internal/genid/name.go
    new file mode 100644
    index 0000000000..224f339302
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/genid/name.go
    @@ -0,0 +1,12 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package genid
    +
    +const (
    +	NoUnkeyedLiteral_goname  = "noUnkeyedLiteral"
    +	NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral"
    +
    +	BuilderSuffix_goname = "_builder"
    +)
    diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
    index 429384b85b..9404270de0 100644
    --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
    +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
    @@ -4,7 +4,7 @@
     
     package genid
     
    -import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +import "google.golang.org/protobuf/reflect/protoreflect"
     
     // Generic field name and number for messages in wrappers.proto.
     const (
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
    index a371f98de1..5d5771c2ed 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
    @@ -22,13 +22,13 @@ type Export struct{}
     
     // NewError formats a string according to the format specifier and arguments and
     // returns an error that has a "proto" prefix.
    -func (Export) NewError(f string, x ...interface{}) error {
    +func (Export) NewError(f string, x ...any) error {
     	return errors.New(f, x...)
     }
     
     // enum is any enum type generated by protoc-gen-go
     // and must be a named int32 type.
    -type enum = interface{}
    +type enum = any
     
     // EnumOf returns the protoreflect.Enum interface over e.
     // It returns nil if e is nil.
    @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu
     
     // message is any message type generated by protoc-gen-go
     // and must be a pointer to a named struct type.
    -type message = interface{}
    +type message = any
     
     // legacyMessageWrapper wraps a v2 message as a v1 message.
     type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
    new file mode 100644
    index 0000000000..6075d6f696
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
    @@ -0,0 +1,128 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"strconv"
    +	"sync/atomic"
    +	"unsafe"
    +
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +)
    +
    +func (Export) UnmarshalField(msg any, fieldNum int32) {
    +	UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
    +}
    +
    +// Present checks the presence set for a certain field number (zero
    +// based, ordered by appearance in original proto file). part is
    +// a pointer to the correct element in the bitmask array, num is the
    +// field number unaltered.  Example (field number 70 -> part =
    +// &m.XXX_presence[1], num = 70)
    +func (Export) Present(part *uint32, num uint32) bool {
    +	// This hook will read an unprotected shadow presence set if
     +	// we're running under the race detector
    +	raceDetectHookPresent(part, num)
    +	return atomic.LoadUint32(part)&(1<<(num%32)) > 0
    +}
    +
    +// SetPresent adds a field to the presence set. part is a pointer to
    +// the relevant element in the array and num is the field number
    +// unaltered.  size is the number of fields in the protocol
    +// buffer.
    +func (Export) SetPresent(part *uint32, num uint32, size uint32) {
    +	// This hook will mutate an unprotected shadow presence set if
    +	// we're running under the race detector
    +	raceDetectHookSetPresent(part, num, presenceSize(size))
    +	for {
    +		old := atomic.LoadUint32(part)
    +		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
    +			return
    +		}
    +	}
    +}
    +
    +// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
    +// It is meant for use by builder methods, where the message is known not
    +// to be accessible yet by other goroutines.
    +func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
    +	// This hook will mutate an unprotected shadow presence set if
    +	// we're running under the race detector
    +	raceDetectHookSetPresent(part, num, presenceSize(size))
    +	*part |= 1 << (num % 32)
    +}
    +
     +// ClearPresent removes a field from the presence set. part is a
    +// pointer to the relevant element in the presence array and num is
    +// the field number unaltered.
    +func (Export) ClearPresent(part *uint32, num uint32) {
    +	// This hook will mutate an unprotected shadow presence set if
    +	// we're running under the race detector
    +	raceDetectHookClearPresent(part, num)
    +	for {
    +		old := atomic.LoadUint32(part)
    +		if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
    +			return
    +		}
    +	}
    +}
    +
    +// interfaceToPointer takes a pointer to an empty interface whose value is a
    +// pointer type, and converts it into a "pointer" that points to the same
    +// target
    +func interfaceToPointer(i *any) pointer {
    +	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
    +}
    +
    +func (p pointer) atomicGetPointer() pointer {
    +	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
    +}
    +
    +func (p pointer) atomicSetPointer(q pointer) {
    +	atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
    +}
    +
    +// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
    +// pointer) and returns true if the pointed-to pointer is nil (using an
    +// atomic load).  This function is inlineable and, on x86, just becomes a
    +// simple load and compare.
    +func (Export) AtomicCheckPointerIsNil(ptr any) bool {
    +	return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
    +}
    +
    +// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
    +// second is a pointer) and atomically sets the second pointer into location
    +// referenced by first pointer.  Unfortunately, atomicSetPointer() does not inline
    +// (even on x86), so this does not become a simple store on x86.
    +func (Export) AtomicSetPointer(dstPtr, valPtr any) {
    +	interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
    +}
    +
    +// AtomicLoadPointer loads the pointer at the location pointed at by src,
    +// and stores that pointer value into the location pointed at by dst.
    +func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
    +	*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
    +}
    +
    +// AtomicInitializePointer makes ptr and dst point to the same value.
    +//
    +// If *ptr is a nil pointer, it sets *ptr = *dst.
    +//
    +// If *ptr is a non-nil pointer, it sets *dst = *ptr.
    +func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
    +	if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
    +		*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
    +	}
    +}
    +
    +// MessageFieldStringOf returns the field formatted as a string,
    +// either as the field name if resolvable otherwise as a decimal string.
    +func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
    +	fd := md.Fields().ByNumber(n)
    +	if fd != nil {
    +		return string(fd.Name())
    +	}
    +	return strconv.Itoa(int(n))
    +}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
    new file mode 100644
    index 0000000000..ea276547cd
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
    @@ -0,0 +1,34 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +//go:build !race
    +
    +package impl
    +
    +// There is no additional data as we're not running under race detector.
    +type RaceDetectHookData struct{}
    +
    +// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
    +func (presence) raceDetectHookPresent(num uint32)                       {}
    +func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
    +func (presence) raceDetectHookClearPresent(num uint32)                  {}
    +func (presence) raceDetectHookAllocAndCopy(src presence)                {}
    +
    +// raceDetectHookPresent is called by the generated file interface
    +// (*proto.internalFuncs) Present to optionally read an unprotected
    +// shadow bitmap when race detection is enabled. In regular code it is
    +// a noop.
    +func raceDetectHookPresent(field *uint32, num uint32) {}
    +
    +// raceDetectHookSetPresent is called by the generated file interface
    +// (*proto.internalFuncs) SetPresent to optionally write an unprotected
    +// shadow bitmap when race detection is enabled. In regular code it is
    +// a noop.
    +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
    +
    +// raceDetectHookClearPresent is called by the generated file interface
    +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
    +// shadow bitmap when race detection is enabled. In regular code it is
    +// a noop.
    +func raceDetectHookClearPresent(field *uint32, num uint32) {}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
    new file mode 100644
    index 0000000000..e9a27583ae
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
    @@ -0,0 +1,126 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +//go:build race
    +
    +package impl
    +
    +// When running under race detector, we add a presence map of bytes, that we can access
    +// in the hook functions so that we trigger the race detection whenever we have concurrent
    +// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
    +// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
    +type RaceDetectHookData struct {
    +	shadowPresence *[]byte
    +}
    +
    +// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
    +// using non-atomic operations.
    +func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
    +	sp := make([]byte, size)
    +	atomicStoreShadowPresence(&data.shadowPresence, &sp)
    +}
    +
    +func (p presence) raceDetectHookPresent(num uint32) {
    +	data := p.toRaceDetectData()
    +	if data == nil {
    +		return
    +	}
    +	sp := atomicLoadShadowPresence(&data.shadowPresence)
    +	if sp != nil {
    +		_ = (*sp)[num]
    +	}
    +}
    +
    +func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
    +	data := p.toRaceDetectData()
    +	if data == nil {
    +		return
    +	}
    +	sp := atomicLoadShadowPresence(&data.shadowPresence)
    +	if sp == nil {
    +		data.raceDetectHookAlloc(size)
    +		sp = atomicLoadShadowPresence(&data.shadowPresence)
    +	}
    +	(*sp)[num] = 1
    +}
    +
    +func (p presence) raceDetectHookClearPresent(num uint32) {
    +	data := p.toRaceDetectData()
    +	if data == nil {
    +		return
    +	}
    +	sp := atomicLoadShadowPresence(&data.shadowPresence)
    +	if sp != nil {
    +		(*sp)[num] = 0
    +
    +	}
    +}
    +
    +// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies
    +// shadowPresence bytes from src to lazy.
    +func (p presence) raceDetectHookAllocAndCopy(q presence) {
    +	sData := q.toRaceDetectData()
    +	dData := p.toRaceDetectData()
    +	if sData == nil {
    +		return
    +	}
    +	srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
    +	if srcSp == nil {
    +		atomicStoreShadowPresence(&dData.shadowPresence, nil)
    +		return
    +	}
    +	n := len(*srcSp)
    +	dSlice := make([]byte, n)
    +	atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
    +	for i := 0; i < n; i++ {
    +		dSlice[i] = (*srcSp)[i]
    +	}
    +}
    +
    +// raceDetectHookPresent is called by the generated file interface
    +// (*proto.internalFuncs) Present to optionally read an unprotected
    +// shadow bitmap when race detection is enabled. In regular code it is
    +// a noop.
    +func raceDetectHookPresent(field *uint32, num uint32) {
    +	data := findPointerToRaceDetectData(field, num)
    +	if data == nil {
    +		return
    +	}
    +	sp := atomicLoadShadowPresence(&data.shadowPresence)
    +	if sp != nil {
    +		_ = (*sp)[num]
    +	}
    +}
    +
    +// raceDetectHookSetPresent is called by the generated file interface
    +// (*proto.internalFuncs) SetPresent to optionally write an unprotected
    +// shadow bitmap when race detection is enabled. In regular code it is
    +// a noop.
    +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
    +	data := findPointerToRaceDetectData(field, num)
    +	if data == nil {
    +		return
    +	}
    +	sp := atomicLoadShadowPresence(&data.shadowPresence)
    +	if sp == nil {
    +		data.raceDetectHookAlloc(size)
    +		sp = atomicLoadShadowPresence(&data.shadowPresence)
    +	}
    +	(*sp)[num] = 1
    +}
    +
    +// raceDetectHookClearPresent is called by the generated file interface
    +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
    +// shadow bitmap when race detection is enabled. In regular code it is
    +// a noop.
    +func raceDetectHookClearPresent(field *uint32, num uint32) {
    +	data := findPointerToRaceDetectData(field, num)
    +	if data == nil {
    +		return
    +	}
    +	sp := atomicLoadShadowPresence(&data.shadowPresence)
    +	if sp != nil {
    +		(*sp)[num] = 0
    +	}
    +}
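
bitmap_race.go only exists because the real presence words are touched exclusively through atomics, which the race detector treats as synchronization; mirroring every presence read and write with a plain, unsynchronized byte access to shadowPresence is what makes concurrent misuse of lazy fields visible under -race. A rough, self-contained sketch of that idea, with a hypothetical type collapsed into one file rather than the //go:build race / !race pair used in the patch:

```go
// Illustrative only: why the race-detector build keeps an unsynchronized
// "shadow" slice next to the real atomic bitmap. The plain byte accesses
// below re-expose the reads and writes that atomics would otherwise hide
// from the race detector. Hypothetical type, not the vendored API.
package main

import (
	"fmt"
	"sync/atomic"
)

type shadowedPresence struct {
	bits   []uint32 // real bitmap, always accessed atomically
	shadow []byte   // unsynchronized mirror, meaningful only under -race
}

func (p *shadowedPresence) set(num uint32) {
	p.shadow[num] = 1 // plain write: visible to the race detector
	for {
		old := atomic.LoadUint32(&p.bits[num/32])
		if atomic.CompareAndSwapUint32(&p.bits[num/32], old, old|1<<(num%32)) {
			return
		}
	}
}

func (p *shadowedPresence) present(num uint32) bool {
	_ = p.shadow[num] // plain read for the detector's benefit
	return atomic.LoadUint32(&p.bits[num/32])&(1<<(num%32)) != 0
}

func main() {
	p := &shadowedPresence{bits: make([]uint32, 1), shadow: make([]byte, 32)}
	p.set(3)
	fmt.Println(p.present(3)) // true
}
```
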
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
    index bff041edc9..fe2c719ce4 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
    @@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
     		}
     		return nil
     	}
    +
    +	var presence presence
    +	if mi.presenceOffset.IsValid() {
    +		presence = p.Apply(mi.presenceOffset).PresenceInfo()
    +	}
    +
     	if mi.extensionOffset.IsValid() {
     		e := p.Apply(mi.extensionOffset).Extensions()
     		if err := mi.isInitExtensions(e); err != nil {
    @@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
     		if !f.isRequired && f.funcs.isInit == nil {
     			continue
     		}
    +
    +		if f.presenceIndex != noPresence {
    +			if !presence.Present(f.presenceIndex) {
    +				if f.isRequired {
    +					return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
    +				}
    +				continue
    +			}
    +			if f.funcs.isInit != nil {
    +				f.mi.init()
    +				if f.mi.needsInitCheck {
    +					if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
    +						lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
    +						if !lazy.AllowedPartial() {
    +							// Nothing to see here, it was checked on unmarshal
    +							continue
    +						}
    +						mi.lazyUnmarshal(p, f.num)
    +					}
    +					if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
    +						return err
    +					}
    +				}
    +			}
    +			continue
    +		}
    +
     		fptr := p.Apply(f.offset)
     		if f.isPointer && fptr.Elem().IsNil() {
     			if f.isRequired {
    @@ -68,7 +101,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
     	}
     	for _, x := range *ext {
     		ei := getExtensionFieldInfo(x.Type())
    -		if ei.funcs.isInit == nil {
    +		if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
     			continue
     		}
     		v := x.Value()
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
    index 2b8f122c27..0d5b546e0e 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
    @@ -67,7 +67,6 @@ type lazyExtensionValue struct {
     	xi         *extensionFieldInfo
     	value      protoreflect.Value
     	b          []byte
    -	fn         func() protoreflect.Value
     }
     
     type ExtensionField struct {
    @@ -99,6 +98,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
     	return false
     }
     
     +// isUnexpandedLazy returns true if the ExtensionField is lazy and not
    +// yet expanded, which means it's present and already checked for
    +// initialized required fields.
    +func (f *ExtensionField) isUnexpandedLazy() bool {
    +	return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
    +}
    +
    +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
    +//
    +// The returned buffer has to be kept over whatever operation we're planning,
    +// as re-retrieving it will fail after the message is lazily decoded.
    +func (f *ExtensionField) lazyBuffer() []byte {
    +	// This function might be in the critical path, so check the atomic without
     +	// taking a lock first, then only take the lock if needed.
    +	if !f.isUnexpandedLazy() {
    +		return nil
    +	}
    +	f.lazy.mu.Lock()
    +	defer f.lazy.mu.Unlock()
    +	return f.lazy.b
    +}
    +
     func (f *ExtensionField) lazyInit() {
     	f.lazy.mu.Lock()
     	defer f.lazy.mu.Unlock()
    @@ -136,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
     		}
     		f.lazy.value = val
     	} else {
    -		f.lazy.value = f.lazy.fn()
    +		panic("No support for lazy fns for ExtensionField")
     	}
     	f.lazy.xi = nil
    -	f.lazy.fn = nil
     	f.lazy.b = nil
     	atomic.StoreUint32(&f.lazy.atomicOnce, 1)
     }
    @@ -152,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
     	f.lazy = nil
     }
     
    -// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
    -// This must not be called concurrently.
    -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
    -	f.typ = t
    -	f.lazy = &lazyExtensionValue{fn: fn}
    -}
    -
     // Value returns the value of the extension field.
     // This may be called concurrently.
     func (f *ExtensionField) Value() protoreflect.Value {
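
The new isUnexpandedLazy and lazyBuffer helpers follow a check-then-lock shape: an atomic load of atomicOnce answers "already expanded?" cheaply, and the mutex is taken only while the raw bytes are still live. A minimal sketch of that pattern with hypothetical types, not the vendored API:

```go
// Illustrative only: check an atomic flag on the fast path, lock only when the
// value is still lazy. Hypothetical types, not the vendored API.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazyValue struct {
	mu       sync.Mutex
	expanded uint32 // 0 = still raw bytes, 1 = decoded
	raw      []byte
}

// rawBuffer returns the undecoded bytes, or nil once the value has been expanded.
func (v *lazyValue) rawBuffer() []byte {
	if atomic.LoadUint32(&v.expanded) != 0 {
		return nil // fast path: no lock needed
	}
	v.mu.Lock()
	defer v.mu.Unlock()
	return v.raw
}

// expand simulates lazy decoding: it drops the raw bytes and flips the flag.
func (v *lazyValue) expand() {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.raw = nil
	atomic.StoreUint32(&v.expanded, 1)
}

func main() {
	v := &lazyValue{raw: []byte{0x08, 0x01}}
	fmt.Println(len(v.rawBuffer())) // 2
	v.expand()
	fmt.Println(v.rawBuffer() == nil) // true
}
```
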
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
    index 3fadd241e1..7c1f66c8c1 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
    @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
     			if err != nil {
     				return out, err
     			}
    +			if cf.funcs.isInit == nil {
    +				out.initialized = true
    +			}
     			vi.Set(vw)
     			return out, nil
     		}
    @@ -233,9 +236,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
     }
     
     func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    +	calculatedSize := f.mi.sizePointer(p.Elem(), opts)
     	b = protowire.AppendVarint(b, f.wiretag)
    -	b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
    -	return f.mi.marshalAppendPointer(b, p.Elem(), opts)
    +	b = protowire.AppendVarint(b, uint64(calculatedSize))
    +	before := len(b)
    +	b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
    +	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
    +		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
    +	}
    +	return b, err
     }
     
     func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    @@ -262,14 +271,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error {
     	return f.mi.checkInitializedPointer(p.Elem())
     }
     
    -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
    -	return protowire.SizeBytes(proto.Size(m)) + tagsize
    +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int {
    +	return protowire.SizeBytes(opts.Options().Size(m)) + tagsize
     }
     
     func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
    +	mopts := opts.Options()
    +	calculatedSize := mopts.Size(m)
     	b = protowire.AppendVarint(b, wiretag)
    -	b = protowire.AppendVarint(b, uint64(proto.Size(m)))
    -	return opts.Options().MarshalAppend(b, m)
    +	b = protowire.AppendVarint(b, uint64(calculatedSize))
    +	before := len(b)
    +	b, err := mopts.MarshalAppend(b, m)
    +	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
    +		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
    +	}
    +	return b, err
     }
     
     func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
    @@ -405,8 +421,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
     	return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
     }
     
    -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
    -	return 2*tagsize + proto.Size(m)
    +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int {
    +	return 2*tagsize + opts.Options().Size(m)
     }
     
     func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
    @@ -482,10 +498,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal
     		b = protowire.AppendVarint(b, f.wiretag)
     		siz := f.mi.sizePointer(v, opts)
     		b = protowire.AppendVarint(b, uint64(siz))
    +		before := len(b)
     		b, err = f.mi.marshalAppendPointer(b, v, opts)
     		if err != nil {
     			return b, err
     		}
    +		if measuredSize := len(b) - before; siz != measuredSize {
    +			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
    +		}
     	}
     	return b, nil
     }
    @@ -520,28 +540,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error {
     	return nil
     }
     
    -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
    +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int {
    +	mopts := opts.Options()
     	s := p.PointerSlice()
     	n := 0
     	for _, v := range s {
     		m := asMessage(v.AsValueOf(goType.Elem()))
    -		n += protowire.SizeBytes(proto.Size(m)) + tagsize
    +		n += protowire.SizeBytes(mopts.Size(m)) + tagsize
     	}
     	return n
     }
     
     func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
    +	mopts := opts.Options()
     	s := p.PointerSlice()
     	var err error
     	for _, v := range s {
     		m := asMessage(v.AsValueOf(goType.Elem()))
     		b = protowire.AppendVarint(b, wiretag)
    -		siz := proto.Size(m)
    +		siz := mopts.Size(m)
     		b = protowire.AppendVarint(b, uint64(siz))
    -		b, err = opts.Options().MarshalAppend(b, m)
    +		before := len(b)
    +		b, err = mopts.MarshalAppend(b, m)
     		if err != nil {
     			return b, err
     		}
    +		if measuredSize := len(b) - before; siz != measuredSize {
    +			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
    +		}
     	}
     	return b, nil
     }
    @@ -582,11 +608,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error {
     // Slices of messages
     
     func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
    +	mopts := opts.Options()
     	list := listv.List()
     	n := 0
     	for i, llen := 0, list.Len(); i < llen; i++ {
     		m := list.Get(i).Message().Interface()
    -		n += protowire.SizeBytes(proto.Size(m)) + tagsize
    +		n += protowire.SizeBytes(mopts.Size(m)) + tagsize
     	}
     	return n
     }
    @@ -597,13 +624,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
     	for i, llen := 0, list.Len(); i < llen; i++ {
     		m := list.Get(i).Message().Interface()
     		b = protowire.AppendVarint(b, wiretag)
    -		siz := proto.Size(m)
    +		siz := mopts.Size(m)
     		b = protowire.AppendVarint(b, uint64(siz))
    +		before := len(b)
     		var err error
     		b, err = mopts.MarshalAppend(b, m)
     		if err != nil {
     			return b, err
     		}
    +		if measuredSize := len(b) - before; siz != measuredSize {
    +			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
    +		}
     	}
     	return b, nil
     }
    @@ -651,11 +682,12 @@ var coderMessageSliceValue = valueCoderFuncs{
     }
     
     func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
    +	mopts := opts.Options()
     	list := listv.List()
     	n := 0
     	for i, llen := 0, list.Len(); i < llen; i++ {
     		m := list.Get(i).Message().Interface()
    -		n += 2*tagsize + proto.Size(m)
    +		n += 2*tagsize + mopts.Size(m)
     	}
     	return n
     }
    @@ -738,12 +770,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type)
     	}
     }
     
    -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
    +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int {
    +	mopts := opts.Options()
     	s := p.PointerSlice()
     	n := 0
     	for _, v := range s {
     		m := asMessage(v.AsValueOf(messageType.Elem()))
    -		n += 2*tagsize + proto.Size(m)
    +		n += 2*tagsize + mopts.Size(m)
     	}
     	return n
     }
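
The recurring change in this file, and again in the opaque and map coders below, is a size-consistency guard: the length prefix is written from a precomputed size, the message is marshaled, and the byte count actually appended is compared against that prediction, returning errors.MismatchedSizeCalculation instead of emitting a corrupt length-delimited record. A hedged sketch of the same guard using only the public protowire API; appendLengthPrefixed is a hypothetical helper, not something the patch introduces:

```go
// Illustrative only: predict a size, write the length prefix, marshal, then
// verify the measured size matches the prediction.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// appendLengthPrefixed writes tag, predicted length, and body, then reports an
// error if the body's measured size disagrees with the prediction used for the
// length prefix (the situation the vendored code flags as a mismatched size
// calculation).
func appendLengthPrefixed(b []byte, tag uint64, predictedSize int, body func([]byte) []byte) ([]byte, error) {
	b = protowire.AppendVarint(b, tag)
	b = protowire.AppendVarint(b, uint64(predictedSize))
	before := len(b)
	b = body(b)
	if measured := len(b) - before; measured != predictedSize {
		return nil, fmt.Errorf("mismatched size calculation: predicted %d, wrote %d", predictedSize, measured)
	}
	return b, nil
}

func main() {
	payload := []byte("hello")
	b, err := appendLengthPrefixed(nil, protowire.EncodeTag(1, protowire.BytesType), len(payload),
		func(b []byte) []byte { return append(b, payload...) })
	fmt.Println(len(b), err) // 7 <nil>
}
```
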
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
    new file mode 100644
    index 0000000000..76818ea252
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
    @@ -0,0 +1,264 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"fmt"
    +	"reflect"
    +
    +	"google.golang.org/protobuf/encoding/protowire"
    +	"google.golang.org/protobuf/internal/errors"
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +)
    +
    +func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
    +	mi := getMessageInfo(ft)
    +	if mi == nil {
    +		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
    +	}
    +	switch fd.Kind() {
    +	case protoreflect.MessageKind:
    +		return mi, pointerCoderFuncs{
    +			size:      sizeOpaqueMessage,
    +			marshal:   appendOpaqueMessage,
    +			unmarshal: consumeOpaqueMessage,
    +			isInit:    isInitOpaqueMessage,
    +			merge:     mergeOpaqueMessage,
    +		}
    +	case protoreflect.GroupKind:
    +		return mi, pointerCoderFuncs{
    +			size:      sizeOpaqueGroup,
    +			marshal:   appendOpaqueGroup,
    +			unmarshal: consumeOpaqueGroup,
    +			isInit:    isInitOpaqueMessage,
    +			merge:     mergeOpaqueMessage,
    +		}
    +	}
    +	panic("unexpected field kind")
    +}
    +
    +func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    +	return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
    +}
    +
    +func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    +	mp := p.AtomicGetPointer()
    +	calculatedSize := f.mi.sizePointer(mp, opts)
    +	b = protowire.AppendVarint(b, f.wiretag)
    +	b = protowire.AppendVarint(b, uint64(calculatedSize))
    +	before := len(b)
    +	b, err := f.mi.marshalAppendPointer(b, mp, opts)
    +	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
    +		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
    +	}
    +	return b, err
    +}
    +
    +func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +	if wtyp != protowire.BytesType {
    +		return out, errUnknown
    +	}
    +	v, n := protowire.ConsumeBytes(b)
    +	if n < 0 {
    +		return out, errDecode
    +	}
    +	mp := p.AtomicGetPointer()
    +	if mp.IsNil() {
    +		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
    +	}
    +	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
    +	if err != nil {
    +		return out, err
    +	}
    +	out.n = n
    +	out.initialized = o.initialized
    +	return out, nil
    +}
    +
    +func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
    +	mp := p.AtomicGetPointer()
    +	if mp.IsNil() {
    +		return nil
    +	}
    +	return f.mi.checkInitializedPointer(mp)
    +}
    +
    +func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
    +	dstmp := dst.AtomicGetPointer()
    +	if dstmp.IsNil() {
    +		dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
    +	}
    +	f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
    +}
    +
    +func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    +	return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
    +}
    +
    +func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    +	b = protowire.AppendVarint(b, f.wiretag) // start group
    +	b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
    +	b = protowire.AppendVarint(b, f.wiretag+1) // end group
    +	return b, err
    +}
    +
    +func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +	if wtyp != protowire.StartGroupType {
    +		return out, errUnknown
    +	}
    +	mp := p.AtomicGetPointer()
    +	if mp.IsNil() {
    +		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
    +	}
    +	o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
    +	return o, e
    +}
    +
    +func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
    +	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
    +		panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
    +	}
    +	mt := ft.Elem().Elem() // *[]*T -> *T
    +	mi := getMessageInfo(mt)
    +	if mi == nil {
    +		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
    +	}
    +	switch fd.Kind() {
    +	case protoreflect.MessageKind:
    +		return mi, pointerCoderFuncs{
    +			size:      sizeOpaqueMessageSlice,
    +			marshal:   appendOpaqueMessageSlice,
    +			unmarshal: consumeOpaqueMessageSlice,
    +			isInit:    isInitOpaqueMessageSlice,
    +			merge:     mergeOpaqueMessageSlice,
    +		}
    +	case protoreflect.GroupKind:
    +		return mi, pointerCoderFuncs{
    +			size:      sizeOpaqueGroupSlice,
    +			marshal:   appendOpaqueGroupSlice,
    +			unmarshal: consumeOpaqueGroupSlice,
    +			isInit:    isInitOpaqueMessageSlice,
    +			merge:     mergeOpaqueMessageSlice,
    +		}
    +	}
    +	panic("unexpected field kind")
    +}
    +
    +func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    +	s := p.AtomicGetPointer().PointerSlice()
    +	n := 0
    +	for _, v := range s {
    +		n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
    +	}
    +	return n
    +}
    +
    +func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    +	s := p.AtomicGetPointer().PointerSlice()
    +	var err error
    +	for _, v := range s {
    +		b = protowire.AppendVarint(b, f.wiretag)
    +		siz := f.mi.sizePointer(v, opts)
    +		b = protowire.AppendVarint(b, uint64(siz))
    +		before := len(b)
    +		b, err = f.mi.marshalAppendPointer(b, v, opts)
    +		if err != nil {
    +			return b, err
    +		}
    +		if measuredSize := len(b) - before; siz != measuredSize {
    +			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
    +		}
    +	}
    +	return b, nil
    +}
    +
    +func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +	if wtyp != protowire.BytesType {
    +		return out, errUnknown
    +	}
    +	v, n := protowire.ConsumeBytes(b)
    +	if n < 0 {
    +		return out, errDecode
    +	}
    +	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
    +	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
    +	if err != nil {
    +		return out, err
    +	}
    +	sp := p.AtomicGetPointer()
    +	if sp.IsNil() {
    +		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
    +	}
    +	sp.AppendPointerSlice(mp)
    +	out.n = n
    +	out.initialized = o.initialized
    +	return out, nil
    +}
    +
    +func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
    +	sp := p.AtomicGetPointer()
    +	if sp.IsNil() {
    +		return nil
    +	}
    +	s := sp.PointerSlice()
    +	for _, v := range s {
    +		if err := f.mi.checkInitializedPointer(v); err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
    +	ds := dst.AtomicGetPointer()
    +	if ds.IsNil() {
    +		ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
    +	}
    +	for _, sp := range src.AtomicGetPointer().PointerSlice() {
    +		dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
    +		f.mi.mergePointer(dm, sp, opts)
    +		ds.AppendPointerSlice(dm)
    +	}
    +}
    +
    +func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    +	s := p.AtomicGetPointer().PointerSlice()
    +	n := 0
    +	for _, v := range s {
    +		n += 2*f.tagsize + f.mi.sizePointer(v, opts)
    +	}
    +	return n
    +}
    +
    +func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    +	s := p.AtomicGetPointer().PointerSlice()
    +	var err error
    +	for _, v := range s {
    +		b = protowire.AppendVarint(b, f.wiretag) // start group
    +		b, err = f.mi.marshalAppendPointer(b, v, opts)
    +		if err != nil {
    +			return b, err
    +		}
    +		b = protowire.AppendVarint(b, f.wiretag+1) // end group
    +	}
    +	return b, nil
    +}
    +
    +func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +	if wtyp != protowire.StartGroupType {
    +		return out, errUnknown
    +	}
    +	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
    +	out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
    +	if err != nil {
    +		return out, err
    +	}
    +	sp := p.AtomicGetPointer()
    +	if sp.IsNil() {
    +		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
    +	}
    +	sp.AppendPointerSlice(mp)
    +	return out, err
    +}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
    index 111b9d16f9..229c698013 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
    @@ -9,6 +9,7 @@ import (
     	"sort"
     
     	"google.golang.org/protobuf/encoding/protowire"
    +	"google.golang.org/protobuf/internal/errors"
     	"google.golang.org/protobuf/internal/genid"
     	"google.golang.org/protobuf/reflect/protoreflect"
     )
    @@ -93,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
     		return 0
     	}
     	n := 0
    -	iter := mapRange(mapv)
    +	iter := mapv.MapRange()
     	for iter.Next() {
     		key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
     		keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
    @@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
     		size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
     		size += mapi.valFuncs.size(val, mapValTagSize, opts)
     		b = protowire.AppendVarint(b, uint64(size))
    +		before := len(b)
     		b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
     		if err != nil {
     			return nil, err
     		}
    -		return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
    +		b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
    +		if measuredSize := len(b) - before; size != measuredSize && err == nil {
    +			return nil, errors.MismatchedSizeCalculation(size, measuredSize)
    +		}
    +		return b, err
     	} else {
     		key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
     		val := pointerOfValue(valrv)
    @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
     		}
     		b = protowire.AppendVarint(b, mapi.valWiretag)
     		b = protowire.AppendVarint(b, uint64(valSize))
    -		return f.mi.marshalAppendPointer(b, val, opts)
    +		before := len(b)
    +		b, err = f.mi.marshalAppendPointer(b, val, opts)
    +		if measuredSize := len(b) - before; valSize != measuredSize && err == nil {
    +			return nil, errors.MismatchedSizeCalculation(valSize, measuredSize)
    +		}
    +		return b, err
     	}
     }
     
    @@ -270,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o
     	if opts.Deterministic() {
     		return appendMapDeterministic(b, mapv, mapi, f, opts)
     	}
    -	iter := mapRange(mapv)
    +	iter := mapv.MapRange()
     	for iter.Next() {
     		var err error
     		b = protowire.AppendVarint(b, f.wiretag)
    @@ -317,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
     		if !mi.needsInitCheck {
     			return nil
     		}
    -		iter := mapRange(mapv)
    +		iter := mapv.MapRange()
     		for iter.Next() {
     			val := pointerOfValue(iter.Value())
     			if err := mi.checkInitializedPointer(val); err != nil {
    @@ -325,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
     			}
     		}
     	} else {
    -		iter := mapRange(mapv)
    +		iter := mapv.MapRange()
     		for iter.Next() {
     			val := mapi.conv.valConv.PBValueOf(iter.Value())
     			if err := mapi.valFuncs.isInit(val); err != nil {
    @@ -345,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
     	if dstm.IsNil() {
     		dstm.Set(reflect.MakeMap(f.ft))
     	}
    -	iter := mapRange(srcm)
    +	iter := srcm.MapRange()
     	for iter.Next() {
     		dstm.SetMapIndex(iter.Key(), iter.Value())
     	}
    @@ -360,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
     	if dstm.IsNil() {
     		dstm.Set(reflect.MakeMap(f.ft))
     	}
    -	iter := mapRange(srcm)
    +	iter := srcm.MapRange()
     	for iter.Next() {
     		dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
     	}
    @@ -375,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
     	if dstm.IsNil() {
     		dstm.Set(reflect.MakeMap(f.ft))
     	}
    -	iter := mapRange(srcm)
    +	iter := srcm.MapRange()
     	for iter.Next() {
     		val := reflect.New(f.ft.Elem().Elem())
     		if f.mi != nil {
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
    deleted file mode 100644
    index 4b15493f2f..0000000000
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
    +++ /dev/null
    @@ -1,38 +0,0 @@
    -// Copyright 2019 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build !go1.12
    -// +build !go1.12
    -
    -package impl
    -
    -import "reflect"
    -
    -type mapIter struct {
    -	v    reflect.Value
    -	keys []reflect.Value
    -}
    -
    -// mapRange provides a less-efficient equivalent to
    -// the Go 1.12 reflect.Value.MapRange method.
    -func mapRange(v reflect.Value) *mapIter {
    -	return &mapIter{v: v}
    -}
    -
    -func (i *mapIter) Next() bool {
    -	if i.keys == nil {
    -		i.keys = i.v.MapKeys()
    -	} else {
    -		i.keys = i.keys[1:]
    -	}
    -	return len(i.keys) > 0
    -}
    -
    -func (i *mapIter) Key() reflect.Value {
    -	return i.keys[0]
    -}
    -
    -func (i *mapIter) Value() reflect.Value {
    -	return i.v.MapIndex(i.keys[0])
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
    deleted file mode 100644
    index 0b31b66eaf..0000000000
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -// Copyright 2019 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build go1.12
    -// +build go1.12
    -
    -package impl
    -
    -import "reflect"
    -
    -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
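
The two deleted files existed only to paper over the absence of reflect.Value.MapRange before Go 1.12; with the module's minimum Go version well past that, every call site in codec_map.go now uses mapv.MapRange() directly. For reference, the standard iterator looks like this:

```go
// Illustrative only: the reflect.Value.MapRange iterator (Go 1.12+) that the
// deleted compatibility shims emulated and that codec_map.go now calls directly.
package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	iter := reflect.ValueOf(m).MapRange()
	for iter.Next() {
		// Iteration order is unspecified, just like ranging over a Go map.
		fmt.Println(iter.Key().String(), iter.Value().Int())
	}
}
```
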
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
    index 6b2fdbb739..111d95833d 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
    @@ -32,6 +32,10 @@ type coderMessageInfo struct {
     	needsInitCheck     bool
     	isMessageSet       bool
     	numRequiredFields  uint8
    +
    +	lazyOffset     offset
    +	presenceOffset offset
    +	presenceSize   presenceSize
     }
     
     type coderFieldInfo struct {
    @@ -45,12 +49,19 @@ type coderFieldInfo struct {
     	tagsize    int                      // size of the varint-encoded tag
     	isPointer  bool                     // true if IsNil may be called on the struct field
     	isRequired bool                     // true if field is required
    +
    +	isLazy        bool
    +	presenceIndex uint32
     }
     
    +const noPresence = 0xffffffff
    +
     func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
     	mi.sizecacheOffset = invalidOffset
     	mi.unknownOffset = invalidOffset
     	mi.extensionOffset = invalidOffset
    +	mi.lazyOffset = invalidOffset
    +	mi.presenceOffset = si.presenceOffset
     
     	if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
     		mi.sizecacheOffset = si.sizecacheOffset
    @@ -107,12 +118,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
     				},
     			}
     		case isOneof:
    -			fieldOffset = offsetOf(fs, mi.Exporter)
    +			fieldOffset = offsetOf(fs)
     		case fd.IsWeak():
     			fieldOffset = si.weakOffset
     			funcs = makeWeakMessageFieldCoder(fd)
     		default:
    -			fieldOffset = offsetOf(fs, mi.Exporter)
    +			fieldOffset = offsetOf(fs)
     			childMessage, funcs = fieldCoder(fd, ft)
     		}
     		cf := &preallocFields[i]
    @@ -127,6 +138,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
     			validation: newFieldValidationInfo(mi, si, fd, ft),
     			isPointer:  fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
     			isRequired: fd.Cardinality() == protoreflect.Required,
    +
    +			presenceIndex: noPresence,
     		}
     		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
     		mi.coderFields[cf.num] = cf
    @@ -189,6 +202,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
     	if mi.methods.Merge == nil {
     		mi.methods.Merge = mi.merge
     	}
    +	if mi.methods.Equal == nil {
    +		mi.methods.Equal = equal
    +	}
     }
     
     // getUnknownBytes returns a *[]byte for the unknown fields.
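
The new presenceIndex field uses 0xffffffff (noPresence) as a "no presence bit" sentinel, so the hot encode/decode paths test a single uint32 instead of carrying an extra bool per field. A small, self-contained sketch of that sentinel pattern (the struct and names here are illustrative, not the vendored types):

package main

import "fmt"

const noPresence = 0xffffffff // sentinel: field has no presence bit

type fieldInfo struct {
	num           int
	presenceIndex uint32
}

func main() {
	fields := []fieldInfo{
		{num: 1, presenceIndex: 0},          // tracked in the presence bitmap
		{num: 2, presenceIndex: noPresence}, // not tracked
	}
	for _, f := range fields {
		if f.presenceIndex != noPresence {
			fmt.Printf("field %d uses presence bit %d\n", f.num, f.presenceIndex)
			continue
		}
		fmt.Printf("field %d has no presence bit\n", f.num)
	}
}
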
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
    new file mode 100644
    index 0000000000..f81d7d0db9
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
    @@ -0,0 +1,156 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"fmt"
    +	"reflect"
    +	"sort"
    +
    +	"google.golang.org/protobuf/encoding/protowire"
    +	"google.golang.org/protobuf/internal/encoding/messageset"
    +	"google.golang.org/protobuf/internal/order"
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +	piface "google.golang.org/protobuf/runtime/protoiface"
    +)
    +
    +func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
    +	mi.sizecacheOffset = si.sizecacheOffset
    +	mi.unknownOffset = si.unknownOffset
    +	mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
    +	mi.extensionOffset = si.extensionOffset
    +	mi.lazyOffset = si.lazyOffset
    +	mi.presenceOffset = si.presenceOffset
    +
    +	mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
    +	fields := mi.Desc.Fields()
    +	for i := 0; i < fields.Len(); i++ {
    +		fd := fields.Get(i)
    +
    +		fs := si.fieldsByNumber[fd.Number()]
    +		if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
    +			fs = si.oneofsByName[fd.ContainingOneof().Name()]
    +		}
    +		ft := fs.Type
    +		var wiretag uint64
    +		if !fd.IsPacked() {
    +			wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
    +		} else {
    +			wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
    +		}
    +		var fieldOffset offset
    +		var funcs pointerCoderFuncs
    +		var childMessage *MessageInfo
    +		switch {
    +		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
    +			fieldOffset = offsetOf(fs)
    +		case fd.IsWeak():
    +			fieldOffset = si.weakOffset
    +			funcs = makeWeakMessageFieldCoder(fd)
    +		case fd.Message() != nil && !fd.IsMap():
    +			fieldOffset = offsetOf(fs)
    +			if fd.IsList() {
    +				childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
    +			} else {
    +				childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
    +			}
    +		default:
    +			fieldOffset = offsetOf(fs)
    +			childMessage, funcs = fieldCoder(fd, ft)
    +		}
    +		cf := &coderFieldInfo{
    +			num:        fd.Number(),
    +			offset:     fieldOffset,
    +			wiretag:    wiretag,
    +			ft:         ft,
    +			tagsize:    protowire.SizeVarint(wiretag),
    +			funcs:      funcs,
    +			mi:         childMessage,
    +			validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
    +			isPointer: (fd.Cardinality() == protoreflect.Repeated ||
    +				fd.Kind() == protoreflect.MessageKind ||
    +				fd.Kind() == protoreflect.GroupKind),
    +			isRequired:    fd.Cardinality() == protoreflect.Required,
    +			presenceIndex: noPresence,
    +		}
    +
    +		// TODO: Use presence for all fields.
    +		//
    +		// In some cases, such as maps, presence means only "might be set" rather
    +		// than "is definitely set", but every field should have a presence bit to
    +		// permit us to skip over definitely-unset fields at marshal time.
    +
    +		var hasPresence bool
    +		hasPresence, cf.isLazy = usePresenceForField(si, fd)
    +
    +		if hasPresence {
    +			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
    +		}
    +
    +		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
    +		mi.coderFields[cf.num] = cf
    +	}
    +	for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
    +		if od := oneofs.Get(i); !od.IsSynthetic() {
    +			mi.initOneofFieldCoders(od, si.structInfo)
    +		}
    +	}
    +	if messageset.IsMessageSet(mi.Desc) {
    +		if !mi.extensionOffset.IsValid() {
    +			panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
    +		}
    +		if !mi.unknownOffset.IsValid() {
    +			panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
    +		}
    +		mi.isMessageSet = true
    +	}
    +	sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
    +		return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
    +	})
    +
    +	var maxDense protoreflect.FieldNumber
    +	for _, cf := range mi.orderedCoderFields {
    +		if cf.num >= 16 && cf.num >= 2*maxDense {
    +			break
    +		}
    +		maxDense = cf.num
    +	}
    +	mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
    +	for _, cf := range mi.orderedCoderFields {
    +		if int(cf.num) > len(mi.denseCoderFields) {
    +			break
    +		}
    +		mi.denseCoderFields[cf.num] = cf
    +	}
    +
    +	// To preserve compatibility with historic wire output, marshal oneofs last.
    +	if mi.Desc.Oneofs().Len() > 0 {
    +		sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
    +			fi := fields.ByNumber(mi.orderedCoderFields[i].num)
    +			fj := fields.ByNumber(mi.orderedCoderFields[j].num)
    +			return order.LegacyFieldOrder(fi, fj)
    +		})
    +	}
    +
    +	mi.needsInitCheck = needsInitCheck(mi.Desc)
    +	if mi.methods.Marshal == nil && mi.methods.Size == nil {
    +		mi.methods.Flags |= piface.SupportMarshalDeterministic
    +		mi.methods.Marshal = mi.marshal
    +		mi.methods.Size = mi.size
    +	}
    +	if mi.methods.Unmarshal == nil {
    +		mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
    +		mi.methods.Unmarshal = mi.unmarshal
    +	}
    +	if mi.methods.CheckInitialized == nil {
    +		mi.methods.CheckInitialized = mi.checkInitialized
    +	}
    +	if mi.methods.Merge == nil {
    +		mi.methods.Merge = mi.merge
    +	}
    +	if mi.methods.Equal == nil {
    +		mi.methods.Equal = equal
    +	}
    +}
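
The dense lookup table built above stops growing once field numbers become sparse (the cf.num >= 16 && cf.num >= 2*maxDense cutoff), so small, tightly packed field numbers get an O(1) slice index while outliers fall back to the map. A standalone sketch of the same cutoff, assuming plain ints in place of the vendored types:

package main

import "fmt"

// denseLimit mirrors the cutoff used when sizing denseCoderFields:
// keep extending the dense table while field numbers stay small or
// tightly packed relative to the largest dense number seen so far.
func denseLimit(sortedNums []int) int {
	maxDense := 0
	for _, n := range sortedNums {
		if n >= 16 && n >= 2*maxDense {
			break
		}
		maxDense = n
	}
	return maxDense
}

func main() {
	fmt.Println(denseLimit([]int{1, 2, 3, 5}))      // 5: all numbers stay dense
	fmt.Println(denseLimit([]int{1, 2, 3, 100000})) // 3: 100000 falls back to the map
}
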
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
    index b7a23faf1e..7a16ec13dd 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
    @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
     		}
     		num, _ := protowire.DecodeTag(xi.wiretag)
     		size += messageset.SizeField(num)
    +		if fullyLazyExtensions(opts) {
    +			// Don't expand the extension, instead use the buffer to calculate size
    +			if lb := x.lazyBuffer(); lb != nil {
    +				// We got hold of the buffer, so it's still lazy.
    +				// Don't count the tag size in the extension buffer, it's already added.
    +				size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
    +				continue
    +			}
    +		}
     		size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
     	}
     
    @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma
     	xi := getExtensionFieldInfo(x.Type())
     	num, _ := protowire.DecodeTag(xi.wiretag)
     	b = messageset.AppendFieldStart(b, num)
    +
    +	if fullyLazyExtensions(opts) {
    +		// Don't expand the extension if it's still in wire format, instead use the buffer content.
    +		if lb := x.lazyBuffer(); lb != nil {
    +			// The tag inside the lazy buffer is a different tag (the extension
    +			// number), but what we need here is the tag for FieldMessage:
    +			b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
    +			b = append(b, lb[xi.tagsize:]...)
    +			b = messageset.AppendFieldEnd(b)
    +			return b, nil
    +		}
    +	}
    +
     	b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
     	if err != nil {
     		return b, err
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
    deleted file mode 100644
    index 145c577bd6..0000000000
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
    +++ /dev/null
    @@ -1,210 +0,0 @@
    -// Copyright 2019 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build purego || appengine
    -// +build purego appengine
    -
    -package impl
    -
    -import (
    -	"reflect"
    -
    -	"google.golang.org/protobuf/encoding/protowire"
    -)
    -
    -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
    -	v := p.v.Elem().Int()
    -	return f.tagsize + protowire.SizeVarint(uint64(v))
    -}
    -
    -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    -	v := p.v.Elem().Int()
    -	b = protowire.AppendVarint(b, f.wiretag)
    -	b = protowire.AppendVarint(b, uint64(v))
    -	return b, nil
    -}
    -
    -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
    -	if wtyp != protowire.VarintType {
    -		return out, errUnknown
    -	}
    -	v, n := protowire.ConsumeVarint(b)
    -	if n < 0 {
    -		return out, errDecode
    -	}
    -	p.v.Elem().SetInt(int64(v))
    -	out.n = n
    -	return out, nil
    -}
    -
    -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
    -	dst.v.Elem().Set(src.v.Elem())
    -}
    -
    -var coderEnum = pointerCoderFuncs{
    -	size:      sizeEnum,
    -	marshal:   appendEnum,
    -	unmarshal: consumeEnum,
    -	merge:     mergeEnum,
    -}
    -
    -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    -	if p.v.Elem().Int() == 0 {
    -		return 0
    -	}
    -	return sizeEnum(p, f, opts)
    -}
    -
    -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    -	if p.v.Elem().Int() == 0 {
    -		return b, nil
    -	}
    -	return appendEnum(b, p, f, opts)
    -}
    -
    -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
    -	if src.v.Elem().Int() != 0 {
    -		dst.v.Elem().Set(src.v.Elem())
    -	}
    -}
    -
    -var coderEnumNoZero = pointerCoderFuncs{
    -	size:      sizeEnumNoZero,
    -	marshal:   appendEnumNoZero,
    -	unmarshal: consumeEnum,
    -	merge:     mergeEnumNoZero,
    -}
    -
    -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    -	return sizeEnum(pointer{p.v.Elem()}, f, opts)
    -}
    -
    -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    -	return appendEnum(b, pointer{p.v.Elem()}, f, opts)
    -}
    -
    -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    -	if wtyp != protowire.VarintType {
    -		return out, errUnknown
    -	}
    -	if p.v.Elem().IsNil() {
    -		p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
    -	}
    -	return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
    -}
    -
    -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
    -	if !src.v.Elem().IsNil() {
    -		v := reflect.New(dst.v.Type().Elem().Elem())
    -		v.Elem().Set(src.v.Elem().Elem())
    -		dst.v.Elem().Set(v)
    -	}
    -}
    -
    -var coderEnumPtr = pointerCoderFuncs{
    -	size:      sizeEnumPtr,
    -	marshal:   appendEnumPtr,
    -	unmarshal: consumeEnumPtr,
    -	merge:     mergeEnumPtr,
    -}
    -
    -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    -	s := p.v.Elem()
    -	for i, llen := 0, s.Len(); i < llen; i++ {
    -		size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
    -	}
    -	return size
    -}
    -
    -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    -	s := p.v.Elem()
    -	for i, llen := 0, s.Len(); i < llen; i++ {
    -		b = protowire.AppendVarint(b, f.wiretag)
    -		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
    -	}
    -	return b, nil
    -}
    -
    -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
    -	s := p.v.Elem()
    -	if wtyp == protowire.BytesType {
    -		b, n := protowire.ConsumeBytes(b)
    -		if n < 0 {
    -			return out, errDecode
    -		}
    -		for len(b) > 0 {
    -			v, n := protowire.ConsumeVarint(b)
    -			if n < 0 {
    -				return out, errDecode
    -			}
    -			rv := reflect.New(s.Type().Elem()).Elem()
    -			rv.SetInt(int64(v))
    -			s.Set(reflect.Append(s, rv))
    -			b = b[n:]
    -		}
    -		out.n = n
    -		return out, nil
    -	}
    -	if wtyp != protowire.VarintType {
    -		return out, errUnknown
    -	}
    -	v, n := protowire.ConsumeVarint(b)
    -	if n < 0 {
    -		return out, errDecode
    -	}
    -	rv := reflect.New(s.Type().Elem()).Elem()
    -	rv.SetInt(int64(v))
    -	s.Set(reflect.Append(s, rv))
    -	out.n = n
    -	return out, nil
    -}
    -
    -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
    -	dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
    -}
    -
    -var coderEnumSlice = pointerCoderFuncs{
    -	size:      sizeEnumSlice,
    -	marshal:   appendEnumSlice,
    -	unmarshal: consumeEnumSlice,
    -	merge:     mergeEnumSlice,
    -}
    -
    -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
    -	s := p.v.Elem()
    -	llen := s.Len()
    -	if llen == 0 {
    -		return 0
    -	}
    -	n := 0
    -	for i := 0; i < llen; i++ {
    -		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
    -	}
    -	return f.tagsize + protowire.SizeBytes(n)
    -}
    -
    -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
    -	s := p.v.Elem()
    -	llen := s.Len()
    -	if llen == 0 {
    -		return b, nil
    -	}
    -	b = protowire.AppendVarint(b, f.wiretag)
    -	n := 0
    -	for i := 0; i < llen; i++ {
    -		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
    -	}
    -	b = protowire.AppendVarint(b, uint64(n))
    -	for i := 0; i < llen; i++ {
    -		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
    -	}
    -	return b, nil
    -}
    -
    -var coderEnumPackedSlice = pointerCoderFuncs{
    -	size:      sizeEnumPackedSlice,
    -	marshal:   appendEnumPackedSlice,
    -	unmarshal: consumeEnumSlice,
    -	merge:     mergeEnumSlice,
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
    index 757642e23c..077712c2c5 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
    @@ -2,9 +2,6 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -//go:build !purego && !appengine
    -// +build !purego,!appengine
    -
     package impl
     
     // When using unsafe pointers, we can just treat enum values as int32s.
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
    index 185ef2efa5..f72ddd882f 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
    @@ -14,7 +14,7 @@ import (
     // unwrapper unwraps the value to the underlying value.
     // This is implemented by List and Map.
     type unwrapper interface {
    -	protoUnwrap() interface{}
    +	protoUnwrap() any
     }
     
     // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types.
    @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
     	return protoreflect.ValueOfString(v.Convert(stringType).String())
     }
     func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
    -	// pref.Value.String never panics, so we go through an interface
    +	// protoreflect.Value.String never panics, so we go through an interface
     	// conversion here to check the type.
     	s := v.Interface().(string)
     	if c.goType.Kind() == reflect.Slice && s == "" {
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
    index f89136516f..18cb96fd70 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
    @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value {
     func (ls *listReflect) IsValid() bool {
     	return !ls.v.IsNil()
     }
    -func (ls *listReflect) protoUnwrap() interface{} {
    +func (ls *listReflect) protoUnwrap() any {
     	return ls.v.Interface()
     }
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
    index f30b0a0576..e4580b3ac2 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
    @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value {
     	return v
     }
     func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
    -	iter := mapRange(ms.v)
    +	iter := ms.v.MapRange()
     	for iter.Next() {
     		k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
     		v := ms.valConv.PBValueOf(iter.Value())
    @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value {
     func (ms *mapReflect) IsValid() bool {
     	return !ms.v.IsNil()
     }
    -func (ms *mapReflect) protoUnwrap() interface{} {
    +func (ms *mapReflect) protoUnwrap() any {
     	return ms.v.Interface()
     }
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
    index cda0520c27..e0dd21fa5f 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
    @@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
     		AllowPartial:   true,
     		DiscardUnknown: o.DiscardUnknown(),
     		Resolver:       o.resolver,
    +
    +		NoLazyDecoding: o.NoLazyDecoding(),
     	}
     }
     
    @@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
     	return o.flags&protoiface.UnmarshalDiscardUnknown != 0
     }
     
    -func (o unmarshalOptions) IsDefault() bool {
    -	return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
    +func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
    +func (o unmarshalOptions) Validated() bool   { return o.flags&protoiface.UnmarshalValidated != 0 }
    +func (o unmarshalOptions) NoLazyDecoding() bool {
    +	return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
    +}
    +
    +func (o unmarshalOptions) CanBeLazy() bool {
    +	if o.resolver != protoregistry.GlobalTypes {
    +		return false
    +	}
    +	// We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set
    +	return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
     }
     
     var lazyUnmarshalOptions = unmarshalOptions{
     	resolver: protoregistry.GlobalTypes,
    -	depth:    protowire.DefaultRecursionLimit,
    +
    +	flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
    +
    +	depth: protowire.DefaultRecursionLimit,
     }
     
     type unmarshalOutput struct {
    @@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
     	if flags.ProtoLegacy && mi.isMessageSet {
     		return unmarshalMessageSet(mi, b, p, opts)
     	}
    +
    +	lazyDecoding := LazyEnabled() // default
    +	if opts.NoLazyDecoding() {
    +		lazyDecoding = false // explicitly disabled
    +	}
    +	if mi.lazyOffset.IsValid() && lazyDecoding {
    +		return mi.unmarshalPointerLazy(b, p, groupTag, opts)
    +	}
    +	return mi.unmarshalPointerEager(b, p, groupTag, opts)
    +}
    +
    +// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
    +// The corresponding function for Lazy is in google_lazy.go.
    +func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +
     	initialized := true
     	var requiredMask uint64
     	var exts *map[int32]ExtensionField
    +
    +	var presence presence
    +	if mi.presenceOffset.IsValid() {
    +		presence = p.Apply(mi.presenceOffset).PresenceInfo()
    +	}
    +
     	start := len(b)
     	for len(b) > 0 {
     		// Parse the tag (field number and wire type).
    @@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
     			if f.funcs.isInit != nil && !o.initialized {
     				initialized = false
     			}
    +
    +			if f.presenceIndex != noPresence {
    +				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
    +			}
    +
     		default:
     			// Possible extension.
     			if exts == nil && mi.extensionOffset.IsValid() {
    @@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
     		return out, errUnknown
     	}
     	if flags.LazyUnmarshalExtensions {
    -		if opts.IsDefault() && x.canLazy(xt) {
    +		if opts.CanBeLazy() && x.canLazy(xt) {
     			out, valid := skipExtension(b, xi, num, wtyp, opts)
     			switch valid {
     			case ValidationValid:
    @@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
     		if n < 0 {
     			return out, ValidationUnknown
     		}
    +
    +		if opts.Validated() {
    +			out.initialized = true
    +			out.n = n
    +			return out, ValidationValid
    +		}
    +
     		out, st := xi.validation.mi.validate(v, 0, opts)
     		out.n = n
     		return out, st
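
CanBeLazy above is a bitmask test: lazy decoding is only attempted when the resolver is the global registry and no flags outside an allow-list are set. A minimal sketch of that "no bits outside the allowed set" idiom, using illustrative flag names rather than the protoiface constants:

package main

import "fmt"

type flags uint8

const (
	flagAliasBuffer flags = 1 << iota
	flagValidated
	flagCheckRequired
	flagDiscardUnknown
)

// onlyAllowed reports whether f sets no bits outside allowed, mirroring
// the (flags & ^(…)) == 0 style check in unmarshalOptions.CanBeLazy.
func onlyAllowed(f, allowed flags) bool {
	return f&^allowed == 0
}

func main() {
	allowed := flagAliasBuffer | flagValidated | flagCheckRequired
	fmt.Println(onlyAllowed(flagAliasBuffer|flagValidated, allowed)) // true
	fmt.Println(onlyAllowed(flagDiscardUnknown, allowed))            // false
}
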
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
    index 845c67d6e7..b2e212291d 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
    @@ -10,7 +10,8 @@ import (
     	"sync/atomic"
     
     	"google.golang.org/protobuf/internal/flags"
    -	proto "google.golang.org/protobuf/proto"
    +	"google.golang.org/protobuf/internal/protolazy"
    +	"google.golang.org/protobuf/proto"
     	piface "google.golang.org/protobuf/runtime/protoiface"
     )
     
    @@ -49,8 +50,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
     		return 0
     	}
     	if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
    -		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
    -			return int(size)
    +		// The size cache contains the size + 1, to allow the
    +		// zero value to be invalid, while also allowing for a
    +		// 0 size to be cached.
    +		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 {
    +			return int(size - 1)
     		}
     	}
     	return mi.sizePointerSlow(p, opts)
    @@ -60,7 +64,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
     	if flags.ProtoLegacy && mi.isMessageSet {
     		size = sizeMessageSet(mi, p, opts)
     		if mi.sizecacheOffset.IsValid() {
    -			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
    +			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
     		}
     		return size
     	}
    @@ -68,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
     		e := p.Apply(mi.extensionOffset).Extensions()
     		size += mi.sizeExtensions(e, opts)
     	}
    +
    +	var lazy **protolazy.XXX_lazyUnmarshalInfo
    +	var presence presence
    +	if mi.presenceOffset.IsValid() {
    +		presence = p.Apply(mi.presenceOffset).PresenceInfo()
    +		if mi.lazyOffset.IsValid() {
    +			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
    +		}
    +	}
    +
     	for _, f := range mi.orderedCoderFields {
     		if f.funcs.size == nil {
     			continue
     		}
     		fptr := p.Apply(f.offset)
    +
    +		if f.presenceIndex != noPresence {
    +			if !presence.Present(f.presenceIndex) {
    +				continue
    +			}
    +
    +			if f.isLazy && fptr.AtomicGetPointer().IsNil() {
    +				if lazyFields(opts) {
    +					size += (*lazy).SizeField(uint32(f.num))
    +					continue
    +				} else {
    +					mi.lazyUnmarshal(p, f.num)
    +				}
    +			}
    +			size += f.funcs.size(fptr, f, opts)
    +			continue
    +		}
    +
     		if f.isPointer && fptr.Elem().IsNil() {
     			continue
     		}
    @@ -84,13 +116,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
     		}
     	}
     	if mi.sizecacheOffset.IsValid() {
    -		if size > math.MaxInt32 {
    +		if size > (math.MaxInt32 - 1) {
     			// The size is too large for the int32 sizecache field.
     			// We will need to recompute the size when encoding;
     			// unfortunately expensive, but better than invalid output.
    -			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
    +			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0)
     		} else {
    -			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
    +			// The size cache contains the size + 1, to allow the
    +			// zero value to be invalid, while also allowing for a
    +			// 0 size to be cached.
    +			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
     		}
     	}
     	return size
    @@ -128,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
     			return b, err
     		}
     	}
    +
    +	var lazy **protolazy.XXX_lazyUnmarshalInfo
    +	var presence presence
    +	if mi.presenceOffset.IsValid() {
    +		presence = p.Apply(mi.presenceOffset).PresenceInfo()
    +		if mi.lazyOffset.IsValid() {
    +			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
    +		}
    +	}
    +
     	for _, f := range mi.orderedCoderFields {
     		if f.funcs.marshal == nil {
     			continue
     		}
     		fptr := p.Apply(f.offset)
    +
    +		if f.presenceIndex != noPresence {
    +			if !presence.Present(f.presenceIndex) {
    +				continue
    +			}
    +			if f.isLazy {
    +				// Be careful, this field needs to be read atomically, like for a get
    +				if f.isPointer && fptr.AtomicGetPointer().IsNil() {
    +					if lazyFields(opts) {
    +						b, _ = (*lazy).AppendField(b, uint32(f.num))
    +						continue
    +					} else {
    +						mi.lazyUnmarshal(p, f.num)
    +					}
    +				}
    +
    +				b, err = f.funcs.marshal(b, fptr, f, opts)
    +				if err != nil {
    +					return b, err
    +				}
    +				continue
    +			} else if f.isPointer && fptr.Elem().IsNil() {
    +				continue
    +			}
    +			b, err = f.funcs.marshal(b, fptr, f, opts)
    +			if err != nil {
    +				return b, err
    +			}
    +			continue
    +		}
    +
     		if f.isPointer && fptr.Elem().IsNil() {
     			continue
     		}
    @@ -149,6 +225,22 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
     	return b, nil
     }
     
    +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal.
    +func fullyLazyExtensions(opts marshalOptions) bool {
    +	// When deterministic marshaling is requested, force an unmarshal for lazy
    +	// extensions to produce a deterministic result, instead of passing through
    +	// bytes lazily that may or may not match what Go Protobuf would produce.
    +	return opts.flags&piface.MarshalDeterministic == 0
    +}
    +
    +// lazyFields returns true if we should attempt to keep fields lazy over size and marshal.
    +func lazyFields(opts marshalOptions) bool {
    +	// When deterministic marshaling is requested, force an unmarshal for lazy
    +	// fields to produce a deterministic result, instead of passing through
    +	// bytes lazily that may or may not match what Go Protobuf would produce.
    +	return opts.flags&piface.MarshalDeterministic == 0
    +}
    +
     func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
     	if ext == nil {
     		return 0
    @@ -158,6 +250,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha
     		if xi.funcs.size == nil {
     			continue
     		}
    +		if fullyLazyExtensions(opts) {
    +			// Don't expand the extension, instead use the buffer to calculate size
    +			if lb := x.lazyBuffer(); lb != nil {
    +				// We got hold of the buffer, so it's still lazy.
    +				n += len(lb)
    +				continue
    +			}
    +		}
     		n += xi.funcs.size(x.Value(), xi.tagsize, opts)
     	}
     	return n
    @@ -176,6 +276,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
     		var err error
     		for _, x := range *ext {
     			xi := getExtensionFieldInfo(x.Type())
    +			if fullyLazyExtensions(opts) {
    +				// Don't expand the extension if it's still in wire format, instead use the buffer content.
    +				if lb := x.lazyBuffer(); lb != nil {
    +					b = append(b, lb...)
    +					continue
    +				}
    +			}
     			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
     		}
     		return b, err
    @@ -191,6 +298,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
     		for _, k := range keys {
     			x := (*ext)[int32(k)]
     			xi := getExtensionFieldInfo(x.Type())
    +			if fullyLazyExtensions(opts) {
    +				// Don't expand the extension if it's still in wire format, instead use the buffer content.
    +				if lb := x.lazyBuffer(); lb != nil {
    +					b = append(b, lb...)
    +					continue
    +				}
    +			}
     			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
     			if err != nil {
     				return b, err
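
The size cache in encode.go now stores size+1, so the int32 zero value means "not cached" while a genuine zero-byte size remains cacheable; overflow stores 0 to force recomputation. A small sketch of that encoding with illustrative names (not the vendored API):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// storeSize caches size+1; 0 means "no cached value".
func storeSize(cache *int32, size int) {
	if size > math.MaxInt32-1 {
		atomic.StoreInt32(cache, 0) // too large for the cache: recompute later
		return
	}
	atomic.StoreInt32(cache, int32(size+1))
}

// loadSize returns the cached size and whether a value was cached.
func loadSize(cache *int32) (int, bool) {
	if v := atomic.LoadInt32(cache); v > 0 {
		return int(v - 1), true
	}
	return 0, false
}

func main() {
	var cache int32
	fmt.Println(loadSize(&cache)) // 0 false: nothing cached yet
	storeSize(&cache, 0)          // a real zero-byte size is representable
	fmt.Println(loadSize(&cache)) // 0 true
}
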
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
    new file mode 100644
    index 0000000000..9f6c32a7d8
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
    @@ -0,0 +1,224 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"bytes"
    +
    +	"google.golang.org/protobuf/encoding/protowire"
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +	"google.golang.org/protobuf/runtime/protoiface"
    +)
    +
    +func equal(in protoiface.EqualInput) protoiface.EqualOutput {
    +	return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
    +}
    +
    +// equalMessage is a fast-path variant of protoreflect.equalMessage.
    +// It takes advantage of the internal messageState type to avoid
    +// unnecessary allocations, type assertions.
    +func equalMessage(mx, my protoreflect.Message) bool {
    +	if mx == nil || my == nil {
    +		return mx == my
    +	}
    +	if mx.Descriptor() != my.Descriptor() {
    +		return false
    +	}
    +
    +	msx, ok := mx.(*messageState)
    +	if !ok {
    +		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
    +	}
    +	msy, ok := my.(*messageState)
    +	if !ok {
    +		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
    +	}
    +
    +	mi := msx.messageInfo()
    +	miy := msy.messageInfo()
    +	if mi != miy {
    +		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
    +	}
    +	mi.init()
    +	// Compares regular fields
    +	// Modified Message.Range code that compares two messages of the same type
    +	// while going over the fields.
    +	for _, ri := range mi.rangeInfos {
    +		var fd protoreflect.FieldDescriptor
    +		var vx, vy protoreflect.Value
    +
    +		switch ri := ri.(type) {
    +		case *fieldInfo:
    +			hx := ri.has(msx.pointer())
    +			hy := ri.has(msy.pointer())
    +			if hx != hy {
    +				return false
    +			}
    +			if !hx {
    +				continue
    +			}
    +			fd = ri.fieldDesc
    +			vx = ri.get(msx.pointer())
    +			vy = ri.get(msy.pointer())
    +		case *oneofInfo:
    +			fnx := ri.which(msx.pointer())
    +			fny := ri.which(msy.pointer())
    +			if fnx != fny {
    +				return false
    +			}
    +			if fnx <= 0 {
    +				continue
    +			}
    +			fi := mi.fields[fnx]
    +			fd = fi.fieldDesc
    +			vx = fi.get(msx.pointer())
    +			vy = fi.get(msy.pointer())
    +		}
    +
    +		if !equalValue(fd, vx, vy) {
    +			return false
    +		}
    +	}
    +
    +	// Compare extensions.
    +	// This is more complicated because mx or my could have empty/nil extension maps,
    +	// however some populated extension map values are equal to nil extension maps.
    +	emx := mi.extensionMap(msx.pointer())
    +	emy := mi.extensionMap(msy.pointer())
    +	if emx != nil {
    +		for k, x := range *emx {
    +			xd := x.Type().TypeDescriptor()
    +			xv := x.Value()
    +			var y ExtensionField
    +			ok := false
    +			if emy != nil {
    +				y, ok = (*emy)[k]
    +			}
    +			// We need to treat empty lists as equal to nil values
    +			if emy == nil || !ok {
    +				if xd.IsList() && xv.List().Len() == 0 {
    +					continue
    +				}
    +				return false
    +			}
    +
    +			if !equalValue(xd, xv, y.Value()) {
    +				return false
    +			}
    +		}
    +	}
    +	if emy != nil {
    +		// emy may have extensions emx does not have, need to check them as well
    +		for k, y := range *emy {
    +			if emx != nil {
    +				// emx has the field, so we already checked it
    +				if _, ok := (*emx)[k]; ok {
    +					continue
    +				}
    +			}
    +			// Empty lists are equal to nil
    +			if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
    +				continue
    +			}
    +
    +			// Cant be equal if the extension is populated
    +			return false
    +		}
    +	}
    +
    +	return equalUnknown(mx.GetUnknown(), my.GetUnknown())
    +}
    +
    +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
    +	// slow path
    +	if fd.Kind() != protoreflect.MessageKind {
    +		return vx.Equal(vy)
    +	}
    +
    +	// fast path special cases
    +	if fd.IsMap() {
    +		if fd.MapValue().Kind() == protoreflect.MessageKind {
    +			return equalMessageMap(vx.Map(), vy.Map())
    +		}
    +		return vx.Equal(vy)
    +	}
    +
    +	if fd.IsList() {
    +		return equalMessageList(vx.List(), vy.List())
    +	}
    +
    +	return equalMessage(vx.Message(), vy.Message())
    +}
    +
    +// Mostly copied from protoreflect.equalMap.
    +// This variant only works for messages as map types.
    +// All other map types should be handled via Value.Equal.
    +func equalMessageMap(mx, my protoreflect.Map) bool {
    +	if mx.Len() != my.Len() {
    +		return false
    +	}
    +	equal := true
    +	mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
    +		if !my.Has(k) {
    +			equal = false
    +			return false
    +		}
    +		vy := my.Get(k)
    +		equal = equalMessage(vx.Message(), vy.Message())
    +		return equal
    +	})
    +	return equal
    +}
    +
    +// Mostly copied from protoreflect.equalList.
    +// The only change is the usage of equalImpl instead of protoreflect.equalValue.
    +func equalMessageList(lx, ly protoreflect.List) bool {
    +	if lx.Len() != ly.Len() {
    +		return false
    +	}
    +	for i := 0; i < lx.Len(); i++ {
    +		// We only operate on messages here since equalImpl will not call us in any other case.
    +		if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +// equalUnknown compares unknown fields by direct comparison on the raw bytes
    +// of each individual field number.
    +// Copied from protoreflect.equalUnknown.
    +func equalUnknown(x, y protoreflect.RawFields) bool {
    +	if len(x) != len(y) {
    +		return false
    +	}
    +	if bytes.Equal([]byte(x), []byte(y)) {
    +		return true
    +	}
    +
    +	mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
    +	my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
    +	for len(x) > 0 {
    +		fnum, _, n := protowire.ConsumeField(x)
    +		mx[fnum] = append(mx[fnum], x[:n]...)
    +		x = x[n:]
    +	}
    +	for len(y) > 0 {
    +		fnum, _, n := protowire.ConsumeField(y)
    +		my[fnum] = append(my[fnum], y[:n]...)
    +		y = y[n:]
    +	}
    +	if len(mx) != len(my) {
    +		return false
    +	}
    +
    +	for k, v1 := range mx {
    +		if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
    +			return false
    +		}
    +	}
    +
    +	return true
    +}
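
The new equal.go registers a fast-path comparison on the message methods table (mi.methods.Equal above); callers keep using the public proto.Equal, which in the v1.36 line vendored here can dispatch to this implementation instead of the generic reflection walk. A runnable usage sketch with a well-known generated type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("x")
	b := wrapperspb.String("x")
	// proto.Equal compares messages by value; with the vendored v1.36 code it
	// can use the per-message Equal fast path wired up in codec_message*.go.
	fmt.Println(proto.Equal(a, b)) // true
}
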
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
    index cb25b0bae1..e31249f64f 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
    @@ -53,7 +53,7 @@ type ExtensionInfo struct {
     	// type returned by InterfaceOf may not be identical.
     	//
     	// Deprecated: Use InterfaceOf(xt.Zero()) instead.
    -	ExtensionType interface{}
    +	ExtensionType any
     
     	// Field is the field number of the extension.
     	//
    @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value {
     func (xi *ExtensionInfo) Zero() protoreflect.Value {
     	return xi.lazyInit().Zero()
     }
    -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value {
    +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value {
     	return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
     }
    -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} {
    +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any {
     	return xi.lazyInit().GoValueOf(v).Interface()
     }
     func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool {
     	return xi.lazyInit().IsValidPB(v)
     }
    -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
    +func (xi *ExtensionInfo) IsValidInterface(v any) bool {
     	return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
     }
     func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
    new file mode 100644
    index 0000000000..e8fb6c35b4
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
    @@ -0,0 +1,433 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"fmt"
    +	"math/bits"
    +	"os"
    +	"reflect"
    +	"sort"
    +	"sync/atomic"
    +
    +	"google.golang.org/protobuf/encoding/protowire"
    +	"google.golang.org/protobuf/internal/errors"
    +	"google.golang.org/protobuf/internal/protolazy"
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +	preg "google.golang.org/protobuf/reflect/protoregistry"
    +	piface "google.golang.org/protobuf/runtime/protoiface"
    +)
    +
    +var enableLazy int32 = func() int32 {
    +	if os.Getenv("GOPROTODEBUG") == "nolazy" {
    +		return 0
    +	}
    +	return 1
    +}()
    +
    +// EnableLazyUnmarshal enables lazy unmarshaling.
    +func EnableLazyUnmarshal(enable bool) {
    +	if enable {
    +		atomic.StoreInt32(&enableLazy, 1)
    +		return
    +	}
    +	atomic.StoreInt32(&enableLazy, 0)
    +}
    +
    +// LazyEnabled reports whether lazy unmarshalling is currently enabled.
    +func LazyEnabled() bool {
    +	return atomic.LoadInt32(&enableLazy) != 0
    +}
    +
    +// UnmarshalField unmarshals a field in a message.
    +func UnmarshalField(m interface{}, num protowire.Number) {
    +	switch m := m.(type) {
    +	case *messageState:
    +		m.messageInfo().lazyUnmarshal(m.pointer(), num)
    +	case *messageReflectWrapper:
    +		m.messageInfo().lazyUnmarshal(m.pointer(), num)
    +	default:
    +		panic(fmt.Sprintf("unsupported wrapper type %T", m))
    +	}
    +}
    +
    +func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
    +	var f *coderFieldInfo
    +	if int(num) < len(mi.denseCoderFields) {
    +		f = mi.denseCoderFields[num]
    +	} else {
    +		f = mi.coderFields[num]
    +	}
    +	if f == nil {
    +		panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
    +	}
    +	lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
    +	start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
    +	if !found && multipleEntries == nil {
    +		panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
    +	}
    +	// The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races.
    +	// Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil.
    +	fp := pointerOfValue(reflect.New(f.ft))
    +	if multipleEntries != nil {
    +		for _, entry := range multipleEntries {
    +			mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
    +		}
    +	} else {
    +		mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
    +	}
    +	p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
    +}
    +
    +func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
    +	opts := lazyUnmarshalOptions
    +	opts.flags |= flags
    +	for len(b) > 0 {
    +		// Parse the tag (field number and wire type).
    +		var tag uint64
    +		if b[0] < 0x80 {
    +			tag = uint64(b[0])
    +			b = b[1:]
    +		} else if len(b) >= 2 && b[1] < 128 {
    +			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
    +			b = b[2:]
    +		} else {
    +			var n int
    +			tag, n = protowire.ConsumeVarint(b)
    +			if n < 0 {
    +				return errors.New("invalid wire data")
    +			}
    +			b = b[n:]
    +		}
    +		var num protowire.Number
    +		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
    +			return errors.New("invalid wire data")
    +		} else {
    +			num = protowire.Number(n)
    +		}
    +		wtyp := protowire.Type(tag & 7)
    +		if num == f.num {
    +			o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
    +			if err == nil {
    +				b = b[o.n:]
    +				continue
    +			}
    +			if err != errUnknown {
    +				return err
    +			}
    +		}
    +		n := protowire.ConsumeFieldValue(num, wtyp, b)
    +		if n < 0 {
    +			return errors.New("invalid wire data")
    +		}
    +		b = b[n:]
    +	}
    +	return nil
    +}
    +
    +func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
    +	fmi := f.validation.mi
    +	if fmi == nil {
    +		fd := mi.Desc.Fields().ByNumber(f.num)
    +		if fd == nil || !fd.IsWeak() {
    +			return out, ValidationUnknown
    +		}
    +		messageName := fd.Message().FullName()
    +		messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
    +		if err != nil {
    +			return out, ValidationUnknown
    +		}
    +		var ok bool
    +		fmi, ok = messageType.(*MessageInfo)
    +		if !ok {
    +			return out, ValidationUnknown
    +		}
    +	}
    +	fmi.init()
    +	switch f.validation.typ {
    +	case validationTypeMessage:
    +		if wtyp != protowire.BytesType {
    +			return out, ValidationWrongWireType
    +		}
    +		v, n := protowire.ConsumeBytes(b)
    +		if n < 0 {
    +			return out, ValidationInvalid
    +		}
    +		out, st := fmi.validate(v, 0, opts)
    +		out.n = n
    +		return out, st
    +	case validationTypeGroup:
    +		if wtyp != protowire.StartGroupType {
    +			return out, ValidationWrongWireType
    +		}
    +		out, st := fmi.validate(b, f.num, opts)
    +		return out, st
    +	default:
    +		return out, ValidationUnknown
    +	}
    +}
    +
    +// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
    +// specifically handles lazy unmarshalling.  it expects lazyOffset and
    +// presenceOffset to both be valid.
    +func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
    +	initialized := true
    +	var requiredMask uint64
    +	var lazy **protolazy.XXX_lazyUnmarshalInfo
    +	var presence presence
    +	var lazyIndex []protolazy.IndexEntry
    +	var lastNum protowire.Number
    +	outOfOrder := false
    +	lazyDecode := false
    +	presence = p.Apply(mi.presenceOffset).PresenceInfo()
    +	lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
    +	if !presence.AnyPresent(mi.presenceSize) {
    +		if opts.CanBeLazy() {
    +			// If the message contains existing data, we need to merge into it.
    +			// Lazy unmarshaling doesn't merge, so only enable it when the
    +			// message is empty (has no presence bitmap).
    +			lazyDecode = true
    +			if *lazy == nil {
    +				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
    +			}
    +			(*lazy).SetUnmarshalFlags(opts.flags)
    +			if !opts.AliasBuffer() {
    +				// Make a copy of the buffer for lazy unmarshaling.
    +				// Set the AliasBuffer flag so recursive unmarshal
    +				// operations reuse the copy.
    +				b = append([]byte{}, b...)
    +				opts.flags |= piface.UnmarshalAliasBuffer
    +			}
    +			(*lazy).SetBuffer(b)
    +		}
    +	}
    +	// Track special handling of lazy fields.
    +	//
    +	// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
    +	// In the event that validation for a field fails, this map tracks handling of the field.
    +	type lazyAction uint8
    +	const (
    +		lazyValidateOnly   lazyAction = iota // validate the field only
    +		lazyUnmarshalNow                     // eagerly unmarshal the field
    +		lazyUnmarshalLater                   // unmarshal the field after the message is fully processed
    +	)
    +	var lazyFields map[*coderFieldInfo]lazyAction
    +	var exts *map[int32]ExtensionField
    +	start := len(b)
    +	pos := 0
    +	for len(b) > 0 {
    +		// Parse the tag (field number and wire type).
    +		var tag uint64
    +		if b[0] < 0x80 {
    +			tag = uint64(b[0])
    +			b = b[1:]
    +		} else if len(b) >= 2 && b[1] < 128 {
    +			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
    +			b = b[2:]
    +		} else {
    +			var n int
    +			tag, n = protowire.ConsumeVarint(b)
    +			if n < 0 {
    +				return out, errDecode
    +			}
    +			b = b[n:]
    +		}
    +		var num protowire.Number
    +		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
    +			return out, errors.New("invalid field number")
    +		} else {
    +			num = protowire.Number(n)
    +		}
    +		wtyp := protowire.Type(tag & 7)
    +
    +		if wtyp == protowire.EndGroupType {
    +			if num != groupTag {
    +				return out, errors.New("mismatching end group marker")
    +			}
    +			groupTag = 0
    +			break
    +		}
    +
    +		var f *coderFieldInfo
    +		if int(num) < len(mi.denseCoderFields) {
    +			f = mi.denseCoderFields[num]
    +		} else {
    +			f = mi.coderFields[num]
    +		}
    +		var n int
    +		err := errUnknown
    +		discardUnknown := false
    +	Field:
    +		switch {
    +		case f != nil:
    +			if f.funcs.unmarshal == nil {
    +				break
    +			}
    +			if f.isLazy && lazyDecode {
    +				switch {
    +				case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
    +					// Attempt to validate this field and leave it for later lazy unmarshaling.
    +					o, valid := mi.skipField(b, f, wtyp, opts)
    +					switch valid {
    +					case ValidationValid:
    +						// Skip over the valid field and continue.
    +						err = nil
    +						presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
    +						requiredMask |= f.validation.requiredBit
    +						if !o.initialized {
    +							initialized = false
    +						}
    +						n = o.n
    +						break Field
    +					case ValidationInvalid:
    +						return out, errors.New("invalid proto wire format")
    +					case ValidationWrongWireType:
    +						break Field
    +					case ValidationUnknown:
    +						if lazyFields == nil {
    +							lazyFields = make(map[*coderFieldInfo]lazyAction)
    +						}
    +						if presence.Present(f.presenceIndex) {
    +							// We were unable to determine if the field is valid or not,
    +							// and we've already skipped over at least one instance of this
    +							// field. Clear the presence bit (so if we stop decoding early,
    +							// we don't leave a partially-initialized field around) and flag
    +							// the field for unmarshaling before we return.
    +							presence.ClearPresent(f.presenceIndex)
    +							lazyFields[f] = lazyUnmarshalLater
    +							discardUnknown = true
    +							break Field
    +						} else {
    +							// We were unable to determine if the field is valid or not,
    +							// but this is the first time we've seen it. Flag it as needing
    +							// eager unmarshaling and fall through to the eager unmarshal case below.
    +							lazyFields[f] = lazyUnmarshalNow
    +						}
    +					}
    +				case lazyFields[f] == lazyUnmarshalLater:
    +					// This field will be unmarshaled in a separate pass below.
    +					// Skip over it here.
    +					discardUnknown = true
    +					break Field
    +				default:
    +					// Eagerly unmarshal the field.
    +				}
    +			}
    +			if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
    +				if p.Apply(f.offset).AtomicGetPointer().IsNil() {
    +					mi.lazyUnmarshal(p, f.num)
    +				}
    +			}
    +			var o unmarshalOutput
    +			o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
    +			n = o.n
    +			if err != nil {
    +				break
    +			}
    +			requiredMask |= f.validation.requiredBit
    +			if f.funcs.isInit != nil && !o.initialized {
    +				initialized = false
    +			}
    +			if f.presenceIndex != noPresence {
    +				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
    +			}
    +		default:
    +			// Possible extension.
    +			if exts == nil && mi.extensionOffset.IsValid() {
    +				exts = p.Apply(mi.extensionOffset).Extensions()
    +				if *exts == nil {
    +					*exts = make(map[int32]ExtensionField)
    +				}
    +			}
    +			if exts == nil {
    +				break
    +			}
    +			var o unmarshalOutput
    +			o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
    +			if err != nil {
    +				break
    +			}
    +			n = o.n
    +			if !o.initialized {
    +				initialized = false
    +			}
    +		}
    +		if err != nil {
    +			if err != errUnknown {
    +				return out, err
    +			}
    +			n = protowire.ConsumeFieldValue(num, wtyp, b)
    +			if n < 0 {
    +				return out, errDecode
    +			}
    +			if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
    +				u := mi.mutableUnknownBytes(p)
    +				*u = protowire.AppendTag(*u, num, wtyp)
    +				*u = append(*u, b[:n]...)
    +			}
    +		}
    +		b = b[n:]
    +		end := start - len(b)
    +		if lazyDecode && f != nil && f.isLazy {
    +			if num != lastNum {
    +				lazyIndex = append(lazyIndex, protolazy.IndexEntry{
    +					FieldNum: uint32(num),
    +					Start:    uint32(pos),
    +					End:      uint32(end),
    +				})
    +			} else {
    +				i := len(lazyIndex) - 1
    +				lazyIndex[i].End = uint32(end)
    +				lazyIndex[i].MultipleContiguous = true
    +			}
    +		}
    +		if num < lastNum {
    +			outOfOrder = true
    +		}
    +		pos = end
    +		lastNum = num
    +	}
    +	if groupTag != 0 {
    +		return out, errors.New("missing end group marker")
    +	}
    +	if lazyFields != nil {
    +		// Some fields failed validation, and now need to be unmarshaled.
    +		for f, action := range lazyFields {
    +			if action != lazyUnmarshalLater {
    +				continue
    +			}
    +			initialized = false
    +			if *lazy == nil {
    +				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
    +			}
    +			if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
    +				return out, err
    +			}
    +			presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
    +		}
    +	}
    +	if lazyDecode {
    +		if outOfOrder {
    +			sort.Slice(lazyIndex, func(i, j int) bool {
    +				return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
    +					(lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
    +						lazyIndex[i].Start < lazyIndex[j].Start)
    +			})
    +		}
    +		if *lazy == nil {
    +			*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
    +		}
    +
    +		(*lazy).SetIndex(lazyIndex)
    +	}
    +	if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
    +		initialized = false
    +	}
    +	if initialized {
    +		out.initialized = true
    +	}
    +	out.n = start - len(b)
    +	return out, nil
    +}
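
Per the vendored code above, lazy unmarshaling defaults to on and is gated by an atomic flag seeded from GOPROTODEBUG (the exact value "nolazy" disables it); EnableLazyUnmarshal sits in an internal package, so applications would normally rely on the environment variable. A minimal sketch of the same env-seeded atomic gate pattern:

package main

import (
	"fmt"
	"os"
	"sync/atomic"
)

// enabled mirrors the enableLazy gate: seeded once from the environment,
// flipped atomically afterwards.
var enabled atomic.Int32

func init() {
	if os.Getenv("GOPROTODEBUG") == "nolazy" {
		enabled.Store(0)
		return
	}
	enabled.Store(1)
}

func lazyEnabled() bool { return enabled.Load() != 0 }

func main() {
	fmt.Println("lazy decoding enabled:", lazyEnabled())
}
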
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
    index c2a803bb2f..81b2b1a763 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
    @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber {
     func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum {
     	return e
     }
    -func (e *legacyEnumWrapper) protoUnwrap() interface{} {
    +func (e *legacyEnumWrapper) protoUnwrap() any {
     	v := reflect.New(e.goTyp).Elem()
     	v.SetInt(int64(e.num))
     	return v.Interface()
    @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor {
     	ed := &filedesc.Enum{L2: new(filedesc.EnumL2)}
     	ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum
     	ed.L0.ParentFile = filedesc.SurrogateProto3
    +	ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures
     	ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{})
     
     	// TODO: Use the presence of a UnmarshalJSON method to determine proto2?
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
    index 87b30d0504..b6849d6692 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
    @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() {
     	xd.L1.Number = protoreflect.FieldNumber(xi.Field)
     	xd.L1.Cardinality = fd.L1.Cardinality
     	xd.L1.Kind = fd.L1.Kind
    -	xd.L2.IsPacked = fd.L1.IsPacked
    +	xd.L1.EditionFeatures = fd.L1.EditionFeatures
     	xd.L2.Default = fd.L1.Default
     	xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType)
     	xd.L2.Enum = ed
    @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
     func (x placeholderExtension) HasOptionalKeyword() bool                           { return false }
     func (x placeholderExtension) IsExtension() bool                                  { return true }
     func (x placeholderExtension) IsWeak() bool                                       { return false }
    +func (x placeholderExtension) IsLazy() bool                                       { return false }
     func (x placeholderExtension) IsPacked() bool                                     { return false }
     func (x placeholderExtension) IsList() bool                                       { return false }
     func (x placeholderExtension) IsMap() bool                                        { return false }
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
    index 9ab091086c..b649f1124b 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
    @@ -7,7 +7,7 @@ package impl
     import (
     	"bytes"
     	"compress/gzip"
    -	"io/ioutil"
    +	"io"
     	"sync"
     
     	"google.golang.org/protobuf/internal/filedesc"
    @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor {
     	if err != nil {
     		panic(err)
     	}
    -	b2, err := ioutil.ReadAll(zr)
    +	b2, err := io.ReadAll(zr)
     	if err != nil {
     		panic(err)
     	}
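The `ioutil.ReadAll` to `io.ReadAll` change in `legacyLoadFileDesc` is behavior-preserving: since Go 1.16, `ioutil.ReadAll` is deprecated and simply forwards to `io.ReadAll`. A small standalone example of the same decompress-and-read pattern, using placeholder bytes rather than a real compressed file descriptor:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"log"
)

func main() {
	// Build a gzip-compressed payload, standing in for the compressed
	// descriptor bytes that legacyLoadFileDesc receives.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("raw descriptor bytes"))
	zw.Close()

	zr, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		log.Fatal(err)
	}
	b, err := io.ReadAll(zr) // previously ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", b)
}
```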
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
    index 2ab2c62978..bf0b6049b4 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
    @@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
     		}
     	}
     
    +	md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures
     	// Obtain a list of oneof wrapper types.
     	var oneofWrappers []reflect.Type
     	methods := make([]reflect.Method, 0, 2)
    @@ -215,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
     	}
     	for _, fn := range methods {
     		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
    -			if vs, ok := v.Interface().([]interface{}); ok {
    +			if vs, ok := v.Interface().([]any); ok {
     				for _, v := range vs {
     					oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
     				}
    @@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
     			od := &md.L2.Oneofs.List[n]
     			od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
     			od.L0.ParentFile = md.L0.ParentFile
    +			od.L1.EditionFeatures = md.L1.EditionFeatures
     			od.L0.Parent = md
     			od.L0.Index = n
     
    @@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
     						aberrantAppendField(md, f.Type, tag, "", "")
     						fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
     						fd.L1.ContainingOneof = od
    +						fd.L1.EditionFeatures = od.L1.EditionFeatures
     						od.L1.Fields.List = append(od.L1.Fields.List, fd)
     					}
     				}
    @@ -307,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
     	fd.L0.Parent = md
     	fd.L0.Index = n
     
    -	if fd.L1.IsWeak || fd.L1.HasPacked {
    +	if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
     		fd.L1.Options = func() protoreflect.ProtoMessage {
     			opts := descopts.Field.ProtoReflect().New()
     			if fd.L1.IsWeak {
     				opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
     			}
    -			if fd.L1.HasPacked {
    -				opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
    +			if fd.L1.EditionFeatures.IsPacked {
    +				opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
     			}
     			return opts.Interface()
     		}
    @@ -344,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
     				md2.L0.ParentFile = md.L0.ParentFile
     				md2.L0.Parent = md
     				md2.L0.Index = n
    +				md2.L1.EditionFeatures = md.L1.EditionFeatures
     
     				md2.L1.IsMapEntry = true
     				md2.L2.Options = func() protoreflect.ProtoMessage {
    @@ -563,6 +567,6 @@ func (m aberrantMessage) IsValid() bool {
     func (m aberrantMessage) ProtoMethods() *protoiface.Methods {
     	return aberrantProtoMethods
     }
    -func (m aberrantMessage) protoUnwrap() interface{} {
    +func (m aberrantMessage) protoUnwrap() any {
     	return m.v.Interface()
     }
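The additions in this file follow one pattern: descriptors derived at runtime (messages, oneofs, fields, map entries) now inherit `EditionFeatures` from their parent instead of starting from a zero value, so later code can consult features such as packedness uniformly. A toy illustration of that top-down copy, with entirely made-up types:

```go
package main

import "fmt"

// editionFeatures stands in for the resolved feature set (field
// presence, packed encoding, and so on); all types here are made up.
type editionFeatures struct{ IsPacked bool }

type fileDesc struct{ features editionFeatures }

type messageDesc struct {
	features editionFeatures
	fields   []*fieldDesc
}

type fieldDesc struct{ features editionFeatures }

// deriveMessage mirrors the pattern in aberrantLoadMessageDescReentrant
// and aberrantAppendField: each child descriptor starts from its
// parent's features rather than from a zero value.
func deriveMessage(parent *fileDesc, numFields int) *messageDesc {
	md := &messageDesc{features: parent.features}
	for i := 0; i < numFields; i++ {
		md.fields = append(md.fields, &fieldDesc{features: md.features})
	}
	return md
}

func main() {
	fd := &fileDesc{features: editionFeatures{IsPacked: true}}
	md := deriveMessage(fd, 2)
	fmt.Println(md.fields[0].features.IsPacked) // true, inherited from the file
}
```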
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
    index 7e65f64f28..8ffdce67d3 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
    @@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
     	if src.IsNil() {
     		return
     	}
    +
    +	var presenceSrc presence
    +	var presenceDst presence
    +	if mi.presenceOffset.IsValid() {
    +		presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
    +		presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
    +	}
    +
     	for _, f := range mi.orderedCoderFields {
     		if f.funcs.merge == nil {
     			continue
     		}
     		sfptr := src.Apply(f.offset)
    +
    +		if f.presenceIndex != noPresence {
    +			if !presenceSrc.Present(f.presenceIndex) {
    +				continue
    +			}
    +			dfptr := dst.Apply(f.offset)
    +			if f.isLazy {
    +				if sfptr.AtomicGetPointer().IsNil() {
    +					mi.lazyUnmarshal(src, f.num)
    +				}
    +				if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
    +					mi.lazyUnmarshal(dst, f.num)
    +				}
    +			}
    +			f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
    +			presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
    +			continue
    +		}
    +
     		if f.isPointer && sfptr.Elem().IsNil() {
     			continue
     		}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
    index 629bacdced..d1f79b4224 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/message.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
    @@ -30,12 +30,12 @@ type MessageInfo struct {
     	// Desc is the underlying message descriptor type and must be populated.
     	Desc protoreflect.MessageDescriptor
     
    -	// Exporter must be provided in a purego environment in order to provide
    -	// access to unexported fields.
    +	// Deprecated: Exporter will be removed the next time we bump
    +	// protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
     	Exporter exporter
     
     	// OneofWrappers is list of pointers to oneof wrapper struct types.
    -	OneofWrappers []interface{}
    +	OneofWrappers []any
     
     	initMu   sync.Mutex // protects all unexported fields
     	initDone uint32
    @@ -47,7 +47,7 @@ type MessageInfo struct {
     // exporter is a function that returns a reference to the ith field of v,
     // where v is a pointer to a struct. It returns nil if it does not support
     // exporting the requested field (e.g., already exported).
    -type exporter func(v interface{}, i int) interface{}
    +type exporter func(v any, i int) any
     
     // getMessageInfo returns the MessageInfo for any message type that
     // is generated by our implementation of protoc-gen-go (for v2 and on).
    @@ -79,6 +79,9 @@ func (mi *MessageInfo) initOnce() {
     	if mi.initDone == 1 {
     		return
     	}
    +	if opaqueInitHook(mi) {
    +		return
    +	}
     
     	t := mi.GoReflectType
     	if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
    @@ -133,6 +136,9 @@ type structInfo struct {
     	extensionOffset offset
     	extensionType   reflect.Type
     
    +	lazyOffset     offset
    +	presenceOffset offset
    +
     	fieldsByNumber        map[protoreflect.FieldNumber]reflect.StructField
     	oneofsByName          map[protoreflect.Name]reflect.StructField
     	oneofWrappersByType   map[reflect.Type]protoreflect.FieldNumber
    @@ -145,6 +151,8 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
     		weakOffset:      invalidOffset,
     		unknownOffset:   invalidOffset,
     		extensionOffset: invalidOffset,
    +		lazyOffset:      invalidOffset,
    +		presenceOffset:  invalidOffset,
     
     		fieldsByNumber:        map[protoreflect.FieldNumber]reflect.StructField{},
     		oneofsByName:          map[protoreflect.Name]reflect.StructField{},
    @@ -157,24 +165,28 @@ fieldLoop:
     		switch f := t.Field(i); f.Name {
     		case genid.SizeCache_goname, genid.SizeCacheA_goname:
     			if f.Type == sizecacheType {
    -				si.sizecacheOffset = offsetOf(f, mi.Exporter)
    +				si.sizecacheOffset = offsetOf(f)
     				si.sizecacheType = f.Type
     			}
     		case genid.WeakFields_goname, genid.WeakFieldsA_goname:
     			if f.Type == weakFieldsType {
    -				si.weakOffset = offsetOf(f, mi.Exporter)
    +				si.weakOffset = offsetOf(f)
     				si.weakType = f.Type
     			}
     		case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
     			if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
    -				si.unknownOffset = offsetOf(f, mi.Exporter)
    +				si.unknownOffset = offsetOf(f)
     				si.unknownType = f.Type
     			}
     		case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
     			if f.Type == extensionFieldsType {
    -				si.extensionOffset = offsetOf(f, mi.Exporter)
    +				si.extensionOffset = offsetOf(f)
     				si.extensionType = f.Type
     			}
    +		case "lazyFields", "XXX_lazyUnmarshalInfo":
    +			si.lazyOffset = offsetOf(f)
    +		case "XXX_presence":
    +			si.presenceOffset = offsetOf(f)
     		default:
     			for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
     				if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
    @@ -201,7 +213,7 @@ fieldLoop:
     	}
     	for _, fn := range methods {
     		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
    -			if vs, ok := v.Interface().([]interface{}); ok {
    +			if vs, ok := v.Interface().([]any); ok {
     				oneofWrappers = vs
     			}
     		}
    @@ -256,7 +268,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
     
     type mapEntryType struct {
     	desc    protoreflect.MessageDescriptor
    -	valType interface{} // zero value of enum or message type
    +	valType any // zero value of enum or message type
     }
     
     func (mt mapEntryType) New() protoreflect.Message {
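Besides dropping the `Exporter` argument from `offsetOf`, `makeStructInfo` now also records the offsets of the generated `XXX_lazyUnmarshalInfo` and `XXX_presence` fields by matching on field name. The snippet below shows the same name-driven discovery using only the standard `reflect` package; the struct layout is hypothetical, and the real code stores an internal offset type rather than a raw uintptr:

```go
package main

import (
	"fmt"
	"reflect"
)

// msg is a hypothetical generated message layout with the special
// bookkeeping fields the opaque implementation looks for.
type msg struct {
	field1                int32
	XXX_lazyUnmarshalInfo uintptr
	XXX_presence          [1]uint32
}

func main() {
	t := reflect.TypeOf(msg{})
	var lazyOffset, presenceOffset uintptr
	for i := 0; i < t.NumField(); i++ {
		switch f := t.Field(i); f.Name {
		case "XXX_lazyUnmarshalInfo":
			lazyOffset = f.Offset
		case "XXX_presence":
			presenceOffset = f.Offset
		}
	}
	fmt.Println(lazyOffset, presenceOffset)
}
```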
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
    new file mode 100644
    index 0000000000..d8dcd78863
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
    @@ -0,0 +1,632 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"fmt"
    +	"math"
    +	"reflect"
    +	"strings"
    +	"sync/atomic"
    +
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +)
    +
    +type opaqueStructInfo struct {
    +	structInfo
    +}
    +
    +// isOpaque determines whether a protobuf message type is on the Opaque API.  It
    +// checks whether the type is a Go struct that protoc-gen-go would generate.
    +//
    +// This function only detects newly generated messages from the v2
    +// implementation of protoc-gen-go. It is unable to classify generated messages
    +// that are too old or those that are generated by a different generator
    +// such as protoc-gen-gogo.
    +func isOpaque(t reflect.Type) bool {
    +	// The current detection mechanism is to simply check the first field
    +	// for a struct tag with the "protogen" key.
    +	if t.Kind() == reflect.Struct && t.NumField() > 0 {
    +		pgt := t.Field(0).Tag.Get("protogen")
    +		return strings.HasPrefix(pgt, "opaque.")
    +	}
    +	return false
    +}
    +
    +func opaqueInitHook(mi *MessageInfo) bool {
    +	mt := mi.GoReflectType.Elem()
    +	si := opaqueStructInfo{
    +		structInfo: mi.makeStructInfo(mt),
    +	}
    +
    +	if !isOpaque(mt) {
    +		return false
    +	}
    +
    +	defer atomic.StoreUint32(&mi.initDone, 1)
    +
    +	mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
    +	fds := mi.Desc.Fields()
    +	for i := 0; i < fds.Len(); i++ {
    +		fd := fds.Get(i)
    +		fs := si.fieldsByNumber[fd.Number()]
    +		var fi fieldInfo
    +		usePresence, _ := usePresenceForField(si, fd)
    +
    +		switch {
    +		case fd.IsWeak():
    +			// Weak fields are no different for opaque.
    +			fi = fieldInfoForWeakMessage(fd, si.weakOffset)
    +		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
    +			// Oneofs are no different for opaque.
    +			fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
    +		case fd.IsMap():
    +			fi = mi.fieldInfoForMapOpaque(si, fd, fs)
    +		case fd.IsList() && fd.Message() == nil && usePresence:
    +			fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
    +		case fd.IsList() && fd.Message() == nil:
    +			// Proto3 lists without presence can use same access methods as open
    +			fi = fieldInfoForList(fd, fs, mi.Exporter)
    +		case fd.IsList() && usePresence:
    +			fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
    +		case fd.IsList():
    +			// Proto3 opaque messages that does not need presence bitmap.
    +			// Different representation than open struct, but same logic
    +			fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
    +		case fd.Message() != nil && usePresence:
    +			fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
    +		case fd.Message() != nil:
    +			// Proto3 messages without presence can use same access methods as open
    +			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
    +		default:
    +			fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
    +		}
    +		mi.fields[fd.Number()] = &fi
    +	}
    +	mi.oneofs = map[protoreflect.Name]*oneofInfo{}
    +	for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
    +		od := mi.Desc.Oneofs().Get(i)
    +		mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
    +	}
    +
    +	mi.denseFields = make([]*fieldInfo, fds.Len()*2)
    +	for i := 0; i < fds.Len(); i++ {
    +		if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
    +			mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
    +		}
    +	}
    +
    +	for i := 0; i < fds.Len(); {
    +		fd := fds.Get(i)
    +		if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
    +			mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
    +			i += od.Fields().Len()
    +		} else {
    +			mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
    +			i++
    +		}
    +	}
    +
    +	mi.makeExtensionFieldsFunc(mt, si.structInfo)
    +	mi.makeUnknownFieldsFunc(mt, si.structInfo)
    +	mi.makeOpaqueCoderMethods(mt, si)
    +	mi.makeFieldTypes(si.structInfo)
    +
    +	return true
    +}
    +
    +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
    +	oi := &oneofInfo{oneofDesc: od}
    +	if od.IsSynthetic() {
    +		fd := od.Fields().Get(0)
    +		index, _ := presenceIndex(mi.Desc, fd)
    +		oi.which = func(p pointer) protoreflect.FieldNumber {
    +			if p.IsNil() {
    +				return 0
    +			}
    +			if !mi.present(p, index) {
    +				return 0
    +			}
    +			return od.Fields().Get(0).Number()
    +		}
    +		return oi
    +	}
    +	// Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
    +	return makeOneofInfo(od, si, x)
    +}
    +
    +func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
    +	ft := fs.Type
    +	if ft.Kind() != reflect.Map {
    +		panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
    +	}
    +	fieldOffset := offsetOf(fs)
    +	conv := NewConverter(ft, fd)
    +	return fieldInfo{
    +		fieldDesc: fd,
    +		has: func(p pointer) bool {
    +			if p.IsNil() {
    +				return false
    +			}
    +			// Don't bother checking presence bits, since we need to
    +			// look at the map length even if the presence bit is set.
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			return rv.Len() > 0
    +		},
    +		clear: func(p pointer) {
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			rv.Set(reflect.Zero(rv.Type()))
    +		},
    +		get: func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if rv.Len() == 0 {
    +				return conv.Zero()
    +			}
    +			return conv.PBValueOf(rv)
    +		},
    +		set: func(p pointer, v protoreflect.Value) {
    +			pv := conv.GoValueOf(v)
    +			if pv.IsNil() {
    +				panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
    +			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			rv.Set(pv)
    +		},
    +		mutable: func(p pointer) protoreflect.Value {
    +			v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if v.IsNil() {
    +				v.Set(reflect.MakeMap(fs.Type))
    +			}
    +			return conv.PBValueOf(v)
    +		},
    +		newField: func() protoreflect.Value {
    +			return conv.New()
    +		},
    +	}
    +}
    +
    +func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
    +	ft := fs.Type
    +	if ft.Kind() != reflect.Slice {
    +		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
    +	}
    +	conv := NewConverter(reflect.PtrTo(ft), fd)
    +	fieldOffset := offsetOf(fs)
    +	index, _ := presenceIndex(mi.Desc, fd)
    +	return fieldInfo{
    +		fieldDesc: fd,
    +		has: func(p pointer) bool {
    +			if p.IsNil() {
    +				return false
    +			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			return rv.Len() > 0
    +		},
    +		clear: func(p pointer) {
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			rv.Set(reflect.Zero(rv.Type()))
    +		},
    +		get: func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
    +			if rv.Elem().Len() == 0 {
    +				return conv.Zero()
    +			}
    +			return conv.PBValueOf(rv)
    +		},
    +		set: func(p pointer, v protoreflect.Value) {
    +			pv := conv.GoValueOf(v)
    +			if pv.IsNil() {
    +				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
    +			}
    +			mi.setPresent(p, index)
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			rv.Set(pv.Elem())
    +		},
    +		mutable: func(p pointer) protoreflect.Value {
    +			mi.setPresent(p, index)
    +			return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
    +		},
    +		newField: func() protoreflect.Value {
    +			return conv.New()
    +		},
    +	}
    +}
    +
    +func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
    +	ft := fs.Type
    +	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
    +		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
    +	}
    +	conv := NewConverter(ft, fd)
    +	fieldOffset := offsetOf(fs)
    +	index, _ := presenceIndex(mi.Desc, fd)
    +	fieldNumber := fd.Number()
    +	return fieldInfo{
    +		fieldDesc: fd,
    +		has: func(p pointer) bool {
    +			if p.IsNil() {
    +				return false
    +			}
    +			if !mi.present(p, index) {
    +				return false
    +			}
    +			sp := p.Apply(fieldOffset).AtomicGetPointer()
    +			if sp.IsNil() {
    +				// Lazily unmarshal this field.
    +				mi.lazyUnmarshal(p, fieldNumber)
    +				sp = p.Apply(fieldOffset).AtomicGetPointer()
    +			}
    +			rv := sp.AsValueOf(fs.Type.Elem())
    +			return rv.Elem().Len() > 0
    +		},
    +		clear: func(p pointer) {
    +			fp := p.Apply(fieldOffset)
    +			sp := fp.AtomicGetPointer()
    +			if sp.IsNil() {
    +				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
    +				mi.setPresent(p, index)
    +			}
    +			rv := sp.AsValueOf(fs.Type.Elem())
    +			rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
    +		},
    +		get: func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			if !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			sp := p.Apply(fieldOffset).AtomicGetPointer()
    +			if sp.IsNil() {
    +				// Lazily unmarshal this field.
    +				mi.lazyUnmarshal(p, fieldNumber)
    +				sp = p.Apply(fieldOffset).AtomicGetPointer()
    +			}
    +			rv := sp.AsValueOf(fs.Type.Elem())
    +			if rv.Elem().Len() == 0 {
    +				return conv.Zero()
    +			}
    +			return conv.PBValueOf(rv)
    +		},
    +		set: func(p pointer, v protoreflect.Value) {
    +			fp := p.Apply(fieldOffset)
    +			sp := fp.AtomicGetPointer()
    +			if sp.IsNil() {
    +				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
    +				mi.setPresent(p, index)
    +			}
    +			rv := sp.AsValueOf(fs.Type.Elem())
    +			val := conv.GoValueOf(v)
    +			if val.IsNil() {
    +				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
    +			} else {
    +				rv.Elem().Set(val.Elem())
    +			}
    +		},
    +		mutable: func(p pointer) protoreflect.Value {
    +			fp := p.Apply(fieldOffset)
    +			sp := fp.AtomicGetPointer()
    +			if sp.IsNil() {
    +				if mi.present(p, index) {
    +					// Lazily unmarshal this field.
    +					mi.lazyUnmarshal(p, fieldNumber)
    +					sp = p.Apply(fieldOffset).AtomicGetPointer()
    +				} else {
    +					sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
    +					mi.setPresent(p, index)
    +				}
    +			}
    +			rv := sp.AsValueOf(fs.Type.Elem())
    +			return conv.PBValueOf(rv)
    +		},
    +		newField: func() protoreflect.Value {
    +			return conv.New()
    +		},
    +	}
    +}
    +
    +func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
    +	ft := fs.Type
    +	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
    +		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
    +	}
    +	conv := NewConverter(ft, fd)
    +	fieldOffset := offsetOf(fs)
    +	return fieldInfo{
    +		fieldDesc: fd,
    +		has: func(p pointer) bool {
    +			if p.IsNil() {
    +				return false
    +			}
    +			sp := p.Apply(fieldOffset).AtomicGetPointer()
    +			if sp.IsNil() {
    +				return false
    +			}
    +			rv := sp.AsValueOf(fs.Type.Elem())
    +			return rv.Elem().Len() > 0
    +		},
    +		clear: func(p pointer) {
    +			sp := p.Apply(fieldOffset).AtomicGetPointer()
    +			if !sp.IsNil() {
    +				rv := sp.AsValueOf(fs.Type.Elem())
    +				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
    +			}
    +		},
    +		get: func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			sp := p.Apply(fieldOffset).AtomicGetPointer()
    +			if sp.IsNil() {
    +				return conv.Zero()
    +			}
    +			rv := sp.AsValueOf(fs.Type.Elem())
    +			if rv.Elem().Len() == 0 {
    +				return conv.Zero()
    +			}
    +			return conv.PBValueOf(rv)
    +		},
    +		set: func(p pointer, v protoreflect.Value) {
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if rv.IsNil() {
    +				rv.Set(reflect.New(fs.Type.Elem()))
    +			}
    +			val := conv.GoValueOf(v)
    +			if val.IsNil() {
    +				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
    +			} else {
    +				rv.Elem().Set(val.Elem())
    +			}
    +		},
    +		mutable: func(p pointer) protoreflect.Value {
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if rv.IsNil() {
    +				rv.Set(reflect.New(fs.Type.Elem()))
    +			}
    +			return conv.PBValueOf(rv)
    +		},
    +		newField: func() protoreflect.Value {
    +			return conv.New()
    +		},
    +	}
    +}
    +
    +func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
    +	ft := fs.Type
    +	nullable := fd.HasPresence()
    +	if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
    +		nullable = true
    +	}
    +	deref := false
    +	if nullable && ft.Kind() == reflect.Ptr {
    +		ft = ft.Elem()
    +		deref = true
    +	}
    +	conv := NewConverter(ft, fd)
    +	fieldOffset := offsetOf(fs)
    +	index, _ := presenceIndex(mi.Desc, fd)
    +	var getter func(p pointer) protoreflect.Value
    +	if !nullable {
    +		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
    +	} else {
    +		getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
    +	}
    +	return fieldInfo{
    +		fieldDesc: fd,
    +		has: func(p pointer) bool {
    +			if p.IsNil() {
    +				return false
    +			}
    +			if nullable {
    +				return mi.present(p, index)
    +			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			switch rv.Kind() {
    +			case reflect.Bool:
    +				return rv.Bool()
    +			case reflect.Int32, reflect.Int64:
    +				return rv.Int() != 0
    +			case reflect.Uint32, reflect.Uint64:
    +				return rv.Uint() != 0
    +			case reflect.Float32, reflect.Float64:
    +				return rv.Float() != 0 || math.Signbit(rv.Float())
    +			case reflect.String, reflect.Slice:
    +				return rv.Len() > 0
    +			default:
    +				panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
    +			}
    +		},
    +		clear: func(p pointer) {
    +			if nullable {
    +				mi.clearPresent(p, index)
    +			}
    +			// This is only valuable for bytes and strings, but we do it unconditionally.
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			rv.Set(reflect.Zero(rv.Type()))
    +		},
    +		get: getter,
    +		// TODO: Implement unsafe fast path for set?
    +		set: func(p pointer, v protoreflect.Value) {
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			if deref {
    +				if rv.IsNil() {
    +					rv.Set(reflect.New(ft))
    +				}
    +				rv = rv.Elem()
    +			}
    +
    +			rv.Set(conv.GoValueOf(v))
    +			if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
    +				rv.Set(emptyBytes)
    +			}
    +			if nullable {
    +				mi.setPresent(p, index)
    +			}
    +		},
    +		newField: func() protoreflect.Value {
    +			return conv.New()
    +		},
    +	}
    +}
    +
    +func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
    +	ft := fs.Type
    +	conv := NewConverter(ft, fd)
    +	fieldOffset := offsetOf(fs)
    +	index, _ := presenceIndex(mi.Desc, fd)
    +	fieldNumber := fd.Number()
    +	elemType := fs.Type.Elem()
    +	return fieldInfo{
    +		fieldDesc: fd,
    +		has: func(p pointer) bool {
    +			if p.IsNil() {
    +				return false
    +			}
    +			return mi.present(p, index)
    +		},
    +		clear: func(p pointer) {
    +			mi.clearPresent(p, index)
    +			p.Apply(fieldOffset).AtomicSetNilPointer()
    +		},
    +		get: func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			fp := p.Apply(fieldOffset)
    +			mp := fp.AtomicGetPointer()
    +			if mp.IsNil() {
    +				// Lazily unmarshal this field.
    +				mi.lazyUnmarshal(p, fieldNumber)
    +				mp = fp.AtomicGetPointer()
    +			}
    +			rv := mp.AsValueOf(elemType)
    +			return conv.PBValueOf(rv)
    +		},
    +		set: func(p pointer, v protoreflect.Value) {
    +			val := pointerOfValue(conv.GoValueOf(v))
    +			if val.IsNil() {
    +				panic("invalid nil pointer")
    +			}
    +			p.Apply(fieldOffset).AtomicSetPointer(val)
    +			mi.setPresent(p, index)
    +		},
    +		mutable: func(p pointer) protoreflect.Value {
    +			fp := p.Apply(fieldOffset)
    +			mp := fp.AtomicGetPointer()
    +			if mp.IsNil() {
    +				if mi.present(p, index) {
    +					// Lazily unmarshal this field.
    +					mi.lazyUnmarshal(p, fieldNumber)
    +					mp = fp.AtomicGetPointer()
    +				} else {
    +					mp = pointerOfValue(conv.GoValueOf(conv.New()))
    +					fp.AtomicSetPointer(mp)
    +					mi.setPresent(p, index)
    +				}
    +			}
    +			return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
    +		},
    +		newMessage: func() protoreflect.Message {
    +			return conv.New().Message()
    +		},
    +		newField: func() protoreflect.Value {
    +			return conv.New()
    +		},
    +	}
    +}
    +
    +// A presenceList wraps a List, updating presence bits as necessary when the
    +// list contents change.
    +type presenceList struct {
    +	pvalueList
    +	setPresence func(bool)
    +}
    +type pvalueList interface {
    +	protoreflect.List
    +	//Unwrapper
    +}
    +
    +func (list presenceList) Append(v protoreflect.Value) {
    +	list.pvalueList.Append(v)
    +	list.setPresence(true)
    +}
    +func (list presenceList) Truncate(i int) {
    +	list.pvalueList.Truncate(i)
    +	list.setPresence(i > 0)
    +}
    +
    +// presenceIndex returns the index to pass to presence functions.
    +//
    +// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
    +func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
    +	found := false
    +	var index, numIndices uint32
    +	for i := 0; i < md.Fields().Len(); i++ {
    +		f := md.Fields().Get(i)
    +		if f == fd {
    +			found = true
    +			index = numIndices
    +		}
    +		if f.ContainingOneof() == nil || isLastOneofField(f) {
    +			numIndices++
    +		}
    +	}
    +	if !found {
    +		panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
    +	}
    +	return index, presenceSize(numIndices)
    +}
    +
    +func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
    +	fields := fd.ContainingOneof().Fields()
    +	return fields.Get(fields.Len()-1) == fd
    +}
    +
    +func (mi *MessageInfo) setPresent(p pointer, index uint32) {
    +	p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
    +}
    +
    +func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
    +	p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
    +}
    +
    +func (mi *MessageInfo) present(p pointer, index uint32) bool {
    +	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
    +}
    +
    +// usePresenceForField implements the somewhat intricate logic of when
    +// the presence bitmap is used for a field.  The main logic is that a
    +// field that is optional or that can be lazy will use the presence
    +// bit, but for proto2, also maps have a presence bit. It also records
    +// if the field can ever be lazy, which is true if we have a
    +// lazyOffset and the field is a message or a slice of messages. A
    +// field that is lazy will always need a presence bit.  Oneofs are not
    +// lazy and do not use presence, unless they are a synthetic oneof,
    +// which is a proto3 optional field. For proto3 optionals, we use the
    +// presence and they can also be lazy when applicable (a message).
    +func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
    +	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
    +
    +	// Non-oneof scalar fields with explicit field presence use the presence array.
    +	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
    +	switch {
    +	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
    +		return false, false
    +	case fd.IsWeak():
    +		return false, false
    +	case fd.IsMap():
    +		return false, false
    +	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
    +		return hasLazyField, hasLazyField
    +	default:
    +		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
    +	}
    +}
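The opaque-API detection in this new file keys off a `protogen` struct tag on the first generated field. Under that same assumption (a tag value beginning with "opaque."), the check can be reproduced with plain reflection; the message struct below is hypothetical:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// opaqueMsg imitates an opaque-API message: the generator stamps the
// first field with a protogen tag such as `protogen:"opaque.v1"`.
type opaqueMsg struct {
	state           int `protogen:"opaque.v1"`
	xxx_hidden_Name *string
}

// isOpaque reproduces the detection shown above: a struct whose first
// field carries a protogen tag with the "opaque." prefix.
func isOpaque(t reflect.Type) bool {
	if t.Kind() == reflect.Struct && t.NumField() > 0 {
		return strings.HasPrefix(t.Field(0).Tag.Get("protogen"), "opaque.")
	}
	return false
}

func main() {
	fmt.Println(isOpaque(reflect.TypeOf(opaqueMsg{})))       // true
	fmt.Println(isOpaque(reflect.TypeOf(struct{ A int }{}))) // false
}
```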
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
    new file mode 100644
    index 0000000000..a69825699a
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
    @@ -0,0 +1,132 @@
    +// Copyright 2018 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Code generated by generate-types. DO NOT EDIT.
    +
    +package impl
    +
    +import (
    +	"reflect"
    +
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +)
    +
    +func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
    +	ft := fs.Type
    +	if ft.Kind() == reflect.Ptr {
    +		ft = ft.Elem()
    +	}
    +	if fd.Kind() == protoreflect.EnumKind {
    +		// Enums for nullable opaque types.
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			return conv.PBValueOf(rv)
    +		}
    +	}
    +	switch ft.Kind() {
    +	case reflect.Bool:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Bool()
    +			return protoreflect.ValueOfBool(*x)
    +		}
    +	case reflect.Int32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Int32()
    +			return protoreflect.ValueOfInt32(*x)
    +		}
    +	case reflect.Uint32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Uint32()
    +			return protoreflect.ValueOfUint32(*x)
    +		}
    +	case reflect.Int64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Int64()
    +			return protoreflect.ValueOfInt64(*x)
    +		}
    +	case reflect.Uint64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Uint64()
    +			return protoreflect.ValueOfUint64(*x)
    +		}
    +	case reflect.Float32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Float32()
    +			return protoreflect.ValueOfFloat32(*x)
    +		}
    +	case reflect.Float64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Float64()
    +			return protoreflect.ValueOfFloat64(*x)
    +		}
    +	case reflect.String:
    +		if fd.Kind() == protoreflect.BytesKind {
    +			return func(p pointer) protoreflect.Value {
    +				if p.IsNil() || !mi.present(p, index) {
    +					return conv.Zero()
    +				}
    +				x := p.Apply(fieldOffset).StringPtr()
    +				if *x == nil {
    +					return conv.Zero()
    +				}
    +				if len(**x) == 0 {
    +					return protoreflect.ValueOfBytes(nil)
    +				}
    +				return protoreflect.ValueOfBytes([]byte(**x))
    +			}
    +		}
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).StringPtr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfString(**x)
    +		}
    +	case reflect.Slice:
    +		if fd.Kind() == protoreflect.StringKind {
    +			return func(p pointer) protoreflect.Value {
    +				if p.IsNil() || !mi.present(p, index) {
    +					return conv.Zero()
    +				}
    +				x := p.Apply(fieldOffset).Bytes()
    +				return protoreflect.ValueOfString(string(*x))
    +			}
    +		}
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() || !mi.present(p, index) {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Bytes()
    +			return protoreflect.ValueOfBytes(*x)
    +		}
    +	}
    +	panic("unexpected protobuf kind: " + ft.Kind().String())
    +}
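The generated getters above are constructed once per field and close over a concrete kind and offset, so the per-call path performs a single typed load instead of going through `reflect.Value` or re-switching on the kind. A minimal sketch of that specialization pattern with toy types, not the library's pointer machinery:

```go
package main

import (
	"fmt"
	"unsafe"
)

type record struct {
	count int32
	label string
}

// getterForInt32 builds a closure bound to one field offset, mirroring
// how getterForOpaqueNullableScalar picks a typed accessor up front
// rather than deciding the kind on every call.
func getterForInt32(offset uintptr) func(p unsafe.Pointer) int32 {
	return func(p unsafe.Pointer) int32 {
		return *(*int32)(unsafe.Add(p, offset))
	}
}

func main() {
	r := record{count: 7, label: "x"}
	get := getterForInt32(unsafe.Offsetof(r.count))
	fmt.Println(get(unsafe.Pointer(&r))) // 7
}
```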
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
    index d9ea010bef..31c19b54f8 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
    @@ -20,7 +20,7 @@ type reflectMessageInfo struct {
     	// fieldTypes contains the zero value of an enum or message field.
     	// For lists, it contains the element type.
     	// For maps, it contains the entry value type.
    -	fieldTypes map[protoreflect.FieldNumber]interface{}
    +	fieldTypes map[protoreflect.FieldNumber]any
     
     	// denseFields is a subset of fields where:
     	//	0 < fieldDesc.Number() < len(denseFields)
    @@ -28,7 +28,7 @@ type reflectMessageInfo struct {
     	denseFields []*fieldInfo
     
     	// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
    -	rangeInfos []interface{} // either *fieldInfo or *oneofInfo
    +	rangeInfos []any // either *fieldInfo or *oneofInfo
     
     	getUnknown   func(pointer) protoreflect.RawFields
     	setUnknown   func(pointer, protoreflect.RawFields)
    @@ -205,6 +205,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
     		case fd.IsList():
     			if fd.Enum() != nil || fd.Message() != nil {
     				ft = fs.Type.Elem()
    +
    +				if ft.Kind() == reflect.Slice {
    +					ft = ft.Elem()
    +				}
    +
     			}
     			isMessage = fd.Message() != nil
     		case fd.Enum() != nil:
    @@ -224,7 +229,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
     		}
     		if ft != nil {
     			if mi.fieldTypes == nil {
    -				mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
    +				mi.fieldTypes = make(map[protoreflect.FieldNumber]any)
     			}
     			mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
     		}
    @@ -247,39 +252,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V
     		}
     	}
     }
    -func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) {
    +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) {
     	if m == nil {
     		return false
     	}
    -	xd := xt.TypeDescriptor()
     	x, ok := (*m)[int32(xd.Number())]
     	if !ok {
     		return false
     	}
    +	if x.isUnexpandedLazy() {
    +		// Avoid calling x.Value(), which triggers a lazy unmarshal.
    +		return true
    +	}
     	switch {
     	case xd.IsList():
     		return x.Value().List().Len() > 0
     	case xd.IsMap():
     		return x.Value().Map().Len() > 0
    -	case xd.Message() != nil:
    -		return x.Value().Message().IsValid()
     	}
     	return true
     }
    -func (m *extensionMap) Clear(xt protoreflect.ExtensionType) {
    -	delete(*m, int32(xt.TypeDescriptor().Number()))
    +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) {
    +	delete(*m, int32(xd.Number()))
     }
    -func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value {
    -	xd := xt.TypeDescriptor()
    +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
     	if m != nil {
     		if x, ok := (*m)[int32(xd.Number())]; ok {
     			return x.Value()
     		}
     	}
    -	return xt.Zero()
    +	return xd.Type().Zero()
     }
    -func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) {
    -	xd := xt.TypeDescriptor()
    +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) {
    +	xt := xd.Type()
     	isValid := true
     	switch {
     	case !xt.IsValidValue(v):
    @@ -292,7 +297,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
     		isValid = v.Message().IsValid()
     	}
     	if !isValid {
    -		panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
    +		panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName()))
     	}
     
     	if *m == nil {
    @@ -302,16 +307,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
     	x.Set(xt, v)
     	(*m)[int32(xd.Number())] = x
     }
    -func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value {
    -	xd := xt.TypeDescriptor()
    +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
     	if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() {
     		panic("invalid Mutable on field with non-composite type")
     	}
     	if x, ok := (*m)[int32(xd.Number())]; ok {
     		return x.Value()
     	}
    -	v := xt.New()
    -	m.Set(xt, v)
    +	v := xd.Type().New()
    +	m.Set(xd, v)
     	return v
     }
     
    @@ -394,7 +398,7 @@ var (
     // MessageOf returns a reflective view over a message. The input must be a
     // pointer to a named Go struct. If the provided type has a ProtoReflect method,
     // it must be implemented by calling this method.
    -func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
    +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message {
     	if reflect.TypeOf(m) != mi.GoReflectType {
     		panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
     	}
    @@ -422,13 +426,13 @@ func (m *messageIfaceWrapper) Reset() {
     func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
     	return (*messageReflectWrapper)(m)
     }
    -func (m *messageIfaceWrapper) protoUnwrap() interface{} {
    +func (m *messageIfaceWrapper) protoUnwrap() any {
     	return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
     }
     
     // checkField verifies that the provided field descriptor is valid.
     // Exactly one of the returned values is populated.
    -func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) {
    +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) {
     	var fi *fieldInfo
     	if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
     		fi = mi.denseFields[n]
    @@ -457,7 +461,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo,
     		if !ok {
     			panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
     		}
    -		return nil, xtd.Type()
    +		return nil, xtd
     	}
     	panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
     }
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
    index 986322b195..3cd1fbc21f 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
    @@ -76,7 +76,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField,
     	isMessage := fd.Message() != nil
     
     	// TODO: Implement unsafe fast path?
    -	fieldOffset := offsetOf(fs, x)
    +	fieldOffset := offsetOf(fs)
     	return fieldInfo{
     		// NOTE: The logic below intentionally assumes that oneof fields are
     		// well-formatted. That is, the oneof interface never contains a
    @@ -152,7 +152,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
     	conv := NewConverter(ft, fd)
     
     	// TODO: Implement unsafe fast path?
    -	fieldOffset := offsetOf(fs, x)
    +	fieldOffset := offsetOf(fs)
     	return fieldInfo{
     		fieldDesc: fd,
     		has: func(p pointer) bool {
    @@ -205,7 +205,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
     	conv := NewConverter(reflect.PtrTo(ft), fd)
     
     	// TODO: Implement unsafe fast path?
    -	fieldOffset := offsetOf(fs, x)
    +	fieldOffset := offsetOf(fs)
     	return fieldInfo{
     		fieldDesc: fd,
     		has: func(p pointer) bool {
    @@ -256,6 +256,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
     	ft := fs.Type
     	nullable := fd.HasPresence()
     	isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
    +	var getter func(p pointer) protoreflect.Value
     	if nullable {
     		if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
     			// This never occurs for generated message types.
    @@ -268,19 +269,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
     		}
     	}
     	conv := NewConverter(ft, fd)
    +	fieldOffset := offsetOf(fs)
    +
    +	// Generate specialized getter functions to avoid going through reflect.Value
    +	if nullable {
    +		getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
    +	} else {
    +		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
    +	}
     
    -	// TODO: Implement unsafe fast path?
    -	fieldOffset := offsetOf(fs, x)
     	return fieldInfo{
     		fieldDesc: fd,
     		has: func(p pointer) bool {
     			if p.IsNil() {
     				return false
     			}
    -			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
     			if nullable {
    -				return !rv.IsNil()
    +				return !p.Apply(fieldOffset).Elem().IsNil()
     			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
     			switch rv.Kind() {
     			case reflect.Bool:
     				return rv.Bool()
    @@ -300,21 +307,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
     			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
     			rv.Set(reflect.Zero(rv.Type()))
     		},
    -		get: func(p pointer) protoreflect.Value {
    -			if p.IsNil() {
    -				return conv.Zero()
    -			}
    -			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    -			if nullable {
    -				if rv.IsNil() {
    -					return conv.Zero()
    -				}
    -				if rv.Kind() == reflect.Ptr {
    -					rv = rv.Elem()
    -				}
    -			}
    -			return conv.PBValueOf(rv)
    -		},
    +		get: getter,
    +		// TODO: Implement unsafe fast path for set?
     		set: func(p pointer, v protoreflect.Value) {
     			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
     			if nullable && rv.Kind() == reflect.Ptr {
    @@ -339,7 +333,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
     }
     
     func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
    -	if !flags.ProtoLegacy {
    +	if !flags.ProtoLegacyWeak {
     		panic("no support for proto1 weak fields")
     	}
     
    @@ -416,7 +410,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField
     	conv := NewConverter(ft, fd)
     
     	// TODO: Implement unsafe fast path?
    -	fieldOffset := offsetOf(fs, x)
    +	fieldOffset := offsetOf(fs)
     	return fieldInfo{
     		fieldDesc: fd,
     		has: func(p pointer) bool {
    @@ -425,7 +419,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField
     			}
     			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
     			if fs.Type.Kind() != reflect.Ptr {
    -				return !isZero(rv)
    +				return !rv.IsZero()
     			}
     			return !rv.IsNil()
     		},
    @@ -472,7 +466,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
     	oi := &oneofInfo{oneofDesc: od}
     	if od.IsSynthetic() {
     		fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
    -		fieldOffset := offsetOf(fs, x)
    +		fieldOffset := offsetOf(fs)
     		oi.which = func(p pointer) protoreflect.FieldNumber {
     			if p.IsNil() {
     				return 0
    @@ -485,7 +479,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
     		}
     	} else {
     		fs := si.oneofsByName[od.Name()]
    -		fieldOffset := offsetOf(fs, x)
    +		fieldOffset := offsetOf(fs)
     		oi.which = func(p pointer) protoreflect.FieldNumber {
     			if p.IsNil() {
     				return 0
    @@ -503,41 +497,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
     	}
     	return oi
     }
    -
    -// isZero is identical to reflect.Value.IsZero.
    -// TODO: Remove this when Go1.13 is the minimally supported Go version.
    -func isZero(v reflect.Value) bool {
    -	switch v.Kind() {
    -	case reflect.Bool:
    -		return !v.Bool()
    -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    -		return v.Int() == 0
    -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
    -		return v.Uint() == 0
    -	case reflect.Float32, reflect.Float64:
    -		return math.Float64bits(v.Float()) == 0
    -	case reflect.Complex64, reflect.Complex128:
    -		c := v.Complex()
    -		return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
    -	case reflect.Array:
    -		for i := 0; i < v.Len(); i++ {
    -			if !isZero(v.Index(i)) {
    -				return false
    -			}
    -		}
    -		return true
    -	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
    -		return v.IsNil()
    -	case reflect.String:
    -		return v.Len() == 0
    -	case reflect.Struct:
    -		for i := 0; i < v.NumField(); i++ {
    -			if !isZero(v.Field(i)) {
    -				return false
    -			}
    -		}
    -		return true
    -	default:
    -		panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()})
    -	}
    -}
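The hand-rolled `isZero` helper is removed because `reflect.Value.IsZero` has been available in the standard library since Go 1.13, which is well below this module's minimum Go version. Equivalent usage:

```go
package main

import (
	"fmt"
	"reflect"
)

type sub struct {
	Name string
	N    int
}

func main() {
	var a sub
	b := sub{N: 1}
	// IsZero reports whether the value is the zero value for its type,
	// handling structs recursively, just like the removed helper did.
	fmt.Println(reflect.ValueOf(a).IsZero()) // true
	fmt.Println(reflect.ValueOf(b).IsZero()) // false
}
```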
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
    new file mode 100644
    index 0000000000..af5e063a1e
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
    @@ -0,0 +1,273 @@
    +// Copyright 2018 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Code generated by generate-types. DO NOT EDIT.
    +
    +package impl
    +
    +import (
    +	"reflect"
    +
    +	"google.golang.org/protobuf/reflect/protoreflect"
    +)
    +
    +func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
    +	ft := fs.Type
    +	if ft.Kind() == reflect.Ptr {
    +		ft = ft.Elem()
    +	}
    +	if fd.Kind() == protoreflect.EnumKind {
    +		elemType := fs.Type.Elem()
    +		// Enums for nullable types.
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
    +			if rv.IsNil() {
    +				return conv.Zero()
    +			}
    +			return conv.PBValueOf(rv.Elem())
    +		}
    +	}
    +	switch ft.Kind() {
    +	case reflect.Bool:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).BoolPtr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfBool(**x)
    +		}
    +	case reflect.Int32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Int32Ptr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfInt32(**x)
    +		}
    +	case reflect.Uint32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Uint32Ptr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfUint32(**x)
    +		}
    +	case reflect.Int64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Int64Ptr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfInt64(**x)
    +		}
    +	case reflect.Uint64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Uint64Ptr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfUint64(**x)
    +		}
    +	case reflect.Float32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Float32Ptr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfFloat32(**x)
    +		}
    +	case reflect.Float64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Float64Ptr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfFloat64(**x)
    +		}
    +	case reflect.String:
    +		if fd.Kind() == protoreflect.BytesKind {
    +			return func(p pointer) protoreflect.Value {
    +				if p.IsNil() {
    +					return conv.Zero()
    +				}
    +				x := p.Apply(fieldOffset).StringPtr()
    +				if *x == nil {
    +					return conv.Zero()
    +				}
    +				if len(**x) == 0 {
    +					return protoreflect.ValueOfBytes(nil)
    +				}
    +				return protoreflect.ValueOfBytes([]byte(**x))
    +			}
    +		}
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).StringPtr()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfString(**x)
    +		}
    +	case reflect.Slice:
    +		if fd.Kind() == protoreflect.StringKind {
    +			return func(p pointer) protoreflect.Value {
    +				if p.IsNil() {
    +					return conv.Zero()
    +				}
    +				x := p.Apply(fieldOffset).Bytes()
    +				if len(*x) == 0 {
    +					return conv.Zero()
    +				}
    +				return protoreflect.ValueOfString(string(*x))
    +			}
    +		}
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Bytes()
    +			if *x == nil {
    +				return conv.Zero()
    +			}
    +			return protoreflect.ValueOfBytes(*x)
    +		}
    +	}
    +	panic("unexpected protobuf kind: " + ft.Kind().String())
    +}
    +
    +func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
    +	ft := fs.Type
    +	if fd.Kind() == protoreflect.EnumKind {
    +		// Enums for non nullable types.
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
    +			return conv.PBValueOf(rv)
    +		}
    +	}
    +	switch ft.Kind() {
    +	case reflect.Bool:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Bool()
    +			return protoreflect.ValueOfBool(*x)
    +		}
    +	case reflect.Int32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Int32()
    +			return protoreflect.ValueOfInt32(*x)
    +		}
    +	case reflect.Uint32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Uint32()
    +			return protoreflect.ValueOfUint32(*x)
    +		}
    +	case reflect.Int64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Int64()
    +			return protoreflect.ValueOfInt64(*x)
    +		}
    +	case reflect.Uint64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Uint64()
    +			return protoreflect.ValueOfUint64(*x)
    +		}
    +	case reflect.Float32:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Float32()
    +			return protoreflect.ValueOfFloat32(*x)
    +		}
    +	case reflect.Float64:
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Float64()
    +			return protoreflect.ValueOfFloat64(*x)
    +		}
    +	case reflect.String:
    +		if fd.Kind() == protoreflect.BytesKind {
    +			return func(p pointer) protoreflect.Value {
    +				if p.IsNil() {
    +					return conv.Zero()
    +				}
    +				x := p.Apply(fieldOffset).String()
    +				if len(*x) == 0 {
    +					return protoreflect.ValueOfBytes(nil)
    +				}
    +				return protoreflect.ValueOfBytes([]byte(*x))
    +			}
    +		}
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).String()
    +			return protoreflect.ValueOfString(*x)
    +		}
    +	case reflect.Slice:
    +		if fd.Kind() == protoreflect.StringKind {
    +			return func(p pointer) protoreflect.Value {
    +				if p.IsNil() {
    +					return conv.Zero()
    +				}
    +				x := p.Apply(fieldOffset).Bytes()
    +				return protoreflect.ValueOfString(string(*x))
    +			}
    +		}
    +		return func(p pointer) protoreflect.Value {
    +			if p.IsNil() {
    +				return conv.Zero()
    +			}
    +			x := p.Apply(fieldOffset).Bytes()
    +			return protoreflect.ValueOfBytes(*x)
    +		}
    +	}
    +	panic("unexpected protobuf kind: " + ft.Kind().String())
    +}
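The two getters above pick a closure per Go reflect.Kind at message-init time, so the per-call work is just a pointer offset plus a nil check (nullable fields are pointers, direct fields are plain values). A minimal standalone sketch of that pattern, using plain reflection and an assumed row type rather than the package's unsafe pointer machinery:

    // A minimal sketch (assumed types, plain reflection, no unsafe) of the
    // per-kind getter-closure pattern used above.
    package main

    import (
    	"fmt"
    	"reflect"
    )

    // makeGetter builds a closure for field i of struct type t. Pointer fields
    // are treated as nullable: a nil pointer yields the field's zero value.
    func makeGetter(t reflect.Type, i int) func(v reflect.Value) any {
    	ft := t.Field(i).Type
    	switch ft.Kind() {
    	case reflect.Ptr: // nullable scalar, stored as *T
    		zero := reflect.Zero(ft.Elem()).Interface()
    		return func(v reflect.Value) any {
    			f := v.Field(i)
    			if f.IsNil() {
    				return zero
    			}
    			return f.Elem().Interface()
    		}
    	default: // direct scalar, stored as T
    		return func(v reflect.Value) any {
    			return v.Field(i).Interface()
    		}
    	}
    }

    type row struct {
    	A int32
    	B *string
    }

    func main() {
    	t := reflect.TypeOf(row{})
    	getA, getB := makeGetter(t, 0), makeGetter(t, 1)
    	s := "hi"
    	v := reflect.ValueOf(row{A: 7, B: &s})
    	fmt.Println(getA(v), getB(v))             // 7 hi
    	fmt.Println(getB(reflect.ValueOf(row{}))) // "" (nil pointer -> zero value)
    }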
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
    index 741d6e5b6b..99dc23c6f0 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
    @@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message {
     func (m *messageState) Interface() protoreflect.ProtoMessage {
     	return m.protoUnwrap().(protoreflect.ProtoMessage)
     }
    -func (m *messageState) protoUnwrap() interface{} {
    +func (m *messageState) protoUnwrap() any {
     	return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
     }
     func (m *messageState) ProtoMethods() *protoiface.Methods {
    -	m.messageInfo().init()
    -	return &m.messageInfo().methods
    +	mi := m.messageInfo()
    +	mi.init()
    +	return &mi.methods
     }
     
     // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
    @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo {
     }
     
     func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
    -	m.messageInfo().init()
    -	for _, ri := range m.messageInfo().rangeInfos {
    +	mi := m.messageInfo()
    +	mi.init()
    +	for _, ri := range mi.rangeInfos {
     		switch ri := ri.(type) {
     		case *fieldInfo:
     			if ri.has(m.pointer()) {
    @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V
     			}
     		case *oneofInfo:
     			if n := ri.which(m.pointer()); n > 0 {
    -				fi := m.messageInfo().fields[n]
    +				fi := mi.fields[n]
     				if !f(fi.fieldDesc, fi.get(m.pointer())) {
     					return
     				}
     			}
     		}
     	}
    -	m.messageInfo().extensionMap(m.pointer()).Range(f)
    +	mi.extensionMap(m.pointer()).Range(f)
     }
     func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.has(m.pointer())
     	} else {
    -		return m.messageInfo().extensionMap(m.pointer()).Has(xt)
    +		return mi.extensionMap(m.pointer()).Has(xd)
     	}
     }
     func (m *messageState) Clear(fd protoreflect.FieldDescriptor) {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		fi.clear(m.pointer())
     	} else {
    -		m.messageInfo().extensionMap(m.pointer()).Clear(xt)
    +		mi.extensionMap(m.pointer()).Clear(xd)
     	}
     }
     func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.get(m.pointer())
     	} else {
    -		return m.messageInfo().extensionMap(m.pointer()).Get(xt)
    +		return mi.extensionMap(m.pointer()).Get(xd)
     	}
     }
     func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		fi.set(m.pointer(), v)
     	} else {
    -		m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
    +		mi.extensionMap(m.pointer()).Set(xd, v)
     	}
     }
     func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.mutable(m.pointer())
     	} else {
    -		return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
    +		return mi.extensionMap(m.pointer()).Mutable(xd)
     	}
     }
     func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.newField()
     	} else {
    -		return xt.New()
    +		return xd.Type().New()
     	}
     }
     func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
    -	m.messageInfo().init()
    -	if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
     		return od.Fields().ByNumber(oi.which(m.pointer()))
     	}
     	panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
     }
     func (m *messageState) GetUnknown() protoreflect.RawFields {
    -	m.messageInfo().init()
    -	return m.messageInfo().getUnknown(m.pointer())
    +	mi := m.messageInfo()
    +	mi.init()
    +	return mi.getUnknown(m.pointer())
     }
     func (m *messageState) SetUnknown(b protoreflect.RawFields) {
    -	m.messageInfo().init()
    -	m.messageInfo().setUnknown(m.pointer(), b)
    +	mi := m.messageInfo()
    +	mi.init()
    +	mi.setUnknown(m.pointer(), b)
     }
     func (m *messageState) IsValid() bool {
     	return !m.pointer().IsNil()
    @@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
     	}
     	return (*messageIfaceWrapper)(m)
     }
    -func (m *messageReflectWrapper) protoUnwrap() interface{} {
    +func (m *messageReflectWrapper) protoUnwrap() any {
     	return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
     }
     func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
    -	m.messageInfo().init()
    -	return &m.messageInfo().methods
    +	mi := m.messageInfo()
    +	mi.init()
    +	return &mi.methods
     }
     
     // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
    @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo {
     }
     
     func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
    -	m.messageInfo().init()
    -	for _, ri := range m.messageInfo().rangeInfos {
    +	mi := m.messageInfo()
    +	mi.init()
    +	for _, ri := range mi.rangeInfos {
     		switch ri := ri.(type) {
     		case *fieldInfo:
     			if ri.has(m.pointer()) {
    @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto
     			}
     		case *oneofInfo:
     			if n := ri.which(m.pointer()); n > 0 {
    -				fi := m.messageInfo().fields[n]
    +				fi := mi.fields[n]
     				if !f(fi.fieldDesc, fi.get(m.pointer())) {
     					return
     				}
     			}
     		}
     	}
    -	m.messageInfo().extensionMap(m.pointer()).Range(f)
    +	mi.extensionMap(m.pointer()).Range(f)
     }
     func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.has(m.pointer())
     	} else {
    -		return m.messageInfo().extensionMap(m.pointer()).Has(xt)
    +		return mi.extensionMap(m.pointer()).Has(xd)
     	}
     }
     func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		fi.clear(m.pointer())
     	} else {
    -		m.messageInfo().extensionMap(m.pointer()).Clear(xt)
    +		mi.extensionMap(m.pointer()).Clear(xd)
     	}
     }
     func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.get(m.pointer())
     	} else {
    -		return m.messageInfo().extensionMap(m.pointer()).Get(xt)
    +		return mi.extensionMap(m.pointer()).Get(xd)
     	}
     }
     func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		fi.set(m.pointer(), v)
     	} else {
    -		m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
    +		mi.extensionMap(m.pointer()).Set(xd, v)
     	}
     }
     func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.mutable(m.pointer())
     	} else {
    -		return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
    +		return mi.extensionMap(m.pointer()).Mutable(xd)
     	}
     }
     func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
    -	m.messageInfo().init()
    -	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if fi, xd := mi.checkField(fd); fi != nil {
     		return fi.newField()
     	} else {
    -		return xt.New()
    +		return xd.Type().New()
     	}
     }
     func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
    -	m.messageInfo().init()
    -	if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
    +	mi := m.messageInfo()
    +	mi.init()
    +	if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
     		return od.Fields().ByNumber(oi.which(m.pointer()))
     	}
     	panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
     }
     func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
    -	m.messageInfo().init()
    -	return m.messageInfo().getUnknown(m.pointer())
    +	mi := m.messageInfo()
    +	mi.init()
    +	return mi.getUnknown(m.pointer())
     }
     func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) {
    -	m.messageInfo().init()
    -	m.messageInfo().setUnknown(m.pointer(), b)
    +	mi := m.messageInfo()
    +	mi.init()
    +	mi.setUnknown(m.pointer(), b)
     }
     func (m *messageReflectWrapper) IsValid() bool {
     	return !m.pointer().IsNil()
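The hunks above hoist the repeated m.messageInfo() calls into a local mi and switch the extension path from extension types (xt) to descriptors (xd). A tiny sketch of the hoisting half, with made-up types, just to show why evaluating the accessor once per method is preferable:

    // A tiny sketch (made-up types) of the hoisting refactor above: evaluate the
    // accessor once per method and reuse the local, instead of calling it per use.
    package main

    import "fmt"

    type info struct{ fields map[int]string }

    type msg struct{ cached *info }

    // messageInfo stands in for an accessor that is cheap but not free.
    func (m *msg) messageInfo() *info {
    	if m.cached == nil {
    		m.cached = &info{fields: map[int]string{1: "name"}}
    	}
    	return m.cached
    }

    // Before: the accessor is re-evaluated for every use.
    func describeBefore(m *msg, n int) string {
    	return fmt.Sprintf("%d=%s (%d fields)", n, m.messageInfo().fields[n], len(m.messageInfo().fields))
    }

    // After: the result is captured once in mi and reused.
    func describeAfter(m *msg, n int) string {
    	mi := m.messageInfo()
    	return fmt.Sprintf("%d=%s (%d fields)", n, mi.fields[n], len(mi.fields))
    }

    func main() {
    	m := &msg{}
    	fmt.Println(describeBefore(m, 1)) // 1=name (1 fields)
    	fmt.Println(describeAfter(m, 1))  // 1=name (1 fields)
    }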
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
    deleted file mode 100644
    index 517e94434c..0000000000
    --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
    +++ /dev/null
    @@ -1,215 +0,0 @@
    -// Copyright 2018 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build purego || appengine
    -// +build purego appengine
    -
    -package impl
    -
    -import (
    -	"fmt"
    -	"reflect"
    -	"sync"
    -)
    -
    -const UnsafeEnabled = false
    -
    -// Pointer is an opaque pointer type.
    -type Pointer interface{}
    -
    -// offset represents the offset to a struct field, accessible from a pointer.
    -// The offset is the field index into a struct.
    -type offset struct {
    -	index  int
    -	export exporter
    -}
    -
    -// offsetOf returns a field offset for the struct field.
    -func offsetOf(f reflect.StructField, x exporter) offset {
    -	if len(f.Index) != 1 {
    -		panic("embedded structs are not supported")
    -	}
    -	if f.PkgPath == "" {
    -		return offset{index: f.Index[0]} // field is already exported
    -	}
    -	if x == nil {
    -		panic("exporter must be provided for unexported field")
    -	}
    -	return offset{index: f.Index[0], export: x}
    -}
    -
    -// IsValid reports whether the offset is valid.
    -func (f offset) IsValid() bool { return f.index >= 0 }
    -
    -// invalidOffset is an invalid field offset.
    -var invalidOffset = offset{index: -1}
    -
    -// zeroOffset is a noop when calling pointer.Apply.
    -var zeroOffset = offset{index: 0}
    -
    -// pointer is an abstract representation of a pointer to a struct or field.
    -type pointer struct{ v reflect.Value }
    -
    -// pointerOf returns p as a pointer.
    -func pointerOf(p Pointer) pointer {
    -	return pointerOfIface(p)
    -}
    -
    -// pointerOfValue returns v as a pointer.
    -func pointerOfValue(v reflect.Value) pointer {
    -	return pointer{v: v}
    -}
    -
    -// pointerOfIface returns the pointer portion of an interface.
    -func pointerOfIface(v interface{}) pointer {
    -	return pointer{v: reflect.ValueOf(v)}
    -}
    -
    -// IsNil reports whether the pointer is nil.
    -func (p pointer) IsNil() bool {
    -	return p.v.IsNil()
    -}
    -
    -// Apply adds an offset to the pointer to derive a new pointer
    -// to a specified field. The current pointer must be pointing at a struct.
    -func (p pointer) Apply(f offset) pointer {
    -	if f.export != nil {
    -		if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
    -			return pointer{v: v}
    -		}
    -	}
    -	return pointer{v: p.v.Elem().Field(f.index).Addr()}
    -}
    -
    -// AsValueOf treats p as a pointer to an object of type t and returns the value.
    -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
    -func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
    -	if got := p.v.Type().Elem(); got != t {
    -		panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
    -	}
    -	return p.v
    -}
    -
    -// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
    -// It is equivalent to p.AsValueOf(t).Interface()
    -func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
    -	return p.AsValueOf(t).Interface()
    -}
    -
    -func (p pointer) Bool() *bool              { return p.v.Interface().(*bool) }
    -func (p pointer) BoolPtr() **bool          { return p.v.Interface().(**bool) }
    -func (p pointer) BoolSlice() *[]bool       { return p.v.Interface().(*[]bool) }
    -func (p pointer) Int32() *int32            { return p.v.Interface().(*int32) }
    -func (p pointer) Int32Ptr() **int32        { return p.v.Interface().(**int32) }
    -func (p pointer) Int32Slice() *[]int32     { return p.v.Interface().(*[]int32) }
    -func (p pointer) Int64() *int64            { return p.v.Interface().(*int64) }
    -func (p pointer) Int64Ptr() **int64        { return p.v.Interface().(**int64) }
    -func (p pointer) Int64Slice() *[]int64     { return p.v.Interface().(*[]int64) }
    -func (p pointer) Uint32() *uint32          { return p.v.Interface().(*uint32) }
    -func (p pointer) Uint32Ptr() **uint32      { return p.v.Interface().(**uint32) }
    -func (p pointer) Uint32Slice() *[]uint32   { return p.v.Interface().(*[]uint32) }
    -func (p pointer) Uint64() *uint64          { return p.v.Interface().(*uint64) }
    -func (p pointer) Uint64Ptr() **uint64      { return p.v.Interface().(**uint64) }
    -func (p pointer) Uint64Slice() *[]uint64   { return p.v.Interface().(*[]uint64) }
    -func (p pointer) Float32() *float32        { return p.v.Interface().(*float32) }
    -func (p pointer) Float32Ptr() **float32    { return p.v.Interface().(**float32) }
    -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
    -func (p pointer) Float64() *float64        { return p.v.Interface().(*float64) }
    -func (p pointer) Float64Ptr() **float64    { return p.v.Interface().(**float64) }
    -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
    -func (p pointer) String() *string          { return p.v.Interface().(*string) }
    -func (p pointer) StringPtr() **string      { return p.v.Interface().(**string) }
    -func (p pointer) StringSlice() *[]string   { return p.v.Interface().(*[]string) }
    -func (p pointer) Bytes() *[]byte           { return p.v.Interface().(*[]byte) }
    -func (p pointer) BytesPtr() **[]byte       { return p.v.Interface().(**[]byte) }
    -func (p pointer) BytesSlice() *[][]byte    { return p.v.Interface().(*[][]byte) }
    -func (p pointer) WeakFields() *weakFields  { return (*weakFields)(p.v.Interface().(*WeakFields)) }
    -func (p pointer) Extensions() *map[int32]ExtensionField {
    -	return p.v.Interface().(*map[int32]ExtensionField)
    -}
    -
    -func (p pointer) Elem() pointer {
    -	return pointer{v: p.v.Elem()}
    -}
    -
    -// PointerSlice copies []*T from p as a new []pointer.
    -// This behavior differs from the implementation in pointer_unsafe.go.
    -func (p pointer) PointerSlice() []pointer {
    -	// TODO: reconsider this
    -	if p.v.IsNil() {
    -		return nil
    -	}
    -	n := p.v.Elem().Len()
    -	s := make([]pointer, n)
    -	for i := 0; i < n; i++ {
    -		s[i] = pointer{v: p.v.Elem().Index(i)}
    -	}
    -	return s
    -}
    -
    -// AppendPointerSlice appends v to p, which must be a []*T.
    -func (p pointer) AppendPointerSlice(v pointer) {
    -	sp := p.v.Elem()
    -	sp.Set(reflect.Append(sp, v.v))
    -}
    -
    -// SetPointer sets *p to v.
    -func (p pointer) SetPointer(v pointer) {
    -	p.v.Elem().Set(v.v)
    -}
    -
    -func growSlice(p pointer, addCap int) {
    -	// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
    -	in := p.v.Elem()
    -	out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
    -	reflect.Copy(out, in)
    -	p.v.Elem().Set(out)
    -}
    -
    -func (p pointer) growBoolSlice(addCap int) {
    -	growSlice(p, addCap)
    -}
    -
    -func (p pointer) growInt32Slice(addCap int) {
    -	growSlice(p, addCap)
    -}
    -
    -func (p pointer) growUint32Slice(addCap int) {
    -	growSlice(p, addCap)
    -}
    -
    -func (p pointer) growInt64Slice(addCap int) {
    -	growSlice(p, addCap)
    -}
    -
    -func (p pointer) growUint64Slice(addCap int) {
    -	growSlice(p, addCap)
    -}
    -
    -func (p pointer) growFloat64Slice(addCap int) {
    -	growSlice(p, addCap)
    -}
    -
    -func (p pointer) growFloat32Slice(addCap int) {
    -	growSlice(p, addCap)
    -}
    -
    -func (Export) MessageStateOf(p Pointer) *messageState     { panic("not supported") }
    -func (ms *messageState) pointer() pointer                 { panic("not supported") }
    -func (ms *messageState) messageInfo() *MessageInfo        { panic("not supported") }
    -func (ms *messageState) LoadMessageInfo() *MessageInfo    { panic("not supported") }
    -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
    -
    -type atomicNilMessage struct {
    -	once sync.Once
    -	m    messageReflectWrapper
    -}
    -
    -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
    -	m.once.Do(func() {
    -		m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
    -		m.m.mi = mi
    -	})
    -	return &m.m
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
    index 4b020e3116..6bed45e35c 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
    @@ -2,15 +2,14 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -//go:build !purego && !appengine
    -// +build !purego,!appengine
    -
     package impl
     
     import (
     	"reflect"
     	"sync/atomic"
     	"unsafe"
    +
    +	"google.golang.org/protobuf/internal/protolazy"
     )
     
     const UnsafeEnabled = true
    @@ -23,7 +22,7 @@ type Pointer unsafe.Pointer
     type offset uintptr
     
     // offsetOf returns a field offset for the struct field.
    -func offsetOf(f reflect.StructField, x exporter) offset {
    +func offsetOf(f reflect.StructField) offset {
     	return offset(f.Offset)
     }
     
    @@ -50,7 +49,7 @@ func pointerOfValue(v reflect.Value) pointer {
     }
     
     // pointerOfIface returns the pointer portion of an interface.
    -func pointerOfIface(v interface{}) pointer {
    +func pointerOfIface(v any) pointer {
     	type ifaceHeader struct {
     		Type unsafe.Pointer
     		Data unsafe.Pointer
    @@ -80,7 +79,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
     
     // AsIfaceOf treats p as a pointer to an object of type t and returns the value.
     // It is equivalent to p.AsValueOf(t).Interface()
    -func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
    +func (p pointer) AsIfaceOf(t reflect.Type) any {
     	// TODO: Use tricky unsafe magic to directly create ifaceHeader.
     	return p.AsValueOf(t).Interface()
     }
    @@ -114,6 +113,13 @@ func (p pointer) BytesPtr() **[]byte                    { return (**[]byte)(p.p)
     func (p pointer) BytesSlice() *[][]byte                 { return (*[][]byte)(p.p) }
     func (p pointer) WeakFields() *weakFields               { return (*weakFields)(p.p) }
     func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
    +func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
    +	return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
    +}
    +
    +func (p pointer) PresenceInfo() presence {
    +	return presence{P: p.p}
    +}
     
     func (p pointer) Elem() pointer {
     	return pointer{p: *(*unsafe.Pointer)(p.p)}
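The new LazyInfoPtr and PresenceInfo accessors ride on the same scheme as the existing ones: a struct field's byte offset is captured once and added to the message pointer with unsafe arithmetic. A minimal sketch of that scheme under a hypothetical record type (not the package's pointer/offset types):

    // A minimal sketch (hypothetical record type) of the offset-based field access
    // these accessors build on: capture the field's byte offset once via reflection,
    // then derive a typed pointer to it with unsafe arithmetic.
    package main

    import (
    	"fmt"
    	"reflect"
    	"unsafe"
    )

    type record struct {
    	A int32
    	B string
    }

    func main() {
    	offB := reflect.TypeOf(record{}).Field(1).Offset // byte offset of B inside record

    	r := &record{A: 1, B: "hello"}
    	// Roughly what pointer.Apply(offset).String() does above, minus the wrappers.
    	pb := (*string)(unsafe.Add(unsafe.Pointer(r), offB))
    	fmt.Println(*pb) // hello
    	*pb = "changed"
    	fmt.Println(r.B) // changed
    }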
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
    new file mode 100644
    index 0000000000..38aa7b7dcf
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
    @@ -0,0 +1,42 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"sync/atomic"
    +	"unsafe"
    +)
    +
    +func (p pointer) AtomicGetPointer() pointer {
    +	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
    +}
    +
    +func (p pointer) AtomicSetPointer(v pointer) {
    +	atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
    +}
    +
    +func (p pointer) AtomicSetNilPointer() {
    +	atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
    +}
    +
    +func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
    +	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
    +		return v
    +	}
    +	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
    +}
    +
    +type atomicV1MessageInfo struct{ p Pointer }
    +
    +func (mi *atomicV1MessageInfo) Get() Pointer {
    +	return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
    +}
    +
    +func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
    +	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
    +		return p
    +	}
    +	return mi.Get()
    +}
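AtomicSetPointerIfNil above is a compare-and-swap "initialize at most once" helper: the first caller installs its pointer, later callers load whatever won. A small self-contained sketch of the same pattern with a hypothetical payload type:

    // Sketch of the CAS-based single-winner initialization behind AtomicSetPointerIfNil.
    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    	"unsafe"
    )

    type payload struct{ id int }

    // setIfNil installs v into *slot only if *slot is still nil, and returns
    // whichever pointer ends up stored there.
    func setIfNil(slot *unsafe.Pointer, v *payload) *payload {
    	if atomic.CompareAndSwapPointer(slot, nil, unsafe.Pointer(v)) {
    		return v
    	}
    	return (*payload)(atomic.LoadPointer(slot))
    }

    func main() {
    	var slot unsafe.Pointer
    	var wg sync.WaitGroup
    	results := make([]*payload, 8)
    	for i := 0; i < 8; i++ {
    		wg.Add(1)
    		go func(i int) {
    			defer wg.Done()
    			results[i] = setIfNil(&slot, &payload{id: i})
    		}(i)
    	}
    	wg.Wait()
    	for _, r := range results {
    		fmt.Println(r.id == results[0].id) // always true: one winner
    	}
    }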
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
    new file mode 100644
    index 0000000000..914cb1deda
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
    @@ -0,0 +1,142 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package impl
    +
    +import (
    +	"sync/atomic"
    +	"unsafe"
    +)
    +
    +// presenceSize represents the size of a presence set, which should be the largest index of the set+1
    +type presenceSize uint32
    +
    +// presence is the internal representation of the bitmap array in a generated protobuf
    +type presence struct {
    +	// This is a pointer to the beginning of an array of uint32
    +	P unsafe.Pointer
    +}
    +
    +func (p presence) toElem(num uint32) (ret *uint32) {
    +	const (
    +		bitsPerByte = 8
    +		siz         = unsafe.Sizeof(*ret)
    +	)
    +	// p.P points to an array of uint32, num is the bit in this array that the
    +	// caller wants to check/manipulate. Calculate the index in the array that
    +	// contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
    +	offset := uintptr(num) / (siz * bitsPerByte) * siz
    +	return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
    +}
    +
    +// Present checks for the presence of a specific field number in a presence set.
    +func (p presence) Present(num uint32) bool {
    +	if p.P == nil {
    +		return false
    +	}
    +	return Export{}.Present(p.toElem(num), num)
    +}
    +
    +// SetPresent adds presence for a specific field number in a presence set.
    +func (p presence) SetPresent(num uint32, size presenceSize) {
    +	Export{}.SetPresent(p.toElem(num), num, uint32(size))
    +}
    +
    +// SetPresentUnatomic adds presence for a specific field number in a presence set without using
    +// atomic operations. Only to be called during unmarshaling.
    +func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
    +	Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
    +}
    +
    +// ClearPresent removes presence for a specific field number in a presence set.
    +func (p presence) ClearPresent(num uint32) {
    +	Export{}.ClearPresent(p.toElem(num), num)
    +}
    +
    +// LoadPresenceCache (together with PresentInCache) allows for a
    +// cached version of checking for presence without re-reading the word
    +// for every field. It is optimized for efficiency and assumes no
    +// simultaneous mutation of the presence set (or at least does not have
    +// a problem with simultaneous mutation giving inconsistent results).
    +func (p presence) LoadPresenceCache() (current uint32) {
    +	if p.P == nil {
    +		return 0
    +	}
    +	return atomic.LoadUint32((*uint32)(p.P))
    +}
    +
    +// PresentInCache reads presence from a cached word in the presence
    +// bitmap. It caches up a new word if the bit is outside the
    +// word. This is for really fast iteration through bitmaps in cases
    +// where we either know that the bitmap will not be altered, or we
    +// don't care about inconsistencies caused by simultaneous writes.
    +func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
    +	if num/32 != *cachedElement {
    +		o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
    +		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
    +		*current = atomic.LoadUint32(q)
    +		*cachedElement = num / 32
    +	}
    +	return (*current & (1 << (num % 32))) > 0
    +}
    +
    +// AnyPresent checks if any field is marked as present in the bitmap.
    +func (p presence) AnyPresent(size presenceSize) bool {
    +	n := uintptr((size + 31) / 32)
    +	for j := uintptr(0); j < n; j++ {
    +		o := j * unsafe.Sizeof(uint32(0))
    +		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
    +		b := atomic.LoadUint32(q)
    +		if b > 0 {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// toRaceDetectData finds the preceding RaceDetectHookData in a
    +// message by using pointer arithmetic. As the type of the presence
    +// set (bitmap) varies with the number of fields in the protobuf, we
    +// can not have a struct type containing the array and the
    +// RaceDetectHookData. Instead, the RaceDetectHookData is placed
    +// immediately before the bitmap array, and we find it by walking
    +// backwards in the struct.
    +//
    +// This method is only called from the race-detect version of the code,
    +// so RaceDetectHookData is never an empty struct.
    +func (p presence) toRaceDetectData() *RaceDetectHookData {
    +	var template struct {
    +		d RaceDetectHookData
    +		a [1]uint32
    +	}
    +	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
    +	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
    +}
    +
    +func atomicLoadShadowPresence(p **[]byte) *[]byte {
    +	return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
    +}
    +func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
    +	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
    +}
    +
    +// findPointerToRaceDetectData finds the preceding RaceDetectHookData
    +// in a message by using pointer arithmetic. For the methods called
    +// directly from generated code, we don't have a pointer to the
    +// beginning of the presence set, but a pointer inside the array. As
    +// we know the index of the bit we're manipulating (num), we can
    +// calculate which element of the array ptr is pointing to. With that
    +// information we find the preceding RaceDetectHookData and can
    +// manipulate the shadow bitmap.
    +//
    +// This method is only called from the race-detect version of the
    +// code, so RaceDetectHookData is never an empty struct.
    +func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
    +	var template struct {
    +		d RaceDetectHookData
    +		a [1]uint32
    +	}
    +	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
    +	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
    +}
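The presence set above is a flat array of uint32 words: field number num maps to word num/32 and bit num%32, read and written atomically. A minimal sketch of that bitmap layout, independent of the Export hooks and race-detector plumbing:

    // Bitmap presence sketch: word num/32 of a []uint32, bit num%32, atomic access.
    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type presenceSet []uint32

    func newPresenceSet(maxFieldNum uint32) presenceSet {
    	return make(presenceSet, (maxFieldNum+31)/32)
    }

    func (p presenceSet) set(num uint32) {
    	w := &p[num/32]
    	for {
    		old := atomic.LoadUint32(w)
    		if atomic.CompareAndSwapUint32(w, old, old|1<<(num%32)) {
    			return
    		}
    	}
    }

    func (p presenceSet) present(num uint32) bool {
    	return atomic.LoadUint32(&p[num/32])&(1<<(num%32)) != 0
    }

    func main() {
    	p := newPresenceSet(100)
    	p.set(3)
    	p.set(76) // word 76/32 = 2, bit 76%32 = 12
    	fmt.Println(p.present(3), p.present(4), p.present(76)) // true false true
    }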
    diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
    index a24e6bbd7a..b534a3d6db 100644
    --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
    +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
    @@ -37,6 +37,10 @@ const (
     
     	// ValidationValid indicates that unmarshaling the message will succeed.
     	ValidationValid
    +
    +	// ValidationWrongWireType indicates that a validated field does not have
    +	// the expected wire type.
    +	ValidationWrongWireType
     )
     
     func (v ValidationStatus) String() string {
    @@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
     		switch fd.Kind() {
     		case protoreflect.MessageKind:
     			vi.typ = validationTypeMessage
    +
    +			if ft.Kind() == reflect.Ptr {
    +				// Repeated opaque message fields are *[]*T.
    +				ft = ft.Elem()
    +			}
    +
     			if ft.Kind() == reflect.Slice {
     				vi.mi = getMessageInfo(ft.Elem())
     			}
     		case protoreflect.GroupKind:
     			vi.typ = validationTypeGroup
    +
    +			if ft.Kind() == reflect.Ptr {
    +				// Repeated opaque message fields are *[]*T.
    +				ft = ft.Elem()
    +			}
    +
     			if ft.Kind() == reflect.Slice {
     				vi.mi = getMessageInfo(ft.Elem())
     			}
    diff --git a/vendor/google.golang.org/protobuf/internal/msgfmt/format.go b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go
    index a319550f69..17b3f27b8a 100644
    --- a/vendor/google.golang.org/protobuf/internal/msgfmt/format.go
    +++ b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go
    @@ -86,7 +86,7 @@ func appendMessage(b []byte, m protoreflect.Message) []byte {
     	return b
     }
     
    -var protocmpMessageType = reflect.TypeOf(map[string]interface{}(nil))
    +var protocmpMessageType = reflect.TypeOf(map[string]any(nil))
     
     func appendKnownMessage(b []byte, m protoreflect.Message) []byte {
     	md := m.Descriptor()
    @@ -98,7 +98,7 @@ func appendKnownMessage(b []byte, m protoreflect.Message) []byte {
     		if v := reflect.ValueOf(m); v.Type().ConvertibleTo(protocmpMessageType) {
     			// For protocmp.Message, directly obtain the sub-message value
     			// which is stored in structured form, rather than as raw bytes.
    -			m2 := v.Convert(protocmpMessageType).Interface().(map[string]interface{})
    +			m2 := v.Convert(protocmpMessageType).Interface().(map[string]any)
     			v, ok := m2[string(genid.Any_Value_field_name)].(proto.Message)
     			if !ok {
     				return nil
    diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
    index 1665a68e5b..a1f09162d0 100644
    --- a/vendor/google.golang.org/protobuf/internal/order/range.go
    +++ b/vendor/google.golang.org/protobuf/internal/order/range.go
    @@ -18,7 +18,7 @@ type messageField struct {
     }
     
     var messageFieldPool = sync.Pool{
    -	New: func() interface{} { return new([]messageField) },
    +	New: func() any { return new([]messageField) },
     }
     
     type (
    @@ -69,7 +69,7 @@ type mapEntry struct {
     }
     
     var mapEntryPool = sync.Pool{
    -	New: func() interface{} { return new([]mapEntry) },
    +	New: func() any { return new([]mapEntry) },
     }
     
     type (
    diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
    new file mode 100644
    index 0000000000..82e5cab4aa
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
    @@ -0,0 +1,364 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Helper code for parsing a protocol buffer
    +
    +package protolazy
    +
    +import (
    +	"errors"
    +	"fmt"
    +	"io"
    +
    +	"google.golang.org/protobuf/encoding/protowire"
    +)
    +
    +// BufferReader is a structure encapsulating a protobuf and a current position
    +type BufferReader struct {
    +	Buf []byte
    +	Pos int
    +}
    +
    +// NewBufferReader creates a new BufferReader from a protobuf
    +func NewBufferReader(buf []byte) BufferReader {
    +	return BufferReader{Buf: buf, Pos: 0}
    +}
    +
    +var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
    +var errOverflow = errors.New("proto: integer overflow")
    +
    +func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
    +	i := b.Pos
    +	l := len(b.Buf)
    +
    +	for shift := uint(0); shift < 64; shift += 7 {
    +		if i >= l {
    +			err = io.ErrUnexpectedEOF
    +			return
    +		}
    +		v := b.Buf[i]
    +		i++
    +		x |= (uint64(v) & 0x7F) << shift
    +		if v < 0x80 {
    +			b.Pos = i
    +			return
    +		}
    +	}
    +
    +	// The number is too large to represent in a 64-bit value.
    +	err = errOverflow
    +	return
    +}
    +
    +// decodeVarint decodes a varint at the current position
    +func (b *BufferReader) DecodeVarint() (x uint64, err error) {
    +	i := b.Pos
    +	buf := b.Buf
    +
    +	if i >= len(buf) {
    +		return 0, io.ErrUnexpectedEOF
    +	} else if buf[i] < 0x80 {
    +		b.Pos++
    +		return uint64(buf[i]), nil
    +	} else if len(buf)-i < 10 {
    +		return b.DecodeVarintSlow()
    +	}
    +
    +	var v uint64
    +	// we already checked the first byte
    +	x = uint64(buf[i]) & 127
    +	i++
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 7
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 14
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 21
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 28
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 35
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 42
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 49
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 56
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint64(buf[i])
    +	i++
    +	x |= (v & 127) << 63
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	return 0, errOverflow
    +
    +done:
    +	b.Pos = i
    +	return
    +}
    +
    +// decodeVarint32 decodes a varint32 at the current position
    +func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
    +	i := b.Pos
    +	buf := b.Buf
    +
    +	if i >= len(buf) {
    +		return 0, io.ErrUnexpectedEOF
    +	} else if buf[i] < 0x80 {
    +		b.Pos++
    +		return uint32(buf[i]), nil
    +	} else if len(buf)-i < 5 {
    +		v, err := b.DecodeVarintSlow()
    +		return uint32(v), err
    +	}
    +
    +	var v uint32
    +	// we already checked the first byte
    +	x = uint32(buf[i]) & 127
    +	i++
    +
    +	v = uint32(buf[i])
    +	i++
    +	x |= (v & 127) << 7
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint32(buf[i])
    +	i++
    +	x |= (v & 127) << 14
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint32(buf[i])
    +	i++
    +	x |= (v & 127) << 21
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	v = uint32(buf[i])
    +	i++
    +	x |= (v & 127) << 28
    +	if v < 128 {
    +		goto done
    +	}
    +
    +	return 0, errOverflow
    +
    +done:
    +	b.Pos = i
    +	return
    +}
    +
    +// skipValue skips a value in the protobuf, based on the specified tag
    +func (b *BufferReader) SkipValue(tag uint32) (err error) {
    +	wireType := tag & 0x7
    +	switch protowire.Type(wireType) {
    +	case protowire.VarintType:
    +		err = b.SkipVarint()
    +	case protowire.Fixed64Type:
    +		err = b.SkipFixed64()
    +	case protowire.BytesType:
    +		var n uint32
    +		n, err = b.DecodeVarint32()
    +		if err == nil {
    +			err = b.Skip(int(n))
    +		}
    +	case protowire.StartGroupType:
    +		err = b.SkipGroup(tag)
    +	case protowire.Fixed32Type:
    +		err = b.SkipFixed32()
    +	default:
    +		err = fmt.Errorf("Unexpected wire type (%d)", wireType)
    +	}
    +	return
    +}
    +
    +// skipGroup skips a group with the specified tag.  It executes efficiently using a tag stack
    +func (b *BufferReader) SkipGroup(tag uint32) (err error) {
    +	tagStack := make([]uint32, 0, 16)
    +	tagStack = append(tagStack, tag)
    +	var n uint32
    +	for len(tagStack) > 0 {
    +		tag, err = b.DecodeVarint32()
    +		if err != nil {
    +			return err
    +		}
    +		switch protowire.Type(tag & 0x7) {
    +		case protowire.VarintType:
    +			err = b.SkipVarint()
    +		case protowire.Fixed64Type:
    +			err = b.Skip(8)
    +		case protowire.BytesType:
    +			n, err = b.DecodeVarint32()
    +			if err == nil {
    +				err = b.Skip(int(n))
    +			}
    +		case protowire.StartGroupType:
    +			tagStack = append(tagStack, tag)
    +		case protowire.Fixed32Type:
    +			err = b.SkipFixed32()
    +		case protowire.EndGroupType:
    +			if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
    +				tagStack = tagStack[:len(tagStack)-1]
    +			} else {
    +				err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
    +					protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
    +			}
    +		}
    +		if err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +// skipVarint efficiently skips a varint
    +func (b *BufferReader) SkipVarint() (err error) {
    +	i := b.Pos
    +
    +	if len(b.Buf)-i < 10 {
    +		// Use DecodeVarintSlow() to check for buffer overflow, but ignore result
    +		if _, err := b.DecodeVarintSlow(); err != nil {
    +			return err
    +		}
    +		return nil
    +	}
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	i++
    +
    +	if b.Buf[i] < 0x80 {
    +		goto out
    +	}
    +	return errOverflow
    +
    +out:
    +	b.Pos = i + 1
    +	return nil
    +}
    +
    +// skip skips the specified number of bytes
    +func (b *BufferReader) Skip(n int) (err error) {
    +	if len(b.Buf) < b.Pos+n {
    +		return io.ErrUnexpectedEOF
    +	}
    +	b.Pos += n
    +	return
    +}
    +
    +// skipFixed64 skips a fixed64
    +func (b *BufferReader) SkipFixed64() (err error) {
    +	return b.Skip(8)
    +}
    +
    +// skipFixed32 skips a fixed32
    +func (b *BufferReader) SkipFixed32() (err error) {
    +	return b.Skip(4)
    +}
    +
    +// skipBytes skips a set of bytes
    +func (b *BufferReader) SkipBytes() (err error) {
    +	n, err := b.DecodeVarint32()
    +	if err != nil {
    +		return err
    +	}
    +	return b.Skip(int(n))
    +}
    +
    +// Done returns whether we are at the end of the protobuf
    +func (b *BufferReader) Done() bool {
    +	return b.Pos == len(b.Buf)
    +}
    +
    +// Remaining returns how many bytes remain
    +func (b *BufferReader) Remaining() int {
    +	return len(b.Buf) - b.Pos
    +}
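DecodeVarint above is an unrolled fast path over the standard base-128 varint wire format: seven payload bits per byte, the high bit marking continuation, at most ten bytes for a uint64. A compact loop version of the same decoding, as a reference for what the unrolled code computes:

    // Reference loop for base-128 varint decoding with position tracking.
    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    )

    func decodeVarint(buf []byte, pos int) (x uint64, newPos int, err error) {
    	for shift := uint(0); shift < 64; shift += 7 {
    		if pos >= len(buf) {
    			return 0, pos, io.ErrUnexpectedEOF
    		}
    		b := buf[pos]
    		pos++
    		x |= uint64(b&0x7F) << shift
    		if b < 0x80 { // high bit clear: last byte of this varint
    			return x, pos, nil
    		}
    	}
    	return 0, pos, errors.New("varint overflows a 64-bit integer")
    }

    func main() {
    	// 300 encodes as 0xAC 0x02 on the wire; the trailing 0x08 is unrelated data.
    	v, pos, err := decodeVarint([]byte{0xAC, 0x02, 0x08}, 0)
    	fmt.Println(v, pos, err) // 300 2 <nil>
    }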
    diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
    new file mode 100644
    index 0000000000..ff4d4834bb
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
    @@ -0,0 +1,359 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package protolazy contains internal data structures for lazy message decoding.
    +package protolazy
    +
    +import (
    +	"fmt"
    +	"sort"
    +
    +	"google.golang.org/protobuf/encoding/protowire"
    +	piface "google.golang.org/protobuf/runtime/protoiface"
    +)
    +
    +// IndexEntry is the structure for an index of the fields in a message of a
    +// proto (not descending to sub-messages)
    +type IndexEntry struct {
    +	FieldNum uint32
    +	// first byte of this tag/field
    +	Start uint32
    +	// first byte after a contiguous sequence of bytes for this tag/field, which could
    +	// include a single encoding of the field, or multiple encodings for the field
    +	End uint32
    +	// True if this protobuf segment includes multiple encodings of the field
    +	MultipleContiguous bool
    +}
    +
    +// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
    +//
    +// Deprecated: Do not use. This will be deleted in the near future.
    +type XXX_lazyUnmarshalInfo struct {
    +	// Index of fields and their positions in the protobuf for this
    +	// message.  Make index be a pointer to a slice so it can be updated
    +	// atomically.  The index pointer is only set once (lazily when/if
    +	// the index is first needed), and must always be SET and LOADED
    +	// ATOMICALLY.
    +	index *[]IndexEntry
    +	// The protobuf associated with this lazily decoded message.  It is
    +	// only set during proto.Unmarshal().  It doesn't need to be set and
    +	// loaded atomically, since any simultaneous set (Unmarshal) and read
    +	// (during a get) would already be a race in the app code.
    +	Protobuf []byte
    +	// The flags present when Unmarshal was originally called for this particular message
    +	unmarshalFlags piface.UnmarshalInputFlags
    +}
    +
    +// The Buffer and SetBuffer methods let v2/internal/impl interact with
    +// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
    +
    +// Buffer returns the lazy unmarshal buffer.
    +//
    +// Deprecated: Do not use. This will be deleted in the near future.
    +func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
    +	return lazy.Protobuf
    +}
    +
    +// SetBuffer sets the lazy unmarshal buffer.
    +//
    +// Deprecated: Do not use. This will be deleted in the near future.
    +func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
    +	lazy.Protobuf = b
    +}
    +
    +// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
    +// The flags should reflect how Unmarshal was called.
    +func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
    +	lazy.unmarshalFlags = f
    +}
    +
    +// UnmarshalFlags returns the original unmarshalInputFlags.
    +func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
    +	return lazy.unmarshalFlags
    +}
    +
    +// AllowedPartial returns true if the user originally unmarshalled this message with
    +// AllowPartial set to true
    +func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
    +	return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
    +}
    +
    +func protoFieldNumber(tag uint32) uint32 {
    +	return tag >> 3
    +}
    +
    +// buildIndex builds an index of the specified protobuf, returning the index
    +// array and an error.
    +func buildIndex(buf []byte) ([]IndexEntry, error) {
    +	index := make([]IndexEntry, 0, 16)
    +	var lastProtoFieldNum uint32
    +	var outOfOrder bool
    +
    +	var r BufferReader = NewBufferReader(buf)
    +
    +	for !r.Done() {
    +		var tag uint32
    +		var err error
    +		var curPos = r.Pos
    +		// INLINED: tag, err = r.DecodeVarint32()
    +		{
    +			i := r.Pos
    +			buf := r.Buf
    +
    +			if i >= len(buf) {
    +				return nil, errOutOfBounds
    +			} else if buf[i] < 0x80 {
    +				r.Pos++
    +				tag = uint32(buf[i])
    +			} else if r.Remaining() < 5 {
    +				var v uint64
    +				v, err = r.DecodeVarintSlow()
    +				tag = uint32(v)
    +			} else {
    +				var v uint32
    +				// we already checked the first byte
    +				tag = uint32(buf[i]) & 127
    +				i++
    +
    +				v = uint32(buf[i])
    +				i++
    +				tag |= (v & 127) << 7
    +				if v < 128 {
    +					goto done
    +				}
    +
    +				v = uint32(buf[i])
    +				i++
    +				tag |= (v & 127) << 14
    +				if v < 128 {
    +					goto done
    +				}
    +
    +				v = uint32(buf[i])
    +				i++
    +				tag |= (v & 127) << 21
    +				if v < 128 {
    +					goto done
    +				}
    +
    +				v = uint32(buf[i])
    +				i++
    +				tag |= (v & 127) << 28
    +				if v < 128 {
    +					goto done
    +				}
    +
    +				return nil, errOutOfBounds
    +
    +			done:
    +				r.Pos = i
    +			}
    +		}
    +		// DONE: tag, err = r.DecodeVarint32()
    +
    +		fieldNum := protoFieldNumber(tag)
    +		if fieldNum < lastProtoFieldNum {
    +			outOfOrder = true
    +		}
    +
    +		// Skip the current value -- will skip over an entire group as well.
    +		// INLINED: err = r.SkipValue(tag)
    +		wireType := tag & 0x7
    +		switch protowire.Type(wireType) {
    +		case protowire.VarintType:
    +			// INLINED: err = r.SkipVarint()
    +			i := r.Pos
    +
    +			if len(r.Buf)-i < 10 {
    +				// Use DecodeVarintSlow() to skip while
    +				// checking for buffer overflow, but ignore result
    +				_, err = r.DecodeVarintSlow()
    +				goto out2
    +			}
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			i++
    +
    +			if r.Buf[i] < 0x80 {
    +				goto out
    +			}
    +			return nil, errOverflow
    +		out:
    +			r.Pos = i + 1
    +			// DONE: err = r.SkipVarint()
    +		case protowire.Fixed64Type:
    +			err = r.SkipFixed64()
    +		case protowire.BytesType:
    +			var n uint32
    +			n, err = r.DecodeVarint32()
    +			if err == nil {
    +				err = r.Skip(int(n))
    +			}
    +		case protowire.StartGroupType:
    +			err = r.SkipGroup(tag)
    +		case protowire.Fixed32Type:
    +			err = r.SkipFixed32()
    +		default:
    +			err = fmt.Errorf("Unexpected wire type (%d)", wireType)
    +		}
    +		// DONE: err = r.SkipValue(tag)
    +
    +	out2:
    +		if err != nil {
    +			return nil, err
    +		}
    +		if fieldNum != lastProtoFieldNum {
    +			index = append(index, IndexEntry{FieldNum: fieldNum,
    +				Start: uint32(curPos),
    +				End:   uint32(r.Pos)},
    +			)
    +		} else {
    +			index[len(index)-1].End = uint32(r.Pos)
    +			index[len(index)-1].MultipleContiguous = true
    +		}
    +		lastProtoFieldNum = fieldNum
    +	}
    +	if outOfOrder {
    +		sort.Slice(index, func(i, j int) bool {
    +			return index[i].FieldNum < index[j].FieldNum ||
    +				(index[i].FieldNum == index[j].FieldNum &&
    +					index[i].Start < index[j].Start)
    +		})
    +	}
    +	return index, nil
    +}
    +
    +func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
    +	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
    +	if multipleEntries != nil {
    +		for _, entry := range multipleEntries {
    +			size += int(entry.End - entry.Start)
    +		}
    +		return size
    +	}
    +	if !found {
    +		return 0
    +	}
    +	return int(end - start)
    +}
    +
    +func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
    +	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
    +	if multipleEntries != nil {
    +		for _, entry := range multipleEntries {
    +			b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
    +		}
    +		return b, true
    +	}
    +	if !found {
    +		return nil, false
    +	}
    +	b = append(b, lazy.Protobuf[start:end]...)
    +	return b, true
    +}
    +
    +func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
    +	atomicStoreIndex(&lazy.index, &index)
    +}
    +
    +// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information
    +// (including protobuf), returns startOffset/endOffset/found.
    +func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
    +	if lazy.Protobuf == nil {
    +		// There is no backing protobuf for this message -- it was made from a builder
    +		return 0, 0, false, false, nil
    +	}
    +	index := atomicLoadIndex(&lazy.index)
    +	if index == nil {
    +		r, err := buildIndex(lazy.Protobuf)
    +		if err != nil {
    +			panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
    +		}
    +		// lazy.index is a pointer to the slice returned by BuildIndex
    +		index = &r
    +		atomicStoreIndex(&lazy.index, index)
    +	}
    +	return lookupField(index, fieldNum)
    +}
    +
    +// lookupField returns the offset at which the indicated field starts using
    +// the index, offset immediately after field ends (including all instances of
    +// a repeated field), and bools indicating if field was found and if there
    +// are multiple encodings of the field in the byte range.
    +//
    +// To handle the uncommon case where there are repeated encodings for the same
    +// field which are not consecutive in the protobuf (so we need to return
    +// multiple start/end offsets), we also return a slice multipleEntries.  If
    +// multipleEntries is non-nil, then multiple entries were found, and the
    +// values in the slice should be used, rather than start/end/found.
    +func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
    +	// The pointer indexp to the index was already loaded atomically.
    +	// The slice is uniquely associated with the pointer, so it doesn't
    +	// need to be loaded atomically.
    +	index := *indexp
    +	for i, entry := range index {
    +		if fieldNum == entry.FieldNum {
    +			if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
    +				// Handle the uncommon case where there are
    +				// repeated entries for the same field which
    +				// are not contiguous in the protobuf.
    +				multiple := make([]IndexEntry, 1, 2)
    +				multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
    +				i++
    +				for i < len(index) && index[i].FieldNum == fieldNum {
    +					multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
    +					i++
    +				}
    +				return 0, 0, false, false, multiple
    +
    +			}
    +			return entry.Start, entry.End, true, entry.MultipleContiguous, nil
    +		}
    +		if fieldNum < entry.FieldNum {
    +			return 0, 0, false, false, nil
    +		}
    +	}
    +	return 0, 0, false, false, nil
    +}
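
The index built above maps field numbers to byte ranges inside the backing wire-format buffer, so AppendField can copy a field without decoding it. A minimal sketch of the same idea using only the public protowire package; the fieldRange type and scanFields helper are illustrative stand-ins for the internal IndexEntry and buildIndex, not the vendored implementation:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// fieldRange is a simplified stand-in for the internal IndexEntry type:
// it records where the bytes of one occurrence of a field start and end.
type fieldRange struct {
	Num        protowire.Number
	Start, End int
}

// scanFields walks a wire-format buffer and records the byte range of
// every top-level field, which is conceptually what buildIndex does.
func scanFields(b []byte) ([]fieldRange, error) {
	var out []fieldRange
	for off := 0; off < len(b); {
		num, typ, n := protowire.ConsumeTag(b[off:])
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		m := protowire.ConsumeFieldValue(num, typ, b[off+n:])
		if m < 0 {
			return nil, protowire.ParseError(m)
		}
		out = append(out, fieldRange{Num: num, Start: off, End: off + n + m})
		off += n + m
	}
	return out, nil
}

func main() {
	// Field 1 (varint 150) followed by field 2 (bytes "hi").
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 150)
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendBytes(b, []byte("hi"))

	ranges, err := scanFields(b)
	fmt.Println(ranges, err) // byte ranges usable for AppendField-style copying
}
```

lookupField is then just a search over such ranges, falling back to a slice of entries when a repeated field is split across non-adjacent ranges.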
    diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
    new file mode 100644
    index 0000000000..dc2a64ca64
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
    @@ -0,0 +1,17 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package protolazy
    +
    +import (
    +	"sync/atomic"
    +	"unsafe"
    +)
    +
    +func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
    +	return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
    +}
    +func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
    +	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
    +}
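
These helpers go through unsafe.Pointer casts. A hedged sketch of the same publish-once pattern written with the typed atomic.Pointer available since Go 1.19; lazyInfo and indexEntry are hypothetical stand-ins for the vendored types, not the actual implementation:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// indexEntry stands in for the internal IndexEntry type.
type indexEntry struct{ FieldNum uint32 }

// lazyInfo shows the same load/store-a-pointer-to-a-slice pattern
// without any unsafe.Pointer conversions.
type lazyInfo struct {
	index atomic.Pointer[[]indexEntry]
}

func (l *lazyInfo) setIndex(idx []indexEntry) { l.index.Store(&idx) }

func (l *lazyInfo) getIndex() *[]indexEntry { return l.index.Load() }

func main() {
	var l lazyInfo
	l.setIndex([]indexEntry{{FieldNum: 1}, {FieldNum: 3}})
	fmt.Println(len(*l.getIndex())) // 2
}
```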
    diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
    deleted file mode 100644
    index a1f6f33386..0000000000
    --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -// Copyright 2018 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build purego || appengine
    -// +build purego appengine
    -
    -package strs
    -
    -import pref "google.golang.org/protobuf/reflect/protoreflect"
    -
    -func UnsafeString(b []byte) string {
    -	return string(b)
    -}
    -
    -func UnsafeBytes(s string) []byte {
    -	return []byte(s)
    -}
    -
    -type Builder struct{}
    -
    -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
    -	return prefix.Append(name)
    -}
    -
    -func (*Builder) MakeString(b []byte) string {
    -	return string(b)
    -}
    diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
    index a008acd090..832a7988f1 100644
    --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
    +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
    @@ -2,8 +2,7 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -//go:build !purego && !appengine && !go1.21
    -// +build !purego,!appengine,!go1.21
    +//go:build !go1.21
     
     package strs
     
    diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
    index 60166f2ba3..1ffddf6877 100644
    --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
    +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
    @@ -2,8 +2,7 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -//go:build !purego && !appengine && go1.21
    -// +build !purego,!appengine,go1.21
    +//go:build go1.21
     
     package strs
     
    diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
    index a50fcfb49b..f5c06280fe 100644
    --- a/vendor/google.golang.org/protobuf/internal/version/version.go
    +++ b/vendor/google.golang.org/protobuf/internal/version/version.go
    @@ -51,8 +51,8 @@ import (
     //  10. Send out the CL for review and submit it.
     const (
     	Major      = 1
    -	Minor      = 33
    -	Patch      = 0
    +	Minor      = 36
    +	Patch      = 3
     	PreRelease = ""
     )
     
    diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
    index e5b03b5677..e28d7acb37 100644
    --- a/vendor/google.golang.org/protobuf/proto/decode.go
    +++ b/vendor/google.golang.org/protobuf/proto/decode.go
    @@ -47,10 +47,18 @@ type UnmarshalOptions struct {
     	// RecursionLimit limits how deeply messages may be nested.
     	// If zero, a default limit is applied.
     	RecursionLimit int
    +
    +	//
    +	// NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
    +	// default. Lazy decoding only affects submessages (annotated with [lazy =
    +	// true] in the .proto file) within messages that use the Opaque API.
    +	NoLazyDecoding bool
     }
     
     // Unmarshal parses the wire-format message in b and places the result in m.
     // The provided message must be mutable (e.g., a non-nil pointer to a message).
    +//
    +// See the [UnmarshalOptions] type if you need more control.
     func Unmarshal(b []byte, m Message) error {
     	_, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect())
     	return err
    @@ -102,6 +110,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
     		if o.DiscardUnknown {
     			in.Flags |= protoiface.UnmarshalDiscardUnknown
     		}
    +
    +		if !allowPartial {
+			// This does not affect how current unmarshal functions work; it just allows
+			// them to record this for the lazy decoding case.
    +			in.Flags |= protoiface.UnmarshalCheckRequired
    +		}
    +		if o.NoLazyDecoding {
    +			in.Flags |= protoiface.UnmarshalNoLazyDecoding
    +		}
    +
     		out, err = methods.Unmarshal(in)
     	} else {
     		o.RecursionLimit--
    @@ -154,7 +172,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
     		var err error
     		if fd == nil {
     			err = errUnknown
    -		} else if flags.ProtoLegacy {
    +		} else if flags.ProtoLegacyWeak {
     			if fd.IsWeak() && fd.Message().IsPlaceholder() {
     				err = errUnknown // weak referent is not linked in
     			}
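
From the caller's side, the new NoLazyDecoding knob is set like any other unmarshal option. A small usage sketch; durationpb.Duration has no lazy-annotated fields, so here the option is a no-op and the call behaves like a plain Unmarshal:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	wire, err := proto.Marshal(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}

	// Force eager decoding of any [lazy = true] submessages; Duration has
	// none, so this behaves exactly like proto.Unmarshal here.
	var out durationpb.Duration
	if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(wire, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 1m30s
}
```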
    diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
    index 4fed202f9f..f0473c5869 100644
    --- a/vendor/google.golang.org/protobuf/proto/encode.go
    +++ b/vendor/google.golang.org/protobuf/proto/encode.go
    @@ -5,12 +5,17 @@
     package proto
     
     import (
    +	"errors"
    +	"fmt"
    +
     	"google.golang.org/protobuf/encoding/protowire"
     	"google.golang.org/protobuf/internal/encoding/messageset"
     	"google.golang.org/protobuf/internal/order"
     	"google.golang.org/protobuf/internal/pragma"
     	"google.golang.org/protobuf/reflect/protoreflect"
     	"google.golang.org/protobuf/runtime/protoiface"
    +
    +	protoerrors "google.golang.org/protobuf/internal/errors"
     )
     
     // MarshalOptions configures the marshaler.
    @@ -58,7 +63,8 @@ type MarshalOptions struct {
     	// options (except for UseCachedSize itself).
     	//
     	// 2. The message and all its submessages have not changed in any
    -	// way since the Size call.
    +	// way since the Size call. For lazily decoded messages, accessing
    +	// a message results in decoding the message, which is a change.
     	//
     	// If either of these invariants is violated,
     	// the results are undefined and may include panics or corrupted output.
    @@ -70,7 +76,32 @@ type MarshalOptions struct {
     	UseCachedSize bool
     }
     
    +// flags turns the specified MarshalOptions (user-facing) into
    +// protoiface.MarshalInputFlags (used internally by the marshaler).
    +//
    +// See impl.marshalOptions.Options for the inverse operation.
    +func (o MarshalOptions) flags() protoiface.MarshalInputFlags {
    +	var flags protoiface.MarshalInputFlags
    +
    +	// Note: o.AllowPartial is always forced to true by MarshalOptions.marshal,
    +	// which is why it is not a part of MarshalInputFlags.
    +
    +	if o.Deterministic {
    +		flags |= protoiface.MarshalDeterministic
    +	}
    +
    +	if o.UseCachedSize {
    +		flags |= protoiface.MarshalUseCachedSize
    +	}
    +
    +	return flags
    +}
    +
     // Marshal returns the wire-format encoding of m.
    +//
    +// This is the most common entry point for encoding a Protobuf message.
    +//
    +// See the [MarshalOptions] type if you need more control.
     func Marshal(m Message) ([]byte, error) {
     	// Treat nil message interface as an empty message; nothing to output.
     	if m == nil {
    @@ -116,6 +147,9 @@ func emptyBytesForMessage(m Message) []byte {
     
     // MarshalAppend appends the wire-format encoding of m to b,
     // returning the result.
    +//
    +// This is a less common entry point than [Marshal], which is only needed if you
    +// need to supply your own buffers for performance reasons.
     func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
     	// Treat nil message interface as an empty message; nothing to append.
     	if m == nil {
    @@ -145,12 +179,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac
     		in := protoiface.MarshalInput{
     			Message: m,
     			Buf:     b,
    -		}
    -		if o.Deterministic {
    -			in.Flags |= protoiface.MarshalDeterministic
    -		}
    -		if o.UseCachedSize {
    -			in.Flags |= protoiface.MarshalUseCachedSize
    +			Flags:   o.flags(),
     		}
     		if methods.Size != nil {
     			sout := methods.Size(protoiface.SizeInput{
    @@ -168,6 +197,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac
     		out.Buf, err = o.marshalMessageSlow(b, m)
     	}
     	if err != nil {
    +		var mismatch *protoerrors.SizeMismatchError
    +		if errors.As(err, &mismatch) {
    +			return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err)
    +		}
     		return out, err
     	}
     	if allowPartial {
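
The new flags() helper only repackages the existing options, so callers keep setting MarshalOptions as before. A short sketch of the Deterministic path, using structpb (whose map-backed fields make the ordering effect visible):

```go
package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	m, err := structpb.NewStruct(map[string]any{"b": 1, "a": 2, "c": 3})
	if err != nil {
		panic(err)
	}

	// Deterministic marshaling orders map entries, so repeated calls yield
	// byte-identical output (handy for hashing or caching).
	opts := proto.MarshalOptions{Deterministic: true}
	b1, _ := opts.Marshal(m)
	b2, _ := opts.Marshal(m)
	fmt.Println(bytes.Equal(b1, b2)) // true
}
```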
    diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
    index 1a0be1b03c..c36d4a9cd7 100644
    --- a/vendor/google.golang.org/protobuf/proto/equal.go
    +++ b/vendor/google.golang.org/protobuf/proto/equal.go
    @@ -8,6 +8,7 @@ import (
     	"reflect"
     
     	"google.golang.org/protobuf/reflect/protoreflect"
    +	"google.golang.org/protobuf/runtime/protoiface"
     )
     
     // Equal reports whether two messages are equal,
    @@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
     	if mx.IsValid() != my.IsValid() {
     		return false
     	}
    +
    +	// Only one of the messages needs to implement the fast-path for it to work.
    +	pmx := protoMethods(mx)
    +	pmy := protoMethods(my)
    +	if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
    +		return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
    +	}
    +
     	vx := protoreflect.ValueOfMessage(mx)
     	vy := protoreflect.ValueOfMessage(my)
     	return vx.Equal(vy)
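
The generated-code fast path is transparent to callers; proto.Equal keeps its documented semantics. A quick sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")
	b := wrapperspb.String("hello")
	c := wrapperspb.String("world")

	fmt.Println(proto.Equal(a, b)) // true: same contents, different pointers
	fmt.Println(proto.Equal(a, c)) // false

	// A typed nil and an empty non-nil message are not equal (IsValid differs).
	fmt.Println(proto.Equal((*wrapperspb.StringValue)(nil), &wrapperspb.StringValue{})) // false
}
```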
    diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
    index 17899a3a76..78445d116f 100644
    --- a/vendor/google.golang.org/protobuf/proto/extension.go
    +++ b/vendor/google.golang.org/protobuf/proto/extension.go
    @@ -11,18 +11,21 @@ import (
     // HasExtension reports whether an extension field is populated.
     // It returns false if m is invalid or if xt does not extend m.
     func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
    -	// Treat nil message interface as an empty message; no populated fields.
    -	if m == nil {
    +	// Treat nil message interface or descriptor as an empty message; no populated
    +	// fields.
    +	if m == nil || xt == nil {
     		return false
     	}
     
 	// As a special-case, we report invalid or mismatching descriptors
     	// as always not being populated (since they aren't).
    -	if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
    +	mr := m.ProtoReflect()
    +	xd := xt.TypeDescriptor()
    +	if mr.Descriptor() != xd.ContainingMessage() {
     		return false
     	}
     
    -	return m.ProtoReflect().Has(xt.TypeDescriptor())
    +	return mr.Has(xd)
     }
     
     // ClearExtension clears an extension field such that subsequent
    @@ -36,7 +39,49 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
     // If the field is unpopulated, it returns the default value for
     // scalars and an immutable, empty value for lists or messages.
     // It panics if xt does not extend m.
    -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
    +//
    +// The type of the value is dependent on the field type of the extension.
    +// For extensions generated by protoc-gen-go, the Go type is as follows:
    +//
    +//	╔═══════════════════╤═════════════════════════╗
    +//	║ Go type           │ Protobuf kind           ║
    +//	╠═══════════════════╪═════════════════════════╣
    +//	║ bool              │ bool                    ║
    +//	║ int32             │ int32, sint32, sfixed32 ║
    +//	║ int64             │ int64, sint64, sfixed64 ║
    +//	║ uint32            │ uint32, fixed32         ║
    +//	║ uint64            │ uint64, fixed64         ║
    +//	║ float32           │ float                   ║
    +//	║ float64           │ double                  ║
    +//	║ string            │ string                  ║
    +//	║ []byte            │ bytes                   ║
    +//	║ protoreflect.Enum │ enum                    ║
    +//	║ proto.Message     │ message, group          ║
    +//	╚═══════════════════╧═════════════════════════╝
    +//
    +// The protoreflect.Enum and proto.Message types are the concrete Go type
    +// associated with the named enum or message. Repeated fields are represented
    +// using a Go slice of the base element type.
    +//
    +// If a generated extension descriptor variable is directly passed to
    +// GetExtension, then the call should be followed immediately by a
    +// type assertion to the expected output value. For example:
    +//
    +//	mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
    +//
    +// This pattern enables static analysis tools to verify that the asserted type
    +// matches the Go type associated with the extension field and
    +// also enables a possible future migration to a type-safe extension API.
    +//
    +// Since singular messages are the most common extension type, the pattern of
    +// calling HasExtension followed by GetExtension may be simplified to:
    +//
    +//	if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
    +//	    ... // make use of mm
    +//	}
    +//
    +// The mm variable is non-nil if and only if HasExtension reports true.
    +func GetExtension(m Message, xt protoreflect.ExtensionType) any {
     	// Treat nil message interface as an empty message; return the default.
     	if m == nil {
     		return xt.InterfaceOf(xt.Zero())
    @@ -48,7 +93,36 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
     // SetExtension stores the value of an extension field.
     // It panics if m is invalid, xt does not extend m, or if type of v
     // is invalid for the specified extension field.
    -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
    +//
    +// The type of the value is dependent on the field type of the extension.
    +// For extensions generated by protoc-gen-go, the Go type is as follows:
    +//
    +//	╔═══════════════════╤═════════════════════════╗
    +//	║ Go type           │ Protobuf kind           ║
    +//	╠═══════════════════╪═════════════════════════╣
    +//	║ bool              │ bool                    ║
    +//	║ int32             │ int32, sint32, sfixed32 ║
    +//	║ int64             │ int64, sint64, sfixed64 ║
    +//	║ uint32            │ uint32, fixed32         ║
    +//	║ uint64            │ uint64, fixed64         ║
    +//	║ float32           │ float                   ║
    +//	║ float64           │ double                  ║
    +//	║ string            │ string                  ║
    +//	║ []byte            │ bytes                   ║
    +//	║ protoreflect.Enum │ enum                    ║
    +//	║ proto.Message     │ message, group          ║
    +//	╚═══════════════════╧═════════════════════════╝
    +//
    +// The protoreflect.Enum and proto.Message types are the concrete Go type
    +// associated with the named enum or message. Repeated fields are represented
    +// using a Go slice of the base element type.
    +//
    +// If a generated extension descriptor variable is directly passed to
    +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
    +// concrete type that matches the expected Go type for the extension descriptor
    +// so that static analysis tools can verify type correctness.
    +// This also enables a possible future migration to a type-safe extension API.
    +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
     	xd := xt.TypeDescriptor()
     	pv := xt.ValueOf(v)
     
    @@ -75,7 +149,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
     // It returns immediately if f returns false.
     // While iterating, mutating operations may only be performed
     // on the current extension field.
    -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
    +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) {
     	// Treat nil message interface as an empty message; nothing to range over.
     	if m == nil {
     		return
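
The expanded GetExtension/SetExtension docs describe the assert-immediately pattern; a runnable sketch using the real gofeaturespb.E_Go extension on descriptorpb.FeatureSet (any generated extension works the same way):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	fs := &descriptorpb.FeatureSet{}
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // false: not set yet

	// gofeaturespb.E_Go extends google.protobuf.FeatureSet.
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{})
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // true

	// The documented pattern: assert the concrete type right away.
	gf := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
	fmt.Println(gf.GetLegacyUnmarshalJsonEnum()) // false (field left unset)
}
```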
    diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
    index 312d5d45c6..575d14831f 100644
    --- a/vendor/google.golang.org/protobuf/proto/messageset.go
    +++ b/vendor/google.golang.org/protobuf/proto/messageset.go
    @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b
     func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
     	b = messageset.AppendFieldStart(b, fd.Number())
     	b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
    -	b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
    +	calculatedSize := o.Size(value.Message().Interface())
    +	b = protowire.AppendVarint(b, uint64(calculatedSize))
    +	before := len(b)
     	b, err := o.marshalMessage(b, value.Message())
     	if err != nil {
     		return b, err
     	}
    +	if measuredSize := len(b) - before; calculatedSize != measuredSize {
    +		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
    +	}
     	b = messageset.AppendFieldEnd(b)
     	return b, nil
     }
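
The added check compares the pre-computed size against the bytes actually written. A simplified sketch of the same length-prefix-then-verify pattern using only public APIs; the error text is illustrative, whereas the vendored code returns the internal MismatchedSizeCalculation error:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// appendLengthPrefixed writes m as a length-delimited payload into b and
// verifies that the announced size matches the bytes actually written.
func appendLengthPrefixed(b []byte, m proto.Message) ([]byte, error) {
	calculated := proto.Size(m)
	b = protowire.AppendVarint(b, uint64(calculated))
	before := len(b)
	b, err := proto.MarshalOptions{}.MarshalAppend(b, m)
	if err != nil {
		return nil, err
	}
	if measured := len(b) - before; measured != calculated {
		return nil, fmt.Errorf("size mismatch: calculated %d, measured %d", calculated, measured)
	}
	return b, nil
}

func main() {
	b, err := appendLengthPrefixed(nil, wrapperspb.String("hi"))
	fmt.Println(len(b), err)
}
```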
    diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
    index f1692b49b6..c8675806c6 100644
    --- a/vendor/google.golang.org/protobuf/proto/size.go
    +++ b/vendor/google.golang.org/protobuf/proto/size.go
    @@ -12,11 +12,19 @@ import (
     )
     
     // Size returns the size in bytes of the wire-format encoding of m.
    +//
    +// Note that Size might return more bytes than Marshal will write in the case of
    +// lazily decoded messages that arrive in non-minimal wire format: see
    +// https://protobuf.dev/reference/go/size/ for more details.
     func Size(m Message) int {
     	return MarshalOptions{}.Size(m)
     }
     
     // Size returns the size in bytes of the wire-format encoding of m.
    +//
    +// Note that Size might return more bytes than Marshal will write in the case of
    +// lazily decoded messages that arrive in non-minimal wire format: see
    +// https://protobuf.dev/reference/go/size/ for more details.
     func (o MarshalOptions) Size(m Message) int {
     	// Treat a nil message interface as an empty message; nothing to output.
     	if m == nil {
    @@ -34,6 +42,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) {
     	if methods != nil && methods.Size != nil {
     		out := methods.Size(protoiface.SizeInput{
     			Message: m,
    +			Flags:   o.flags(),
     		})
     		return out.Size
     	}
    @@ -42,6 +51,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) {
     		// This case is mainly used for legacy types with a Marshal method.
     		out, _ := methods.Marshal(protoiface.MarshalInput{
     			Message: m,
    +			Flags:   o.flags(),
     		})
     		return len(out.Buf)
     	}
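
For eagerly decoded messages the documented caveat does not apply and Size still matches what Marshal writes, which is what makes the usual pre-allocation pattern work. A small sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hello world")

	// Pre-size the buffer with Size, then marshal into it.
	sz := proto.Size(m)
	buf := make([]byte, 0, sz)
	buf, err := proto.MarshalOptions{}.MarshalAppend(buf, m)
	if err != nil {
		panic(err)
	}
	fmt.Println(sz == len(buf)) // true: no lazy fields involved
}
```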
    diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
    new file mode 100644
    index 0000000000..267fd0f1f6
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
    @@ -0,0 +1,80 @@
    +// Copyright 2024 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package proto
    +
    +// ValueOrNil returns nil if has is false, or a pointer to a new variable
    +// containing the value returned by the specified getter.
    +//
    +// This function is similar to the wrappers (proto.Int32(), proto.String(),
    +// etc.), but is generic (works for any field type) and works with the hasser
    +// and getter of a field, as opposed to a value.
    +//
    +// This is convenient when populating builder fields.
    +//
    +// Example:
    +//
    +//	hop := attr.GetDirectHop()
    +//	injectedRoute := ripb.InjectedRoute_builder{
    +//	  Prefixes: route.GetPrefixes(),
    +//	  NextHop:  proto.ValueOrNil(hop.HasAddress(), hop.GetAddress),
    +//	}
    +func ValueOrNil[T any](has bool, getter func() T) *T {
    +	if !has {
    +		return nil
    +	}
    +	v := getter()
    +	return &v
    +}
    +
    +// ValueOrDefault returns the protobuf message val if val is not nil, otherwise
    +// it returns a pointer to an empty val message.
    +//
    +// This function allows for translating code from the old Open Struct API to the
    +// new Opaque API.
    +//
    +// The old Open Struct API represented oneof fields with a wrapper struct:
    +//
    +//	var signedImg *accountpb.SignedImage
    +//	profile := &accountpb.Profile{
    +//		// The Avatar oneof will be set, with an empty SignedImage.
    +//		Avatar: &accountpb.Profile_SignedImage{signedImg},
    +//	}
    +//
+// The new Opaque API treats oneof fields like regular fields; there are no more
    +// wrapper structs:
    +//
    +//	var signedImg *accountpb.SignedImage
    +//	profile := &accountpb.Profile{}
    +//	profile.SetSignedImage(signedImg)
    +//
    +// For convenience, the Opaque API also offers Builders, which allow for a
    +// direct translation of struct initialization. However, because Builders use
    +// nilness to represent field presence (but there is no non-nil wrapper struct
    +// anymore), Builders cannot distinguish between an unset oneof and a set oneof
    +// with nil message. The above code would need to be translated with help of the
    +// ValueOrDefault function to retain the same behavior:
    +//
    +//	var signedImg *accountpb.SignedImage
    +//	return &accountpb.Profile_builder{
    +//		SignedImage: proto.ValueOrDefault(signedImg),
    +//	}.Build()
    +func ValueOrDefault[T interface {
    +	*P
    +	Message
    +}, P any](val T) T {
    +	if val == nil {
    +		return T(new(P))
    +	}
    +	return val
    +}
    +
    +// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of
    +// type []byte.
    +func ValueOrDefaultBytes(val []byte) []byte {
    +	if val == nil {
    +		return []byte{}
    +	}
    +	return val
    +}
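
A short usage sketch for the new helpers; the hasser/getter pair is faked with plain values here, standing in for the Has<Field>()/Get<Field>() methods of a generated Opaque API message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
)

func main() {
	// has/get stand in for the Has<Field>()/Get<Field>() pair of a field.
	has := true
	get := func() int32 { return 42 }

	p := proto.ValueOrNil(has, get) // *int32 pointing at 42
	fmt.Println(p != nil, *p)       // true 42

	q := proto.ValueOrNil(false, get) // nil: the field was not set
	fmt.Println(q == nil)             // true

	// ValueOrDefaultBytes never hands a builder a nil slice.
	fmt.Println(proto.ValueOrDefaultBytes(nil)) // []
}
```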
    diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go
    new file mode 100644
    index 0000000000..ea276d15a0
    --- /dev/null
    +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go
    @@ -0,0 +1,31 @@
    +// Copyright 2023 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package protoadapt bridges the original and new proto APIs.
    +package protoadapt
    +
    +import (
    +	"google.golang.org/protobuf/proto"
    +	"google.golang.org/protobuf/runtime/protoiface"
    +	"google.golang.org/protobuf/runtime/protoimpl"
    +)
    +
    +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type.
    +type MessageV1 = protoiface.MessageV1
    +
    +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the
    +// current [google.golang.org/protobuf] module, adding support for reflection.
    +type MessageV2 = proto.Message
    +
    +// MessageV1Of converts a v2 message to a v1 message.
    +// It returns nil if m is nil.
    +func MessageV1Of(m MessageV2) MessageV1 {
    +	return protoimpl.X.ProtoMessageV1Of(m)
    +}
    +
    +// MessageV2Of converts a v1 message to a v2 message.
    +// It returns nil if m is nil.
    +func MessageV2Of(m MessageV1) MessageV2 {
    +	return protoimpl.X.ProtoMessageV2Of(m)
    +}
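
A minimal sketch of bridging through protoadapt; messages generated by the current protoc-gen-go implement both interfaces, so one stands in for a legacy message here:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// marshalV1 accepts an old-style (github.com/golang/protobuf) message and
// marshals it with the v2 API by bridging through protoadapt.
func marshalV1(m protoadapt.MessageV1) ([]byte, error) {
	return proto.Marshal(protoadapt.MessageV2Of(m))
}

func main() {
	b, err := marshalV1(wrapperspb.String("bridged"))
	fmt.Println(len(b) > 0, err) // true <nil>
}
```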
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
    index baa0cc6218..69a0505091 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
    @@ -13,6 +13,9 @@
     package protodesc
     
     import (
    +	"strings"
    +
    +	"google.golang.org/protobuf/internal/editionssupport"
     	"google.golang.org/protobuf/internal/errors"
     	"google.golang.org/protobuf/internal/filedesc"
     	"google.golang.org/protobuf/internal/pragma"
    @@ -91,21 +94,27 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
     	switch fd.GetSyntax() {
     	case "proto2", "":
     		f.L1.Syntax = protoreflect.Proto2
    +		f.L1.Edition = filedesc.EditionProto2
     	case "proto3":
     		f.L1.Syntax = protoreflect.Proto3
    +		f.L1.Edition = filedesc.EditionProto3
     	case "editions":
     		f.L1.Syntax = protoreflect.Editions
     		f.L1.Edition = fromEditionProto(fd.GetEdition())
     	default:
     		return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
     	}
    -	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) {
    -		return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
    -	}
     	f.L1.Path = fd.GetName()
     	if f.L1.Path == "" {
     		return nil, errors.New("file path must be populated")
     	}
    +	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
    +		// Allow cmd/protoc-gen-go/testdata to use any edition for easier
    +		// testing of upcoming edition features.
    +		if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
    +			return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
    +		}
    +	}
     	f.L1.Package = protoreflect.FullName(fd.GetPackage())
     	if !f.L1.Package.IsValid() && f.L1.Package != "" {
     		return nil, errors.New("invalid package: %q", f.L1.Package)
    @@ -114,9 +123,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
     		opts = proto.Clone(opts).(*descriptorpb.FileOptions)
     		f.L2.Options = func() protoreflect.ProtoMessage { return opts }
     	}
    -	if f.L1.Syntax == protoreflect.Editions {
    -		initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
    -	}
    +	initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
     
     	f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
     	for _, i := range fd.GetPublicDependency() {
    @@ -219,10 +226,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
     	if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil {
     		return nil, err
     	}
    -	if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil {
    +	if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil {
     		return nil, err
     	}
    -	if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil {
    +	if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil {
     		return nil, err
     	}
     
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
    index b3278163c5..ebcb4a8ab1 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
    @@ -69,9 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt
     		if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
     			return nil, err
     		}
    -		if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
    -			m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
    -		}
    +		m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
     		if opts := md.GetOptions(); opts != nil {
     			opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
     			m.L2.Options = func() protoreflect.ProtoMessage { return opts }
    @@ -146,13 +144,16 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
     		if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil {
     			return nil, err
     		}
    +		f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures())
     		f.L1.IsProto3Optional = fd.GetProto3Optional()
     		if opts := fd.GetOptions(); opts != nil {
     			opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
     			f.L1.Options = func() protoreflect.ProtoMessage { return opts }
     			f.L1.IsWeak = opts.GetWeak()
    -			f.L1.HasPacked = opts.Packed != nil
    -			f.L1.IsPacked = opts.GetPacked()
    +			f.L1.IsLazy = opts.GetLazy()
    +			if opts.Packed != nil {
    +				f.L1.EditionFeatures.IsPacked = opts.GetPacked()
    +			}
     		}
     		f.L1.Number = protoreflect.FieldNumber(fd.GetNumber())
     		f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel())
    @@ -163,32 +164,12 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
     			f.L1.StringName.InitJSON(fd.GetJsonName())
     		}
     
    -		if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
    -			f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures())
    -
    -			if f.L1.EditionFeatures.IsLegacyRequired {
    -				f.L1.Cardinality = protoreflect.Required
    -			}
    -			// We reuse the existing field because the old option `[packed =
    -			// true]` is mutually exclusive with the editions feature.
    -			if canBePacked(fd) {
    -				f.L1.HasPacked = true
    -				f.L1.IsPacked = f.L1.EditionFeatures.IsPacked
    -			}
    -
    -			// We pretend this option is always explicitly set because the only
    -			// use of HasEnforceUTF8 is to determine whether to use EnforceUTF8
    -			// or to return the appropriate default.
    -			// When using editions we either parse the option or resolve the
    -			// appropriate default here (instead of later when this option is
    -			// requested from the descriptor).
    -			// In proto2/proto3 syntax HasEnforceUTF8 might be false.
    -			f.L1.HasEnforceUTF8 = true
    -			f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated
    +		if f.L1.EditionFeatures.IsLegacyRequired {
    +			f.L1.Cardinality = protoreflect.Required
    +		}
     
    -			if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded {
    -				f.L1.Kind = protoreflect.GroupKind
    -			}
    +		if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded {
    +			f.L1.Kind = protoreflect.GroupKind
     		}
     	}
     	return fs, nil
    @@ -201,12 +182,10 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc
     		if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil {
     			return nil, err
     		}
    +		o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures())
     		if opts := od.GetOptions(); opts != nil {
     			opts = proto.Clone(opts).(*descriptorpb.OneofOptions)
     			o.L1.Options = func() protoreflect.ProtoMessage { return opts }
    -			if parent.Syntax() == protoreflect.Editions {
    -				o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures())
    -			}
     		}
     	}
     	return os, nil
    @@ -220,10 +199,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
     		if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil {
     			return nil, err
     		}
    +		x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures())
     		if opts := xd.GetOptions(); opts != nil {
     			opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
     			x.L2.Options = func() protoreflect.ProtoMessage { return opts }
    -			x.L2.IsPacked = opts.GetPacked()
    +			if opts.Packed != nil {
    +				x.L1.EditionFeatures.IsPacked = opts.GetPacked()
    +			}
     		}
     		x.L1.Number = protoreflect.FieldNumber(xd.GetNumber())
     		x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel())
    @@ -233,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
     		if xd.JsonName != nil {
     			x.L2.StringName.InitJSON(xd.GetJsonName())
     		}
    +		if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
    +			x.L1.Kind = protoreflect.GroupKind
    +		}
     	}
     	return xs, nil
     }
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
    index 254ca58542..f3cebab29c 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
    @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
     			if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
     				return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
     			}
    +			if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) {
    +				// A map field might inherit delimited encoding from a file-wide default feature.
    +				// But maps never actually use delimited encoding. (At least for now...)
    +				f.L1.Kind = protoreflect.MessageKind
    +			}
     			if fd.DefaultValue != nil {
     				v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable)
     				if err != nil {
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
    index e4dcaf876c..5eaf652176 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
    @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri
     		if allowAlias && !foundAlias {
     			return errors.New("enum %q allows aliases, but none were found", e.FullName())
     		}
    -		if e.Syntax() == protoreflect.Proto3 {
    +		if !e.IsClosed() {
     			if v := e.Values().Get(0); v.Number() != 0 {
    -				return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName())
    +				return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName())
     			}
    -			// Verify that value names in proto3 do not conflict if the
    +			// Verify that value names in open enums do not conflict if the
     			// case-insensitive prefix is removed.
     			// See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055
     			names := map[string]protoreflect.EnumValueDescriptor{}
    @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri
     				v1 := e.Values().Get(i)
     				s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix))
     				if v2, ok := names[s]; ok && v1.Number() != v2.Number() {
    -					return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
    +					return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
     				}
     				names[s] = v1
     			}
    @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri
     	return nil
     }
     
    -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
    +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
    +	// There are a few limited exceptions only for proto3
    +	isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3)
     	for i, md := range mds {
     		m := &ms[i]
     
    @@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
     		if isMessageSet && !flags.ProtoLegacy {
     			return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName())
     		}
    -		if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
    +		if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
     			return errors.New("message %q is an invalid proto1 MessageSet", m.FullName())
     		}
    -		if m.Syntax() == protoreflect.Proto3 {
    +		if isProto3 {
     			if m.ExtensionRanges().Len() > 0 {
     				return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName())
     			}
    -			// Verify that field names in proto3 do not conflict if lowercased
    -			// with all underscores removed.
    -			// See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847
    -			names := map[string]protoreflect.FieldDescriptor{}
    -			for i := 0; i < m.Fields().Len(); i++ {
    -				f1 := m.Fields().Get(i)
    -				s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1)
    -				if f2, ok := names[s]; ok {
    -					return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name())
    -				}
    -				names[s] = f1
    -			}
     		}
     
     		for j, fd := range md.GetField() {
    @@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
     				return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee())
     			}
     			if f.L1.IsProto3Optional {
    -				if f.Syntax() != protoreflect.Proto3 {
    +				if !isProto3 {
     					return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName())
     				}
     				if f.Cardinality() != protoreflect.Optional {
    @@ -159,29 +149,32 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
     					return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
     				}
     			}
    -			if f.IsWeak() && !flags.ProtoLegacy {
    +			if f.IsWeak() && !flags.ProtoLegacyWeak {
     				return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
     			}
    -			if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
    +			if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
     				return errors.New("message field %q may only be weak for an optional message", f.FullName())
     			}
     			if f.IsPacked() && !isPackable(f) {
     				return errors.New("message field %q is not packable", f.FullName())
     			}
    -			if err := checkValidGroup(f); err != nil {
    +			if err := checkValidGroup(file, f); err != nil {
     				return errors.New("message field %q is an invalid group: %v", f.FullName(), err)
     			}
     			if err := checkValidMap(f); err != nil {
     				return errors.New("message field %q is an invalid map: %v", f.FullName(), err)
     			}
    -			if f.Syntax() == protoreflect.Proto3 {
    +			if isProto3 {
     				if f.Cardinality() == protoreflect.Required {
     					return errors.New("message field %q using proto3 semantics cannot be required", f.FullName())
     				}
    -				if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 {
    -					return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName())
    +				if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
    +					return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName())
     				}
     			}
    +			if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
    +				return errors.New("message field %q with implicit presence may only use open enums", f.FullName())
    +			}
     		}
     		seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs
     		for j := range md.GetOneofDecl() {
    @@ -215,17 +208,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
     		if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil {
     			return err
     		}
    -		if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil {
    +		if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil {
     			return err
     		}
    -		if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil {
    +		if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil {
     			return err
     		}
     	}
     	return nil
     }
     
    -func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
    +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
     	for i, xd := range xds {
     		x := &xs[i]
     		// NOTE: Avoid using the IsValid method since extensions to MessageSet
    @@ -267,13 +260,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.
     		if x.IsPacked() && !isPackable(x) {
     			return errors.New("extension field %q is not packable", x.FullName())
     		}
    -		if err := checkValidGroup(x); err != nil {
    +		if err := checkValidGroup(f, x); err != nil {
     			return errors.New("extension field %q is an invalid group: %v", x.FullName(), err)
     		}
     		if md := x.Message(); md != nil && md.IsMapEntry() {
     			return errors.New("extension field %q cannot be a map entry", x.FullName())
     		}
    -		if x.Syntax() == protoreflect.Proto3 {
    +		if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) {
     			switch x.ContainingMessage().FullName() {
     			case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName():
     			case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName():
    @@ -309,21 +302,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool {
     
     // checkValidGroup reports whether fd is a valid group according to the same
     // rules that protoc imposes.
    -func checkValidGroup(fd protoreflect.FieldDescriptor) error {
    +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error {
     	md := fd.Message()
     	switch {
     	case fd.Kind() != protoreflect.GroupKind:
     		return nil
    -	case fd.Syntax() == protoreflect.Proto3:
    +	case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3):
     		return errors.New("invalid under proto3 semantics")
     	case md == nil || md.IsPlaceholder():
     		return errors.New("message must be resolvable")
    -	case fd.FullName().Parent() != md.FullName().Parent():
    -		return errors.New("message and field must be declared in the same scope")
    -	case !unicode.IsUpper(rune(md.Name()[0])):
    -		return errors.New("message name must start with an uppercase")
    -	case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
    -		return errors.New("field name must be lowercased form of the message name")
    +	}
    +	if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) {
    +		switch {
    +		case fd.FullName().Parent() != md.FullName().Parent():
    +			return errors.New("message and field must be declared in the same scope")
    +		case !unicode.IsUpper(rune(md.Name()[0])):
    +			return errors.New("message name must start with an uppercase")
    +		case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
    +			return errors.New("field name must be lowercased form of the message name")
    +		}
     	}
     	return nil
     }
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
    index 2a6b29d179..f55b036959 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
    @@ -14,12 +14,7 @@ import (
     	"google.golang.org/protobuf/proto"
     	"google.golang.org/protobuf/reflect/protoreflect"
     	"google.golang.org/protobuf/types/descriptorpb"
    -	gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
    -)
    -
    -const (
    -	SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
    -	SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
    +	"google.golang.org/protobuf/types/gofeaturespb"
     )
     
     var defaults = &descriptorpb.FeatureSetDefaults{}
    @@ -48,6 +43,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
     		return descriptorpb.Edition_EDITION_PROTO3
     	case filedesc.Edition2023:
     		return descriptorpb.Edition_EDITION_2023
    +	case filedesc.Edition2024:
    +		return descriptorpb.Edition_EDITION_2024
     	default:
     		panic(fmt.Sprintf("unknown value for edition: %v", ed))
     	}
    @@ -67,18 +64,20 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
     		fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
     		os.Exit(1)
     	}
    -	fs := defaults.GetDefaults()[0].GetFeatures()
    +	fsed := defaults.GetDefaults()[0]
     	// Using a linear search for now.
     	// Editions are guaranteed to be sorted and thus we could use a binary search.
     	// Given that there are only a handful of editions (with one more per year)
     	// there is not much reason to use a binary search.
     	for _, def := range defaults.GetDefaults() {
     		if def.GetEdition() <= edpb {
    -			fs = def.GetFeatures()
    +			fsed = def
     		} else {
     			break
     		}
     	}
    +	fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet)
    +	proto.Merge(fs, fsed.GetOverridableFeatures())
     	defaultsCache[ed] = fs
     	return fs
     }
    @@ -126,10 +125,27 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp
     		parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW
     	}
     
    -	if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil {
    -		if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
    -			parentFS.GenerateLegacyUnmarshalJSON = *luje
    -		}
    +	// We must not use proto.GetExtension(child, gofeaturespb.E_Go)
    +	// because that only works for messages we generated, but not for
    +	// dynamicpb messages. See golang/protobuf#1669.
    +	goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
    +	if !goFeatures.IsValid() {
    +		return parentFS
    +	}
    +	// gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures.
    +	gf := goFeatures.Message()
    +	fields := gf.Descriptor().Fields()
    +
    +	if fd := fields.ByName("legacy_unmarshal_json_enum"); gf.Has(fd) {
    +		parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool()
    +	}
    +
    +	if fd := fields.ByName("strip_enum_prefix"); gf.Has(fd) {
    +		parentFS.StripEnumPrefix = int(gf.Get(fd).Enum())
    +	}
    +
    +	if fd := fields.ByName("api_level"); gf.Has(fd) {
    +		parentFS.APILevel = int(gf.Get(fd).Enum())
     	}
     
     	return parentFS
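
The rewritten getFeatureSetFor clones the fixed defaults and merges the overridable ones on top. The same clone-then-merge pattern in isolation, with hand-built FeatureSet values purely for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fixed := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_EXPLICIT.Enum(),
	}
	overridable := &descriptorpb.FeatureSet{
		EnumType: descriptorpb.FeatureSet_OPEN.Enum(),
	}

	// Clone so the shared defaults are never mutated, then merge the
	// overridable features on top; fields set in overridable win.
	fs := proto.Clone(fixed).(*descriptorpb.FeatureSet)
	proto.Merge(fs, overridable)

	fmt.Println(fs.GetFieldPresence(), fs.GetEnumType()) // EXPLICIT OPEN
}
```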
    diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
    index 9d6e05420f..a5de8d4001 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
    @@ -73,6 +73,16 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
     	if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
     		p.Syntax = proto.String(file.Syntax().String())
     	}
    +	if file.Syntax() == protoreflect.Editions {
    +		desc := file
    +		if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
    +			desc = fileImportDesc.FileDescriptor
    +		}
    +
    +		if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok {
    +			p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum()
    +		}
    +	}
     	return p
     }
     
    @@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi
     	if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() {
     		p.Proto3Optional = proto.Bool(true)
     	}
    +	if field.Syntax() == protoreflect.Editions {
+		// Editions have no group keyword; this type is only set so that downstream users continue
    +		// treating this as delimited encoding.
    +		if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
    +			p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
    +		}
+		// Editions have no required keyword; this label is only set so that downstream users continue
    +		// treating it as required.
    +		if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
    +			p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
    +		}
    +	}
     	if field.HasDefault() {
     		def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor)
     		if err != nil && field.DefaultEnumValue() != nil {
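
With editions now a valid syntax and ToFileDescriptorProto preserving the edition, a descriptor round trip can be sketched as follows (a minimal file with no dependencies, so a nil resolver is assumed to be sufficient):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// A minimal editions file descriptor; EDITION_2023 is inside the
	// supported range, so protodesc accepts it and applies its defaults.
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("editions"),
		Edition: descriptorpb.Edition_EDITION_2023.Enum(),
	}

	fd, err := protodesc.NewFile(fdp, nil) // no imports, so no resolver needed
	if err != nil {
		panic(err)
	}

	// Round trip: the edition survives ToFileDescriptorProto.
	back := protodesc.ToFileDescriptorProto(fd)
	fmt.Println(back.GetSyntax(), back.GetEdition()) // editions EDITION_2023
}
```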
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
    index d5d5af6ebe..742cb518c4 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
    @@ -23,6 +23,7 @@ type (
     		Unmarshal        func(unmarshalInput) (unmarshalOutput, error)
     		Merge            func(mergeInput) mergeOutput
     		CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
    +		Equal            func(equalInput) equalOutput
     	}
     	supportFlags = uint64
     	sizeInput    = struct {
    @@ -75,4 +76,13 @@ type (
     	checkInitializedOutput = struct {
     		pragma.NoUnkeyedLiterals
     	}
    +	equalInput = struct {
    +		pragma.NoUnkeyedLiterals
    +		MessageA Message
    +		MessageB Message
    +	}
    +	equalOutput = struct {
    +		pragma.NoUnkeyedLiterals
    +		Equal bool
    +	}
     )
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
    index 00b01fbd8c..c85bfaa5bb 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
    @@ -161,7 +161,7 @@ const (
     // IsValid reports whether the syntax is valid.
     func (s Syntax) IsValid() bool {
     	switch s {
    -	case Proto2, Proto3:
    +	case Proto2, Proto3, Editions:
     		return true
     	default:
     		return false
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
    index 7dcc2ff09e..ea154eec44 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
    @@ -373,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
     		b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault)
     	case 21:
     		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
    +	case 22:
    +		b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport)
     	case 999:
     		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
     	}
    @@ -483,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
     		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
     	case 3:
     		b = p.appendSingularField(b, "debug_redact", nil)
    +	case 4:
    +		b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport)
     	case 999:
     		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
     	}
    @@ -519,6 +523,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte {
     	return b
     }
     
    +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte {
    +	if len(*p) == 0 {
    +		return b
    +	}
    +	switch (*p)[0] {
    +	case 1:
    +		b = p.appendSingularField(b, "edition_introduced", nil)
    +	case 2:
    +		b = p.appendSingularField(b, "edition_deprecated", nil)
    +	case 3:
    +		b = p.appendSingularField(b, "deprecation_warning", nil)
    +	case 4:
    +		b = p.appendSingularField(b, "edition_removed", nil)
    +	}
    +	return b
    +}
    +
     func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
     	if len(*p) == 0 {
     		return b
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
    index 60ff62b4c8..cd8fadbaf8 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
    @@ -510,7 +510,7 @@ type ExtensionType interface {
     	//
     	// ValueOf is more extensive than protoreflect.ValueOf for a given field's
     	// value as it has more type information available.
    -	ValueOf(interface{}) Value
    +	ValueOf(any) Value
     
     	// InterfaceOf completely unwraps the Value to the underlying Go type.
     	// InterfaceOf panics if the input is nil or does not represent the
    @@ -519,13 +519,13 @@ type ExtensionType interface {
     	//
     	// InterfaceOf is able to unwrap the Value further than Value.Interface
     	// as it has more type information available.
    -	InterfaceOf(Value) interface{}
    +	InterfaceOf(Value) any
     
     	// IsValidValue reports whether the Value is valid to assign to the field.
     	IsValidValue(Value) bool
     
     	// IsValidInterface reports whether the input is valid to assign to the field.
    -	IsValidInterface(interface{}) bool
    +	IsValidInterface(any) bool
     }
     
     // EnumDescriptor describes an enum and
    @@ -544,6 +544,12 @@ type EnumDescriptor interface {
     	// ReservedRanges is a list of reserved ranges of enum numbers.
     	ReservedRanges() EnumRanges
     
    +	// IsClosed reports whether this enum uses closed semantics.
    +	// See https://protobuf.dev/programming-guides/enum/#definitions.
    +	// Note: the Go protobuf implementation is not spec compliant and treats
    +	// all enums as open enums.
    +	IsClosed() bool
    +
     	isEnumDescriptor
     }
     type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
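
    [editor note] The new IsClosed accessor exposes the closed-vs-open semantics described in the comment above. A hedged sketch, using descriptorpb.Edition purely because it is a readily available enum declared in a proto2 file (and proto2 enums are closed):

        package main

        import (
            "fmt"

            "google.golang.org/protobuf/types/descriptorpb"
        )

        func main() {
            ed := descriptorpb.Edition(0).Descriptor()
            // Expected to print: google.protobuf.Edition closed: true
            // (Note the caveat above: the Go runtime still treats all enums as open.)
            fmt.Println(ed.FullName(), "closed:", ed.IsClosed())
        }
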
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
    index a7b0d06ff3..a4b78acef6 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
    @@ -152,7 +152,7 @@ type Message interface {
     	// This method may return nil.
     	//
     	// The returned methods type is identical to
    -	// google.golang.org/protobuf/runtime/protoiface.Methods.
    +	// [google.golang.org/protobuf/runtime/protoiface.Methods].
     	// Consult the protoiface package documentation for details.
     	ProtoMethods() *methods
     }
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
    deleted file mode 100644
    index 7ced876f4e..0000000000
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
    +++ /dev/null
    @@ -1,60 +0,0 @@
    -// Copyright 2018 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -//go:build purego || appengine
    -// +build purego appengine
    -
    -package protoreflect
    -
    -import "google.golang.org/protobuf/internal/pragma"
    -
    -type valueType int
    -
    -const (
    -	nilType valueType = iota
    -	boolType
    -	int32Type
    -	int64Type
    -	uint32Type
    -	uint64Type
    -	float32Type
    -	float64Type
    -	stringType
    -	bytesType
    -	enumType
    -	ifaceType
    -)
    -
    -// value is a union where only one type can be represented at a time.
    -// This uses a distinct field for each type. This is type safe in Go, but
    -// occupies more memory than necessary (72B).
    -type value struct {
    -	pragma.DoNotCompare // 0B
    -
    -	typ   valueType   // 8B
    -	num   uint64      // 8B
    -	str   string      // 16B
    -	bin   []byte      // 24B
    -	iface interface{} // 16B
    -}
    -
    -func valueOfString(v string) Value {
    -	return Value{typ: stringType, str: v}
    -}
    -func valueOfBytes(v []byte) Value {
    -	return Value{typ: bytesType, bin: v}
    -}
    -func valueOfIface(v interface{}) Value {
    -	return Value{typ: ifaceType, iface: v}
    -}
    -
    -func (v Value) getString() string {
    -	return v.str
    -}
    -func (v Value) getBytes() []byte {
    -	return v.bin
    -}
    -func (v Value) getIface() interface{} {
    -	return v.iface
    -}
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
    index 1603097311..9fe83cef5a 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
    @@ -69,8 +69,8 @@ import (
     // composite Value. Modifying an empty, read-only value panics.
     type Value value
     
    -// The protoreflect API uses a custom Value union type instead of interface{}
    -// to keep the future open for performance optimizations. Using an interface{}
    +// The protoreflect API uses a custom Value union type instead of any
    +// to keep the future open for performance optimizations. Using an any
     // always incurs an allocation for primitives (e.g., int64) since it needs to
     // be boxed on the heap (as interfaces can only contain pointers natively).
     // Instead, we represent the Value union as a flat struct that internally keeps
    @@ -85,7 +85,7 @@ type Value value
     // ValueOf returns a Value initialized with the concrete value stored in v.
     // This panics if the type does not match one of the allowed types in the
     // Value union.
    -func ValueOf(v interface{}) Value {
    +func ValueOf(v any) Value {
     	switch v := v.(type) {
     	case nil:
     		return Value{}
    @@ -192,10 +192,10 @@ func (v Value) IsValid() bool {
     	return v.typ != nilType
     }
     
    -// Interface returns v as an interface{}.
    +// Interface returns v as an any.
     //
     // Invariant: v == ValueOf(v).Interface()
    -func (v Value) Interface() interface{} {
    +func (v Value) Interface() any {
     	switch v.typ {
     	case nilType:
     		return nil
    @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool {
     	return Value(k).IsValid()
     }
     
    -// Interface returns k as an interface{}.
    -func (k MapKey) Interface() interface{} {
    +// Interface returns k as an any.
    +func (k MapKey) Interface() any {
     	return Value(k).Interface()
     }
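
    [editor note] The signature changes above are cosmetic (interface{} -> any), but the round-trip they document is worth a minimal sketch:

        package main

        import (
            "fmt"

            "google.golang.org/protobuf/reflect/protoreflect"
        )

        func main() {
            v := protoreflect.ValueOf(int64(42)) // ValueOf now takes any
            fmt.Println(v.IsValid())             // true
            fmt.Println(v.Int())                 // 42, via the typed accessor
            fmt.Println(v.Interface().(int64))   // 42, back through the any-returning accessor
        }
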
     
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
    index b1fdbe3e8e..0015fcb35d 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
    @@ -2,8 +2,7 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -//go:build !purego && !appengine && !go1.21
    -// +build !purego,!appengine,!go1.21
    +//go:build !go1.21
     
     package protoreflect
     
    @@ -45,7 +44,7 @@ var (
     
     // typeOf returns a pointer to the Go type information.
     // The pointer is comparable and equal if and only if the types are identical.
    -func typeOf(t interface{}) unsafe.Pointer {
    +func typeOf(t any) unsafe.Pointer {
     	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
     }
     
    @@ -80,7 +79,7 @@ func valueOfBytes(v []byte) Value {
     	p := (*sliceHeader)(unsafe.Pointer(&v))
     	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
     }
    -func valueOfIface(v interface{}) Value {
    +func valueOfIface(v any) Value {
     	p := (*ifaceHeader)(unsafe.Pointer(&v))
     	return Value{typ: p.Type, ptr: p.Data}
     }
    @@ -93,7 +92,7 @@ func (v Value) getBytes() (x []byte) {
     	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
     	return x
     }
    -func (v Value) getIface() (x interface{}) {
    +func (v Value) getIface() (x any) {
     	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
     	return x
     }
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
    index 4354701117..479527b58d 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
    @@ -2,8 +2,7 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -//go:build !purego && !appengine && go1.21
    -// +build !purego,!appengine,go1.21
    +//go:build go1.21
     
     package protoreflect
     
    @@ -15,7 +14,7 @@ import (
     
     type (
     	ifaceHeader struct {
    -		_    [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
    +		_    [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
     		Type unsafe.Pointer
     		Data unsafe.Pointer
     	}
    @@ -37,7 +36,7 @@ var (
     
     // typeOf returns a pointer to the Go type information.
     // The pointer is comparable and equal if and only if the types are identical.
    -func typeOf(t interface{}) unsafe.Pointer {
    +func typeOf(t any) unsafe.Pointer {
     	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
     }
     
    @@ -70,7 +69,7 @@ func valueOfString(v string) Value {
     func valueOfBytes(v []byte) Value {
     	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
     }
    -func valueOfIface(v interface{}) Value {
    +func valueOfIface(v any) Value {
     	p := (*ifaceHeader)(unsafe.Pointer(&v))
     	return Value{typ: p.Type, ptr: p.Data}
     }
    @@ -81,7 +80,7 @@ func (v Value) getString() string {
     func (v Value) getBytes() []byte {
     	return unsafe.Slice((*byte)(v.ptr), v.num)
     }
    -func (v Value) getIface() (x interface{}) {
    +func (v Value) getIface() (x any) {
     	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
     	return x
     }
    diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
    index 6267dc52a6..de17773391 100644
    --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
    +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
    @@ -95,7 +95,7 @@ type Files struct {
     	// multiple files. Only top-level declarations are registered.
     	// Note that enum values are in the top-level since that are in the same
     	// scope as the parent enum.
    -	descsByName map[protoreflect.FullName]interface{}
    +	descsByName map[protoreflect.FullName]any
     	filesByPath map[string][]protoreflect.FileDescriptor
     	numFiles    int
     }
    @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
     		defer globalMutex.Unlock()
     	}
     	if r.descsByName == nil {
    -		r.descsByName = map[protoreflect.FullName]interface{}{
    +		r.descsByName = map[protoreflect.FullName]any{
     			"": &packageDescriptor{},
     		}
     		r.filesByPath = make(map[string][]protoreflect.FileDescriptor)
    @@ -485,7 +485,7 @@ type Types struct {
     }
     
     type (
    -	typesByName         map[protoreflect.FullName]interface{}
    +	typesByName         map[protoreflect.FullName]any
     	extensionsByMessage map[protoreflect.FullName]extensionsByNumber
     	extensionsByNumber  map[protoreflect.FieldNumber]protoreflect.ExtensionType
     )
    @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
     	return nil
     }
     
    -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
    +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error {
     	name := desc.FullName()
     	prev := r.typesByName[name]
     	if prev != nil {
    @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p
     	}
     }
     
    -func typeName(t interface{}) string {
    +func typeName(t any) string {
     	switch t.(type) {
     	case protoreflect.EnumType:
     		return "enum"
    @@ -854,7 +854,7 @@ func typeName(t interface{}) string {
     	}
     }
     
    -func amendErrorWithCaller(err error, prev, curr interface{}) error {
    +func amendErrorWithCaller(err error, prev, curr any) error {
     	prevPkg := goPackage(prev)
     	currPkg := goPackage(curr)
     	if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
    @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error {
     	return errors.New("%s\n\tpreviously from: %q\n\tcurrently from:  %q", err, prevPkg, currPkg)
     }
     
    -func goPackage(v interface{}) string {
    +func goPackage(v any) string {
     	switch d := v.(type) {
     	case protoreflect.EnumType:
     		v = d.Descriptor()
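
    [editor note] The registry changes are likewise interface{} -> any renames. For orientation, a minimal sketch of the usual read path through the global registries; it assumes the generated package for the looked-up type is linked into the binary, hence the blank import:

        package main

        import (
            "fmt"

            "google.golang.org/protobuf/reflect/protoreflect"
            "google.golang.org/protobuf/reflect/protoregistry"
            _ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
        )

        func main() {
            mt, err := protoregistry.GlobalTypes.FindMessageByName(
                protoreflect.FullName("google.protobuf.Duration"))
            if err != nil {
                fmt.Println("not registered:", err)
                return
            }
            fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Duration
        }
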
    diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
    index 44cf467d88..28e9e9f039 100644
    --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
    +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
    @@ -39,6 +39,9 @@ type Methods = struct {
     
     	// CheckInitialized returns an error if any required fields in the message are not set.
     	CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
    +
    +	// Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
    +	Equal func(EqualInput) EqualOutput
     }
     
     // SupportFlags indicate support for optional features.
    @@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8
     
     const (
     	UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
    +
    +	// UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer.
    +	// The unmarshaller must not modify the contents of the buffer.
    +	UnmarshalAliasBuffer
    +
    +	// UnmarshalValidated indicates that validation has already been
    +	// performed on the input buffer.
    +	UnmarshalValidated
    +
    +	// UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are
    +	// initialized.
    +	UnmarshalCheckRequired
    +
    +	// UnmarshalNoLazyDecoding is set if this unmarshal operation should not use
    +	// lazy decoding, even when otherwise available.
    +	UnmarshalNoLazyDecoding
     )
     
     // UnmarshalOutputFlags are output from the Unmarshal method.
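
    [editor note] The new input flags above are low-level plumbing for the fast-path Unmarshal; normal code keeps using proto.Unmarshal / proto.UnmarshalOptions, which set them internally. Purely as a sketch of how the bitmask composes (emptypb is used only as a convenient message; the input is not actually dispatched here):

        package main

        import (
            "google.golang.org/protobuf/runtime/protoiface"
            "google.golang.org/protobuf/types/known/emptypb"
        )

        func main() {
            m := &emptypb.Empty{}
            in := protoiface.UnmarshalInput{
                Message: m.ProtoReflect(),
                Buf:     nil, // wire bytes would go here
                Flags:   protoiface.UnmarshalDiscardUnknown | protoiface.UnmarshalAliasBuffer,
            }
            _ = in // normally handed to the message's fast-path Unmarshal by the runtime
        }
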
    @@ -166,3 +185,18 @@ type CheckInitializedInput = struct {
     type CheckInitializedOutput = struct {
     	pragma.NoUnkeyedLiterals
     }
    +
    +// EqualInput is input to the Equal method.
    +type EqualInput = struct {
    +	pragma.NoUnkeyedLiterals
    +
    +	MessageA protoreflect.Message
    +	MessageB protoreflect.Message
    +}
    +
    +// EqualOutput is output from the Equal method.
    +type EqualOutput = struct {
    +	pragma.NoUnkeyedLiterals
    +
    +	Equal bool
    +}
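
    [editor note] EqualInput/EqualOutput back the new Methods.Equal hook added earlier in this file; presumably it lets the runtime answer equality without full reflection. User code stays at the proto.Equal level, e.g.:

        package main

        import (
            "fmt"

            "google.golang.org/protobuf/proto"
            "google.golang.org/protobuf/types/known/wrapperspb"
        )

        func main() {
            a := wrapperspb.String("hello")
            b := wrapperspb.String("hello")
            // true; the runtime may route this through the Methods.Equal fast path.
            fmt.Println(proto.Equal(a, b))
        }
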
    diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
    index 4a1ab7fb3d..93df1b569b 100644
    --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
    +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
    @@ -15,6 +15,7 @@ import (
     	"google.golang.org/protobuf/internal/filedesc"
     	"google.golang.org/protobuf/internal/filetype"
     	"google.golang.org/protobuf/internal/impl"
    +	"google.golang.org/protobuf/internal/protolazy"
     )
     
     // UnsafeEnabled specifies whether package unsafe can be used.
    @@ -39,6 +40,9 @@ type (
     	ExtensionFieldV1 = impl.ExtensionField
     
     	Pointer = impl.Pointer
    +
    +	LazyUnmarshalInfo  = *protolazy.XXX_lazyUnmarshalInfo
    +	RaceDetectHookData = impl.RaceDetectHookData
     )
     
     var X impl.Export
    diff --git a/vendor/google.golang.org/protobuf/testing/protocmp/reflect.go b/vendor/google.golang.org/protobuf/testing/protocmp/reflect.go
    index 0a5e47467a..36f7cd9e6e 100644
    --- a/vendor/google.golang.org/protobuf/testing/protocmp/reflect.go
    +++ b/vendor/google.golang.org/protobuf/testing/protocmp/reflect.go
    @@ -16,7 +16,7 @@ import (
     	"google.golang.org/protobuf/runtime/protoiface"
     )
     
    -func reflectValueOf(v interface{}) protoreflect.Value {
    +func reflectValueOf(v any) protoreflect.Value {
     	switch v := v.(type) {
     	case Enum:
     		return protoreflect.ValueOfEnum(v.Number())
    diff --git a/vendor/google.golang.org/protobuf/testing/protocmp/util.go b/vendor/google.golang.org/protobuf/testing/protocmp/util.go
    index dec34f20c4..2282811b6e 100644
    --- a/vendor/google.golang.org/protobuf/testing/protocmp/util.go
    +++ b/vendor/google.golang.org/protobuf/testing/protocmp/util.go
    @@ -32,7 +32,7 @@ var (
     //     values of a map field, or standalone [Enum] values
     //   - [][Enum] for list fields
     //   - map[K][Enum] for map fields
    -//   - interface{} for a [Message] map entry value
    +//   - any for a [Message] map entry value
     //
     // This must be used in conjunction with [Transform].
     func FilterEnum(enum protoreflect.Enum, opt cmp.Option) cmp.Option {
    @@ -48,7 +48,7 @@ func FilterEnum(enum protoreflect.Enum, opt cmp.Option) cmp.Option {
     //     values of a map field, or standalone [Message] values
     //   - [][Message] for list fields
     //   - map[K][Message] for map fields
    -//   - interface{} for a [Message] map entry value
    +//   - any for a [Message] map entry value
     //
     // This must be used in conjunction with [Transform].
     func FilterMessage(message proto.Message, opt cmp.Option) cmp.Option {
    @@ -62,7 +62,7 @@ func FilterMessage(message proto.Message, opt cmp.Option) cmp.Option {
     //   - T for singular fields
     //   - []T for list fields
     //   - map[K]T for map fields
    -//   - interface{} for a [Message] map entry value
    +//   - any for a [Message] map entry value
     //
     // This must be used in conjunction with [Transform].
     func FilterField(message proto.Message, name protoreflect.Name, opt cmp.Option) cmp.Option {
    @@ -78,7 +78,7 @@ func FilterField(message proto.Message, name protoreflect.Name, opt cmp.Option)
     //   - T for singular fields
     //   - []T for list fields
     //   - map[K]T for map fields
    -//   - interface{} for a [Message] map entry value
    +//   - any for a [Message] map entry value
     //
     // This must be used in conjunction with [Transform].
     func FilterOneof(message proto.Message, name protoreflect.Name, opt cmp.Option) cmp.Option {
    @@ -524,8 +524,11 @@ func IgnoreUnknown() cmp.Option {
     // handled by this option. To sort Go slices that are not repeated fields,
     // consider using [github.com/google/go-cmp/cmp/cmpopts.SortSlices] instead.
     //
    +// The sorting of messages does not take into account ignored fields or oneofs
    +// as a result of [IgnoreFields] or [IgnoreOneofs].
    +//
     // This must be used in conjunction with [Transform].
    -func SortRepeated(lessFunc interface{}) cmp.Option {
    +func SortRepeated(lessFunc any) cmp.Option {
     	t, ok := checkTTBFunc(lessFunc)
     	if !ok {
     		panic(fmt.Sprintf("invalid less function: %T", lessFunc))
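
    [editor note] A hedged example of SortRepeated, using structpb.ListValue only because it is a readily available message with a repeated (message-typed) field; for message kinds the less function is expected to take the concrete pointer type:

        package main

        import (
            "fmt"

            "github.com/google/go-cmp/cmp"
            "google.golang.org/protobuf/testing/protocmp"
            "google.golang.org/protobuf/types/known/structpb"
        )

        func main() {
            x, _ := structpb.NewList([]any{"b", "a"})
            y, _ := structpb.NewList([]any{"a", "b"})
            // Sort repeated *structpb.Value elements before comparing, so order is ignored.
            opt := protocmp.SortRepeated(func(a, b *structpb.Value) bool {
                return a.GetStringValue() < b.GetStringValue()
            })
            fmt.Println(cmp.Equal(x, y, protocmp.Transform(), opt)) // true
        }
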
    @@ -589,7 +592,7 @@ func SortRepeated(lessFunc interface{}) cmp.Option {
     	}, opt)
     }
     
    -func checkTTBFunc(lessFunc interface{}) (reflect.Type, bool) {
    +func checkTTBFunc(lessFunc any) (reflect.Type, bool) {
     	switch t := reflect.TypeOf(lessFunc); {
     	case t == nil:
     		return nil, false
    @@ -624,6 +627,9 @@ func checkTTBFunc(lessFunc interface{}) (reflect.Type, bool) {
     //	    ... // user-provided definition for less
     //	}))
     //
    +// The sorting of messages does not take into account ignored fields or oneofs
    +// as a result of [IgnoreFields] or [IgnoreOneofs].
    +//
     // This must be used in conjunction with [Transform].
     func SortRepeatedFields(message proto.Message, names ...protoreflect.Name) cmp.Option {
     	var opts cmp.Options
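
    [editor note] And the name-based variant, which picks a default ordering per scalar kind; fieldmaskpb.FieldMask is used here only because its paths field is a convenient repeated string:

        package main

        import (
            "fmt"

            "github.com/google/go-cmp/cmp"
            "google.golang.org/protobuf/testing/protocmp"
            "google.golang.org/protobuf/types/known/fieldmaskpb"
        )

        func main() {
            x := &fieldmaskpb.FieldMask{Paths: []string{"b", "a"}}
            y := &fieldmaskpb.FieldMask{Paths: []string{"a", "b"}}
            fmt.Println(cmp.Equal(x, y, protocmp.Transform(),
                protocmp.SortRepeatedFields(&fieldmaskpb.FieldMask{}, "paths"))) // true
        }
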
    @@ -634,7 +640,7 @@ func SortRepeatedFields(message proto.Message, names ...protoreflect.Name) cmp.O
     			panic(fmt.Sprintf("message field %q is not repeated", fd.FullName()))
     		}
     
    -		var lessFunc interface{}
    +		var lessFunc any
     		switch fd.Kind() {
     		case protoreflect.BoolKind:
     			lessFunc = func(x, y bool) bool { return !x && y }
    diff --git a/vendor/google.golang.org/protobuf/testing/protocmp/xform.go b/vendor/google.golang.org/protobuf/testing/protocmp/xform.go
    index 0a1aef9b40..de29d97326 100644
    --- a/vendor/google.golang.org/protobuf/testing/protocmp/xform.go
    +++ b/vendor/google.golang.org/protobuf/testing/protocmp/xform.go
    @@ -115,7 +115,7 @@ func (t1 messageMeta) Equal(t2 messageMeta) bool {
     // of the encoded field (as the [protoreflect.RawFields] type).
     //
     // Message values must not be created by or mutated by users.
    -type Message map[string]interface{}
    +type Message map[string]any
     
     // Unwrap returns the original message value.
     // It returns nil if this Message was not constructed from another message.
    @@ -226,7 +226,7 @@ func Transform(opts ...option) cmp.Option {
     		}
     
     		return false
    -	}, cmp.Transformer("protocmp.Transform", func(v interface{}) Message {
    +	}, cmp.Transformer("protocmp.Transform", func(v any) Message {
     		// For user convenience, shallow copy the message value if necessary
     		// in order for it to implement the message interface.
     		if rv := reflect.ValueOf(v); rv.IsValid() && rv.Kind() != reflect.Ptr && !isMessageType(rv.Type()) {
    @@ -303,7 +303,7 @@ func (xf *transformer) transformMessage(m protoreflect.Message) Message {
     	return mx
     }
     
    -func (xf *transformer) transformList(fd protoreflect.FieldDescriptor, lv protoreflect.List) interface{} {
    +func (xf *transformer) transformList(fd protoreflect.FieldDescriptor, lv protoreflect.List) any {
     	t := protoKindToGoType(fd.Kind())
     	rv := reflect.MakeSlice(reflect.SliceOf(t), lv.Len(), lv.Len())
     	for i := 0; i < lv.Len(); i++ {
    @@ -313,7 +313,7 @@ func (xf *transformer) transformList(fd protoreflect.FieldDescriptor, lv protore
     	return rv.Interface()
     }
     
    -func (xf *transformer) transformMap(fd protoreflect.FieldDescriptor, mv protoreflect.Map) interface{} {
    +func (xf *transformer) transformMap(fd protoreflect.FieldDescriptor, mv protoreflect.Map) any {
     	kfd := fd.MapKey()
     	vfd := fd.MapValue()
     	kt := protoKindToGoType(kfd.Kind())
    @@ -328,7 +328,7 @@ func (xf *transformer) transformMap(fd protoreflect.FieldDescriptor, mv protoref
     	return rv.Interface()
     }
     
    -func (xf *transformer) transformSingular(fd protoreflect.FieldDescriptor, v protoreflect.Value) interface{} {
    +func (xf *transformer) transformSingular(fd protoreflect.FieldDescriptor, v protoreflect.Value) any {
     	switch fd.Kind() {
     	case protoreflect.EnumKind:
     		return Enum{num: v.Enum(), ed: fd.Enum()}
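
    [editor note] Transform is the entry point that converts messages into the protocmp.Message (map[string]any) form handled by the transformers above. A minimal usage sketch:

        package main

        import (
            "fmt"

            "github.com/google/go-cmp/cmp"
            "google.golang.org/protobuf/testing/protocmp"
            "google.golang.org/protobuf/types/known/wrapperspb"
        )

        func main() {
            x := wrapperspb.String("hello")
            y := wrapperspb.String("world")
            // Without protocmp.Transform, cmp would panic on the messages' unexported fields.
            fmt.Println(cmp.Diff(x, y, protocmp.Transform()))
        }
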
    diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
    index 78624cf60b..a551e7ae94 100644
    --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
    @@ -54,6 +54,9 @@ type Edition int32
     const (
     	// A placeholder for an unknown edition value.
     	Edition_EDITION_UNKNOWN Edition = 0
    +	// A placeholder edition for specifying default behaviors *before* a feature
    +	// was first introduced.  This is effectively an "infinite past".
    +	Edition_EDITION_LEGACY Edition = 900
     	// Legacy syntax "editions".  These pre-date editions, but behave much like
     	// distinct editions.  These can't be used to specify the edition of proto
     	// files, but feature definitions must supply proto2/proto3 defaults for
    @@ -66,7 +69,7 @@ const (
     	Edition_EDITION_2023 Edition = 1000
     	Edition_EDITION_2024 Edition = 1001
     	// Placeholder editions for testing feature resolution.  These should not be
    -	// used or relyed on outside of tests.
    +	// used or relied on outside of tests.
     	Edition_EDITION_1_TEST_ONLY     Edition = 1
     	Edition_EDITION_2_TEST_ONLY     Edition = 2
     	Edition_EDITION_99997_TEST_ONLY Edition = 99997
    @@ -82,6 +85,7 @@ const (
     var (
     	Edition_name = map[int32]string{
     		0:          "EDITION_UNKNOWN",
    +		900:        "EDITION_LEGACY",
     		998:        "EDITION_PROTO2",
     		999:        "EDITION_PROTO3",
     		1000:       "EDITION_2023",
    @@ -95,6 +99,7 @@ var (
     	}
     	Edition_value = map[string]int32{
     		"EDITION_UNKNOWN":         0,
    +		"EDITION_LEGACY":          900,
     		"EDITION_PROTO2":          998,
     		"EDITION_PROTO3":          999,
     		"EDITION_2023":            1000,
    @@ -572,8 +577,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
     }
     
     // If set to RETENTION_SOURCE, the option will be omitted from the binary.
    -// Note: as of January 2023, support for this is in progress and does not yet
    -// have an effect (b/264593489).
     type FieldOptions_OptionRetention int32
     
     const (
    @@ -635,8 +638,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) {
     
     // This indicates the types of entities that the field may apply to when used
     // as an option. If it is unset, then the field may be freely used as an
    -// option on any kind of entity. Note: as of January 2023, support for this is
    -// in progress and does not yet have an effect (b/264593489).
    +// option on any kind of entity.
     type FieldOptions_OptionTargetType int32
     
     const (
    @@ -1203,20 +1205,18 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
     // The protocol compiler can output a FileDescriptorSet containing the .proto
     // files it parses.
     type FileDescriptorSet struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    -	File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
    +	state           protoimpl.MessageState `protogen:"open.v1"`
    +	File            []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
    +	extensionFields protoimpl.ExtensionFields
    +	unknownFields   protoimpl.UnknownFields
    +	sizeCache       protoimpl.SizeCache
     }
     
     func (x *FileDescriptorSet) Reset() {
     	*x = FileDescriptorSet{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FileDescriptorSet) String() string {
    @@ -1227,7 +1227,7 @@ func (*FileDescriptorSet) ProtoMessage() {}
     
     func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1251,12 +1251,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto {
     
     // Describes a complete .proto file.
     type FileDescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    -	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`       // file name, relative to root of source tree
    -	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
    +	state   protoimpl.MessageState `protogen:"open.v1"`
    +	Name    *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`       // file name, relative to root of source tree
    +	Package *string                `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
     	// Names of files imported by this file.
     	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
     	// Indexes of the public imported files in the dependency list above.
    @@ -1281,16 +1278,16 @@ type FileDescriptorProto struct {
     	// If `edition` is present, this value must be "editions".
     	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
     	// The edition of the proto file.
    -	Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
    +	Edition       *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *FileDescriptorProto) Reset() {
     	*x = FileDescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FileDescriptorProto) String() string {
    @@ -1301,7 +1298,7 @@ func (*FileDescriptorProto) ProtoMessage() {}
     
     func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1409,10 +1406,7 @@ func (x *FileDescriptorProto) GetEdition() Edition {
     
     // Describes a message type.
     type DescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state          protoimpl.MessageState            `protogen:"open.v1"`
     	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
     	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
     	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
    @@ -1424,16 +1418,16 @@ type DescriptorProto struct {
     	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
     	// Reserved field names, which may not be used by fields in the same message.
     	// A given name may only be reserved once.
    -	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *DescriptorProto) Reset() {
     	*x = DescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *DescriptorProto) String() string {
    @@ -1444,7 +1438,7 @@ func (*DescriptorProto) ProtoMessage() {}
     
     func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1530,11 +1524,7 @@ func (x *DescriptorProto) GetReservedName() []string {
     }
     
     type ExtensionRangeOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
     	// For external users: DO NOT USE. We are in the process of open sourcing
    @@ -1546,7 +1536,10 @@ type ExtensionRangeOptions struct {
     	// The verification state of the range.
     	// TODO: flip the default to DECLARATION once all empty ranges
     	// are marked as UNVERIFIED.
    -	Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
    +	Verification    *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
    +	extensionFields protoimpl.ExtensionFields
    +	unknownFields   protoimpl.UnknownFields
    +	sizeCache       protoimpl.SizeCache
     }
     
     // Default values for ExtensionRangeOptions fields.
    @@ -1556,11 +1549,9 @@ const (
     
     func (x *ExtensionRangeOptions) Reset() {
     	*x = ExtensionRangeOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ExtensionRangeOptions) String() string {
    @@ -1571,7 +1562,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {}
     
     func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1616,10 +1607,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica
     
     // Describes a field within a message.
     type FieldDescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state  protoimpl.MessageState      `protogen:"open.v1"`
     	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
     	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
     	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
    @@ -1671,15 +1659,15 @@ type FieldDescriptorProto struct {
     	// Proto2 optional fields do not set this flag, because they already indicate
     	// optional with `LABEL_OPTIONAL`.
     	Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"`
    +	unknownFields  protoimpl.UnknownFields
    +	sizeCache      protoimpl.SizeCache
     }
     
     func (x *FieldDescriptorProto) Reset() {
     	*x = FieldDescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FieldDescriptorProto) String() string {
    @@ -1690,7 +1678,7 @@ func (*FieldDescriptorProto) ProtoMessage() {}
     
     func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1784,21 +1772,18 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool {
     
     // Describes a oneof.
     type OneofDescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	Name          *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    +	Options       *OneofOptions          `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
     	unknownFields protoimpl.UnknownFields
    -
    -	Name    *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    -	Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *OneofDescriptorProto) Reset() {
     	*x = OneofDescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *OneofDescriptorProto) String() string {
    @@ -1809,7 +1794,7 @@ func (*OneofDescriptorProto) ProtoMessage() {}
     
     func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1840,10 +1825,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions {
     
     // Describes an enum type.
     type EnumDescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state   protoimpl.MessageState      `protogen:"open.v1"`
     	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
     	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
     	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
    @@ -1853,16 +1835,16 @@ type EnumDescriptorProto struct {
     	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
     	// Reserved enum value names, which may not be reused. A given name may only
     	// be reserved once.
    -	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *EnumDescriptorProto) Reset() {
     	*x = EnumDescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *EnumDescriptorProto) String() string {
    @@ -1873,7 +1855,7 @@ func (*EnumDescriptorProto) ProtoMessage() {}
     
     func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1925,22 +1907,19 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
     
     // Describes a value within an enum.
     type EnumValueDescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	Name          *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    +	Number        *int32                 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
    +	Options       *EnumValueOptions      `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
     	unknownFields protoimpl.UnknownFields
    -
    -	Name    *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    -	Number  *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
    -	Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *EnumValueDescriptorProto) Reset() {
     	*x = EnumValueDescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *EnumValueDescriptorProto) String() string {
    @@ -1951,7 +1930,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {}
     
     func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -1989,22 +1968,19 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
     
     // Describes a service.
     type ServiceDescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState   `protogen:"open.v1"`
    +	Name          *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    +	Method        []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
    +	Options       *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
     	unknownFields protoimpl.UnknownFields
    -
    -	Name    *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    -	Method  []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
    -	Options *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *ServiceDescriptorProto) Reset() {
     	*x = ServiceDescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ServiceDescriptorProto) String() string {
    @@ -2015,7 +1991,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {}
     
     func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -2053,11 +2029,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions {
     
     // Describes a method of a service.
     type MethodDescriptorProto struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    -	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    +	state protoimpl.MessageState `protogen:"open.v1"`
    +	Name  *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
     	// Input and output type names.  These are resolved in the same way as
     	// FieldDescriptorProto.type_name, but must refer to a message type.
     	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
    @@ -2067,6 +2040,8 @@ type MethodDescriptorProto struct {
     	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
     	// Identifies if server streams multiple server messages
     	ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
    +	unknownFields   protoimpl.UnknownFields
    +	sizeCache       protoimpl.SizeCache
     }
     
     // Default values for MethodDescriptorProto fields.
    @@ -2077,11 +2052,9 @@ const (
     
     func (x *MethodDescriptorProto) Reset() {
     	*x = MethodDescriptorProto{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *MethodDescriptorProto) String() string {
    @@ -2092,7 +2065,7 @@ func (*MethodDescriptorProto) ProtoMessage() {}
     
     func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -2150,11 +2123,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool {
     }
     
     type FileOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Sets the Java package where classes generated from this .proto will be
     	// placed.  By default, the proto package is used, but this is often
     	// inappropriate because proto packages do not normally start with backwards
    @@ -2177,12 +2146,16 @@ type FileOptions struct {
     	//
     	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
     	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
    -	// If set true, then the Java2 code generator will generate code that
    -	// throws an exception whenever an attempt is made to assign a non-UTF-8
    -	// byte sequence to a string field.
    -	// Message reflection will do the same.
    -	// However, an extension field still accepts non-UTF-8 byte sequences.
    -	// This option has no effect on when used with the lite runtime.
    +	// A proto2 file can set this to true to opt in to UTF-8 checking for Java,
    +	// which will throw an exception if invalid UTF-8 is parsed from the wire or
    +	// assigned to a string field.
    +	//
    +	// TODO: clarify exactly what kinds of field types this option
    +	// applies to, and update these docs accordingly.
    +	//
    +	// Proto3 files already perform these checks. Setting the option explicitly to
    +	// false has no effect: it cannot be used to opt proto3 files out of UTF-8
    +	// checks.
     	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
     	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
     	// Sets the Go package where structs generated from this .proto will be
    @@ -2242,6 +2215,9 @@ type FileOptions struct {
     	// The parser stores options it doesn't recognize here.
     	// See the documentation for the "Options" section above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     // Default values for FileOptions fields.
    @@ -2258,11 +2234,9 @@ const (
     
     func (x *FileOptions) Reset() {
     	*x = FileOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FileOptions) String() string {
    @@ -2273,7 +2247,7 @@ func (*FileOptions) ProtoMessage() {}
     
     func (x *FileOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -2437,11 +2411,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
     }
     
     type MessageOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Set true to use the old proto1 MessageSet wire format for extensions.
     	// This is provided for backwards-compatibility with the MessageSet wire
     	// format.  You should not use this for any other reason:  It's less
    @@ -2514,6 +2484,9 @@ type MessageOptions struct {
     	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     // Default values for MessageOptions fields.
    @@ -2525,11 +2498,9 @@ const (
     
     func (x *MessageOptions) Reset() {
     	*x = MessageOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *MessageOptions) String() string {
    @@ -2540,7 +2511,7 @@ func (*MessageOptions) ProtoMessage() {}
     
     func (x *MessageOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -2606,17 +2577,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
     }
     
     type FieldOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
    +	// NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead.
     	// The ctype option instructs the C++ code generator to use a different
     	// representation of the field than it normally would.  See the specific
     	// options below.  This option is only implemented to support use of
     	// [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of
    -	// type "bytes" in the open source release -- sorry, we'll try to include
    -	// other types in a future version!
    +	// type "bytes" in the open source release.
    +	// TODO: make ctype actually deprecated.
     	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
     	// The packed option can be enabled for repeated primitive fields to enable
     	// a more efficient representation on the wire. Rather than repeatedly
    @@ -2679,9 +2647,13 @@ type FieldOptions struct {
     	Targets         []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
     	EditionDefaults []*FieldOptions_EditionDefault  `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
     	// Any features defined in the specific edition.
    -	Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
    +	Features       *FeatureSet                  `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
    +	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     // Default values for FieldOptions fields.
    @@ -2697,11 +2669,9 @@ const (
     
     func (x *FieldOptions) Reset() {
     	*x = FieldOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FieldOptions) String() string {
    @@ -2712,7 +2682,7 @@ func (*FieldOptions) ProtoMessage() {}
     
     func (x *FieldOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -2811,6 +2781,13 @@ func (x *FieldOptions) GetFeatures() *FeatureSet {
     	return nil
     }
     
    +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport {
    +	if x != nil {
    +		return x.FeatureSupport
    +	}
    +	return nil
    +}
    +
     func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
     	if x != nil {
     		return x.UninterpretedOption
    @@ -2819,24 +2796,21 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
     }
     
     type OneofOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Any features defined in the specific edition.
     	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     func (x *OneofOptions) Reset() {
     	*x = OneofOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *OneofOptions) String() string {
    @@ -2847,7 +2821,7 @@ func (*OneofOptions) ProtoMessage() {}
     
     func (x *OneofOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -2877,11 +2851,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
     }
     
     type EnumOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Set this option to true to allow mapping different tag names to the same
     	// value.
     	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
    @@ -2903,6 +2873,9 @@ type EnumOptions struct {
     	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     // Default values for EnumOptions fields.
    @@ -2912,11 +2885,9 @@ const (
     
     func (x *EnumOptions) Reset() {
     	*x = EnumOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *EnumOptions) String() string {
    @@ -2927,7 +2898,7 @@ func (*EnumOptions) ProtoMessage() {}
     
     func (x *EnumOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -2979,11 +2950,7 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
     }
     
     type EnumValueOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Is this enum value deprecated?
     	// Depending on the target platform, this can emit Deprecated annotations
     	// for the enum value, or it will be completely ignored; in the very least,
    @@ -2995,8 +2962,13 @@ type EnumValueOptions struct {
     	// out when using debug formats, e.g. when the field contains sensitive
     	// credentials.
     	DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
    +	// Information about the support window of a feature value.
    +	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     // Default values for EnumValueOptions fields.
    @@ -3007,11 +2979,9 @@ const (
     
     func (x *EnumValueOptions) Reset() {
     	*x = EnumValueOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *EnumValueOptions) String() string {
    @@ -3022,7 +2992,7 @@ func (*EnumValueOptions) ProtoMessage() {}
     
     func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3058,6 +3028,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool {
     	return Default_EnumValueOptions_DebugRedact
     }
     
    +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport {
    +	if x != nil {
    +		return x.FeatureSupport
    +	}
    +	return nil
    +}
    +
     func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
     	if x != nil {
     		return x.UninterpretedOption
    @@ -3066,11 +3043,7 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
     }
     
     type ServiceOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Any features defined in the specific edition.
     	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
     	// Is this service deprecated?
    @@ -3080,6 +3053,9 @@ type ServiceOptions struct {
     	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     // Default values for ServiceOptions fields.
    @@ -3089,11 +3065,9 @@ const (
     
     func (x *ServiceOptions) Reset() {
     	*x = ServiceOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ServiceOptions) String() string {
    @@ -3104,7 +3078,7 @@ func (*ServiceOptions) ProtoMessage() {}
     
     func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3141,11 +3115,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
     }
     
     type MethodOptions struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Is this method deprecated?
     	// Depending on the target platform, this can emit Deprecated annotations
     	// for the method, or it will be completely ignored; in the very least,
    @@ -3156,6 +3126,9 @@ type MethodOptions struct {
     	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
     	// The parser stores options it doesn't recognize here. See above.
     	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
    +	extensionFields     protoimpl.ExtensionFields
    +	unknownFields       protoimpl.UnknownFields
    +	sizeCache           protoimpl.SizeCache
     }
     
     // Default values for MethodOptions fields.
    @@ -3166,11 +3139,9 @@ const (
     
     func (x *MethodOptions) Reset() {
     	*x = MethodOptions{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *MethodOptions) String() string {
    @@ -3181,7 +3152,7 @@ func (*MethodOptions) ProtoMessage() {}
     
     func (x *MethodOptions) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3231,11 +3202,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
     // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
     // in them.
     type UninterpretedOption struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    -	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
    +	state protoimpl.MessageState          `protogen:"open.v1"`
    +	Name  []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
     	// The value of the uninterpreted option, in whatever type the tokenizer
     	// identified it as during parsing. Exactly one of these should be set.
     	IdentifierValue  *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
    @@ -3244,15 +3212,15 @@ type UninterpretedOption struct {
     	DoubleValue      *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
     	StringValue      []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
     	AggregateValue   *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
    +	unknownFields    protoimpl.UnknownFields
    +	sizeCache        protoimpl.SizeCache
     }
     
     func (x *UninterpretedOption) Reset() {
     	*x = UninterpretedOption{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *UninterpretedOption) String() string {
    @@ -3263,7 +3231,7 @@ func (*UninterpretedOption) ProtoMessage() {}
     
     func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3334,26 +3302,23 @@ func (x *UninterpretedOption) GetAggregateValue() string {
     // be designed and implemented to handle this, hopefully before we ever hit a
     // conflict here.
     type FeatureSet struct {
    -	state           protoimpl.MessageState
    -	sizeCache       protoimpl.SizeCache
    -	unknownFields   protoimpl.UnknownFields
    -	extensionFields protoimpl.ExtensionFields
    -
    +	state                 protoimpl.MessageState            `protogen:"open.v1"`
     	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
     	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
     	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
     	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
     	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
     	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
    +	extensionFields       protoimpl.ExtensionFields
    +	unknownFields         protoimpl.UnknownFields
    +	sizeCache             protoimpl.SizeCache
     }
     
     func (x *FeatureSet) Reset() {
     	*x = FeatureSet{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FeatureSet) String() string {
    @@ -3364,7 +3329,7 @@ func (*FeatureSet) ProtoMessage() {}
     
     func (x *FeatureSet) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3426,10 +3391,7 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
     // feature resolution. The resolution with this object becomes a simple search
     // for the closest matching edition, followed by proto merges.
     type FeatureSetDefaults struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state    protoimpl.MessageState                         `protogen:"open.v1"`
     	Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
     	// The minimum supported edition (inclusive) when this was constructed.
     	// Editions before this will not have defaults.
    @@ -3437,15 +3399,15 @@ type FeatureSetDefaults struct {
     	// The maximum known edition (inclusive) when this was constructed. Editions
     	// after this will not have reliable defaults.
     	MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
    +	unknownFields  protoimpl.UnknownFields
    +	sizeCache      protoimpl.SizeCache
     }
     
     func (x *FeatureSetDefaults) Reset() {
     	*x = FeatureSetDefaults{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FeatureSetDefaults) String() string {
    @@ -3456,7 +3418,7 @@ func (*FeatureSetDefaults) ProtoMessage() {}
     
     func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3495,10 +3457,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition {
     // Encapsulates information about the original source file from which a
     // FileDescriptorProto was generated.
     type SourceCodeInfo struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// A Location identifies a piece of source code in a .proto file which
     	// corresponds to a particular definition.  This information is intended
     	// to be useful to IDEs, code indexers, documentation generators, and similar
    @@ -3547,16 +3506,17 @@ type SourceCodeInfo struct {
     	//   - Code which tries to interpret locations should probably be designed to
     	//     ignore those that it doesn't understand, as more types of locations could
     	//     be recorded in the future.
    -	Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
    +	Location        []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
    +	extensionFields protoimpl.ExtensionFields
    +	unknownFields   protoimpl.UnknownFields
    +	sizeCache       protoimpl.SizeCache
     }
     
     func (x *SourceCodeInfo) Reset() {
     	*x = SourceCodeInfo{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *SourceCodeInfo) String() string {
    @@ -3567,7 +3527,7 @@ func (*SourceCodeInfo) ProtoMessage() {}
     
     func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3593,22 +3553,19 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
     // file. A GeneratedCodeInfo message is associated with only one generated
     // source file, but may contain references to different source .proto files.
     type GeneratedCodeInfo struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// An Annotation connects some span of text in generated code to an element
     	// of its generating .proto file.
    -	Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
    +	Annotation    []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *GeneratedCodeInfo) Reset() {
     	*x = GeneratedCodeInfo{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *GeneratedCodeInfo) String() string {
    @@ -3619,7 +3576,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {}
     
     func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3642,22 +3599,19 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
     }
     
     type DescriptorProto_ExtensionRange struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
    +	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
    +	Options       *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
     	unknownFields protoimpl.UnknownFields
    -
    -	Start   *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
    -	End     *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
    -	Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *DescriptorProto_ExtensionRange) Reset() {
     	*x = DescriptorProto_ExtensionRange{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *DescriptorProto_ExtensionRange) String() string {
    @@ -3668,7 +3622,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
     
     func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3708,21 +3662,18 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
     // fields or extension ranges in the same message. Reserved ranges may
     // not overlap.
     type DescriptorProto_ReservedRange struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
    +	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
     	unknownFields protoimpl.UnknownFields
    -
    -	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
    -	End   *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *DescriptorProto_ReservedRange) Reset() {
     	*x = DescriptorProto_ReservedRange{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *DescriptorProto_ReservedRange) String() string {
    @@ -3733,7 +3684,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {}
     
     func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3763,10 +3714,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 {
     }
     
     type ExtensionRangeOptions_Declaration struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The extension number declared within the extension range.
     	Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"`
     	// The fully-qualified name of the extension field. There must be a leading
    @@ -3782,16 +3730,16 @@ type ExtensionRangeOptions_Declaration struct {
     	Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"`
     	// If true, indicates that the extension must be defined as repeated.
     	// Otherwise the extension must be defined as optional.
    -	Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
    +	Repeated      *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *ExtensionRangeOptions_Declaration) Reset() {
     	*x = ExtensionRangeOptions_Declaration{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ExtensionRangeOptions_Declaration) String() string {
    @@ -3802,7 +3750,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
     
     func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3859,21 +3807,18 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool {
     // is inclusive such that it can appropriately represent the entire int32
     // domain.
     type EnumDescriptorProto_EnumReservedRange struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
    +	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Inclusive.
     	unknownFields protoimpl.UnknownFields
    -
    -	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
    -	End   *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Inclusive.
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
     	*x = EnumDescriptorProto_EnumReservedRange{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *EnumDescriptorProto_EnumReservedRange) String() string {
    @@ -3884,7 +3829,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
     
     func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3914,21 +3859,18 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
     }
     
     type FieldOptions_EditionDefault struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	Edition       *Edition               `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
    +	Value         *string                `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
     	unknownFields protoimpl.UnknownFields
    -
    -	Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
    -	Value   *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *FieldOptions_EditionDefault) Reset() {
     	*x = FieldOptions_EditionDefault{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FieldOptions_EditionDefault) String() string {
    @@ -3939,7 +3881,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {}
     
     func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -3968,27 +3910,103 @@ func (x *FieldOptions_EditionDefault) GetValue() string {
     	return ""
     }
     
    +// Information about the support window of a feature.
    +type FieldOptions_FeatureSupport struct {
    +	state protoimpl.MessageState `protogen:"open.v1"`
    +	// The edition that this feature was first available in.  In editions
    +	// earlier than this one, the default assigned to EDITION_LEGACY will be
    +	// used, and proto files will not be able to override it.
    +	EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"`
    +	// The edition this feature becomes deprecated in.  Using this after this
    +	// edition may trigger warnings.
    +	EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"`
    +	// The deprecation warning text if this feature is used after the edition it
    +	// was marked deprecated in.
    +	DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"`
    +	// The edition this feature is no longer available in.  In editions after
    +	// this one, the last default assigned will be used, and proto files will
    +	// not be able to override it.
    +	EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"`
    +	unknownFields  protoimpl.UnknownFields
    +	sizeCache      protoimpl.SizeCache
    +}
    +
    +func (x *FieldOptions_FeatureSupport) Reset() {
    +	*x = FieldOptions_FeatureSupport{}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
    +}
    +
    +func (x *FieldOptions_FeatureSupport) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*FieldOptions_FeatureSupport) ProtoMessage() {}
    +
    +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
    +	if x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead.
    +func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) {
    +	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1}
    +}
    +
    +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition {
    +	if x != nil && x.EditionIntroduced != nil {
    +		return *x.EditionIntroduced
    +	}
    +	return Edition_EDITION_UNKNOWN
    +}
    +
    +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition {
    +	if x != nil && x.EditionDeprecated != nil {
    +		return *x.EditionDeprecated
    +	}
    +	return Edition_EDITION_UNKNOWN
    +}
    +
    +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string {
    +	if x != nil && x.DeprecationWarning != nil {
    +		return *x.DeprecationWarning
    +	}
    +	return ""
    +}
    +
    +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition {
    +	if x != nil && x.EditionRemoved != nil {
    +		return *x.EditionRemoved
    +	}
    +	return Edition_EDITION_UNKNOWN
    +}
    +
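Illustrative sketch, not part of the generated file: reading the new feature-support window through the accessors added above. The snippet below is an assumed usage example; it relies only on the descriptorpb getters shown in this diff.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Hypothetical options value; only EditionIntroduced is set, the rest stay unset.
	opts := &descriptorpb.FieldOptions{
		FeatureSupport: &descriptorpb.FieldOptions_FeatureSupport{
			EditionIntroduced: descriptorpb.Edition_EDITION_2023.Enum(),
		},
	}

	fs := opts.GetFeatureSupport() // nil-safe accessor, as generated above
	if fs == nil {
		fmt.Println("no feature-support window recorded")
		return
	}
	// Unset editions fall back to Edition_EDITION_UNKNOWN, per the getters above.
	fmt.Printf("introduced=%v deprecated=%v removed=%v\n",
		fs.GetEditionIntroduced(), fs.GetEditionDeprecated(), fs.GetEditionRemoved())
	if w := fs.GetDeprecationWarning(); w != "" {
		fmt.Println("deprecation warning:", w)
	}
}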
     // The name of the uninterpreted option.  Each string represents a segment in
     // a dot-separated name.  is_extension is true iff a segment represents an
     // extension (denoted with parentheses in options specs in .proto files).
     // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents
     // "foo.(bar.baz).moo".
     type UninterpretedOption_NamePart struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
    +	NamePart      *string                `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
    +	IsExtension   *bool                  `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
     	unknownFields protoimpl.UnknownFields
    -
    -	NamePart    *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
    -	IsExtension *bool   `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
    +	sizeCache     protoimpl.SizeCache
     }
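Illustrative sketch, not part of the generated file: building the NamePart sequence for the option name "foo.(bar.baz).moo" described in the comment above, using the proto.String/proto.Bool scalar helpers.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// "foo.(bar.baz).moo": the extension segment carries is_extension = true.
	opt := &descriptorpb.UninterpretedOption{
		Name: []*descriptorpb.UninterpretedOption_NamePart{
			{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
			{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
			{NamePart: proto.String("moo"), IsExtension: proto.Bool(false)},
		},
	}
	for _, p := range opt.GetName() {
		fmt.Printf("%q extension=%v\n", p.GetNamePart(), p.GetIsExtension())
	}
}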
     
     func (x *UninterpretedOption_NamePart) Reset() {
     	*x = UninterpretedOption_NamePart{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *UninterpretedOption_NamePart) String() string {
    @@ -3998,8 +4016,8 @@ func (x *UninterpretedOption_NamePart) String() string {
     func (*UninterpretedOption_NamePart) ProtoMessage() {}
     
     func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -4033,21 +4051,21 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
     // the defaults at the closest matching edition ordered at or before it should
     // be used.  This field must be in strict ascending order by edition.
     type FeatureSetDefaults_FeatureSetEditionDefault struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state   protoimpl.MessageState `protogen:"open.v1"`
    +	Edition *Edition               `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
    +	// Defaults of features that can be overridden in this edition.
    +	OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"`
    +	// Defaults of features that can't be overridden in this edition.
    +	FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"`
     	unknownFields protoimpl.UnknownFields
    -
    -	Edition  *Edition    `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
    -	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
     	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
    @@ -4057,8 +4075,8 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
     func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
     
     func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -4080,18 +4098,22 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition {
     	return Edition_EDITION_UNKNOWN
     }
     
    -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet {
    +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet {
     	if x != nil {
    -		return x.Features
    +		return x.OverridableFeatures
     	}
     	return nil
     }
     
    -type SourceCodeInfo_Location struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet {
    +	if x != nil {
    +		return x.FixedFeatures
    +	}
    +	return nil
    +}
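Illustrative sketch, not part of the generated file: with the old Features field split into OverridableFeatures and FixedFeatures, a consumer resolving defaults for an edition might merge the two sets. resolveDefaults below is a hypothetical helper, not an API of this package; it assumes the defaults list is in strict ascending edition order, as the FeatureSetDefaults comment above requires.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// resolveDefaults picks the closest matching edition at or before ed and
// returns its fixed defaults merged with its overridable defaults.
func resolveDefaults(d *descriptorpb.FeatureSetDefaults, ed descriptorpb.Edition) *descriptorpb.FeatureSet {
	var best *descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault
	for _, def := range d.GetDefaults() { // strict ascending order by edition
		if def.GetEdition() <= ed {
			best = def
		}
	}
	if best == nil {
		return nil
	}
	merged := &descriptorpb.FeatureSet{}
	if ff := best.GetFixedFeatures(); ff != nil {
		proto.Merge(merged, ff) // features that cannot be overridden in this edition
	}
	if of := best.GetOverridableFeatures(); of != nil {
		proto.Merge(merged, of) // features a .proto file may still override
	}
	return merged
}

func main() {
	// With no defaults recorded, resolution yields nil.
	fmt.Println(resolveDefaults(&descriptorpb.FeatureSetDefaults{}, descriptorpb.Edition_EDITION_2023))
}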
     
    +type SourceCodeInfo_Location struct {
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Identifies which part of the FileDescriptorProto was defined at this
     	// location.
     	//
    @@ -4183,15 +4205,15 @@ type SourceCodeInfo_Location struct {
     	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
     	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
     	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
    +	unknownFields           protoimpl.UnknownFields
    +	sizeCache               protoimpl.SizeCache
     }
     
     func (x *SourceCodeInfo_Location) Reset() {
     	*x = SourceCodeInfo_Location{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *SourceCodeInfo_Location) String() string {
    @@ -4201,8 +4223,8 @@ func (x *SourceCodeInfo_Location) String() string {
     func (*SourceCodeInfo_Location) ProtoMessage() {}
     
     func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -4253,10 +4275,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
     }
     
     type GeneratedCodeInfo_Annotation struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Identifies the element in the original source .proto file. This field
     	// is formatted the same as SourceCodeInfo.Location.path.
     	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
    @@ -4268,17 +4287,17 @@ type GeneratedCodeInfo_Annotation struct {
     	// Identifies the ending offset in bytes in the generated code that
     	// relates to the identified object. The end offset should be one past
     	// the last relevant byte (so the length of the text = end - begin).
    -	End      *int32                                 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
    -	Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
    +	End           *int32                                 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
    +	Semantic      *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *GeneratedCodeInfo_Annotation) Reset() {
     	*x = GeneratedCodeInfo_Annotation{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *GeneratedCodeInfo_Annotation) String() string {
    @@ -4288,8 +4307,8 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
     func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
     
     func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
    -	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -4345,319 +4364,495 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
     	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
     	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
     	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
    +	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
     	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
     	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
     	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
     	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
    -	0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
    -	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
    -	0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
    -	0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65,
    -	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65,
    -	0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c,
    -	0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20,
    -	0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e,
    -	0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65,
    -	0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e,
    -	0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43,
    -	0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04,
    -	0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
    -	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
    -	0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
    -	0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73,
    -	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e,
    -	0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
    -	0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
    -	0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    -	0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74,
    -	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
    -	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
    -	0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
    -	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
    -	0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
    -	0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
    -	0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
    -	0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
    -	0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
    -	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
    -	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
    -	0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
    -	0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
    -	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
    -	0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
    -	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
    -	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
    -	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
    -	0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
    -	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
    -	0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
    -	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
    +	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
    +	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    +	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
    +	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
    +	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
    +	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
    +	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
    +	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
    +	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
    +	0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
    +	0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
    +	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
    +	0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
    +	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
    +	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
    +	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
    +	0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
    +	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
    +	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
    +	0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
    +	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
    +	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
    +	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
    +	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
    +	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    +	0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
    +	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
     	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
    -	0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
    -	0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
    -	0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    -	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
    -	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
    -	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
    -	0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
    -	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    -	0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
    -	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
    +	0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
    +	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
    +	0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
     	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
    -	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
    -	0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
    -	0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    -	0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
    -	0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
    -	0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
    -	0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
    -	0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
    -	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
    -	0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
    -	0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
    -	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    -	0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
    -	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
    -	0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
    -	0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78,
    -	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
    -	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
    -	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    -	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
    -	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
    -	0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
    -	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
    -	0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
    -	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
    -	0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
    -	0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
    -	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
    -	0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
    -	0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
    -	0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88,
    -	0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    -	0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    -	0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
    -	0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c,
    -	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c,
    -	0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
    -	0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73,
    -	0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
    -	0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
    -	0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66,
    -	0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b,
    -	0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a,
    -	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08,
    -	0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65,
    +	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
    +	0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
    +	0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
    +	0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
    +	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
    +	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
    +	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
    +	0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
    +	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
    +	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
    +	0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
    +	0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
    +	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
    +	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
    +	0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
    +	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
    +	0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
    +	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
    +	0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    +	0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
    +	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
    +	0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
    +	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
    +	0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
    +	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
    +	0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
    +	0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    +	0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
    +	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
    +	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
    +	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
    +	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    +	0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    +	0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
    +	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
    +	0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
    +	0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
    +	0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
    +	0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
    +	0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
    +	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
    +	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    +	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
    +	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
    +	0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
    +	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
    +	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
    +	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
    +	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    +	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    +	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
    +	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    +	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
    +	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
    +	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
    +	0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
    +	0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    +	0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
    +	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
    +	0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
    +	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
    +	0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
    +	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
    +	0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
    +	0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
    +	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
    +	0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
    +	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
    +	0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
    +	0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
    +	0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
    +	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
    +	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
    +	0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
    +	0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
    +	0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
    +	0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
    +	0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
    +	0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
    +	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
    +	0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
    +	0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
    +	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
    +	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
    +	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
    +	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
    +	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
    +	0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
     	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
    -	0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a,
    -	0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67,
    +	0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
    +	0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
     	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
     	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
    -	0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c,
    -	0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    -	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
    -	0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20,
    -	0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a,
    -	0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66,
    -	0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f,
    -	0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20,
    -	0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
    -	0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01,
    -	0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
    +	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
    +	0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
    +	0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
    +	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
    +	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
    +	0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
    +	0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
    +	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
    +	0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
    +	0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
    +	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
    +	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
    +	0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
    +	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
    +	0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
    +	0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
    +	0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
    +	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
    +	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
    +	0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
    +	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
    +	0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
    +	0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
    +	0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
    +	0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
    +	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
    +	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
    +	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
    +	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
    +	0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
    +	0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
    +	0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
    +	0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
    +	0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
    +	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
    +	0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
    +	0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
    +	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    +	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    +	0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
    +	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
    +	0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
    +	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
    +	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
    +	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
    +	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    +	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
    +	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
     	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6,
    -	0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
    -	0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
    -	0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45,
    -	0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50,
    -	0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
    -	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54,
    -	0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a,
    -	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b,
    -	0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a,
    -	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a,
    -	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12,
    -	0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12,
    -	0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d,
    -	0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12,
    -	0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32,
    -	0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45,
    -	0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49,
    -	0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
    -	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c,
    -	0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e,
    -	0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45,
    -	0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45,
    -	0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14,
    -	0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
    -	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    -	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
    -	0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    -	0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
    -	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
    -	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a,
    -	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67,
    +	0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
    +	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
    +	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
     	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
    -	0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
    -	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36,
    +	0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    +	0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
    +	0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
    +	0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
    +	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
    +	0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
    +	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
    +	0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
    +	0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
    +	0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
    +	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    +	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
    +	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
     	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
    -	0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
    -	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
    -	0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
    -	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
    -	0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
    -	0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e,
    -	0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
    -	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
    -	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
    -	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d,
    -	0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
    -	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
    -	0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
    -	0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
    -	0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01,
    -	0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    -	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
    -	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06,
    -	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67,
    -	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
    -	0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
    -	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
    +	0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    +	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    +	0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
    +	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
    +	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
    +	0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
    +	0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
    +	0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
    +	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
    +	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
    +	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    +	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
    +	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
    +	0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
    +	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
    +	0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
    +	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
    +	0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
    +	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
    +	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
    +	0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
    +	0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
    +	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
    +	0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
    +	0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
    +	0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
    +	0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    +	0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
    +	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
    +	0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
    +	0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
    +	0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
    +	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
    +	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
    +	0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
    +	0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
    +	0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
    +	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
    +	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
    +	0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
    +	0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
    +	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
    +	0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
    +	0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
    +	0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
    +	0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
    +	0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
    +	0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
    +	0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
    +	0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
    +	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
    +	0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
    +	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
    +	0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
    +	0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
    +	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
    +	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
    +	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
    +	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
    +	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
    +	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
    +	0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
    +	0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
    +	0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
    +	0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
    +	0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
    +	0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
    +	0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
    +	0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
    +	0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
    +	0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
    +	0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
    +	0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
    +	0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
    +	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
    +	0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
    +	0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
    +	0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
    +	0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
    +	0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
    +	0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
    +	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    +	0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
    +	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
    +	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
    +	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
     	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68,
    -	0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
    -	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    -	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74,
    -	0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74,
    -	0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74,
    -	0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75,
    -	0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    -	0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
    -	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
    -	0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
    -	0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
    -	0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
    -	0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65,
    -	0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
    -	0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
    -	0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b,
    -	0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50,
    -	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f,
    -	0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08,
    -	0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43,
    -	0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61,
    -	0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18,
    -	0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61,
    -	0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12,
    -	0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
    -	0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68,
    -	0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61,
    -	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e,
    -	0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74,
    -	0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18,
    -	0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61,
    -	0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66,
    -	0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f,
    -	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f,
    -	0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d,
    -	0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63,
    -	0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61,
    -	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65,
    -	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01,
    -	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e,
    -	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15,
    -	0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
    -	0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
    -	0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
    -	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65,
    -	0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12,
    -	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47,
    -	0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
    -	0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
    -	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
    -	0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
    -	0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
    -	0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
    -	0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
    -	0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
    -	0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
    -	0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
    -	0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
    -	0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
    -	0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
    -	0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
    -	0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
    -	0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
    -	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
    -	0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
    -	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
    -	0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
    -	0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
    -	0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
    -	0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    -	0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
    +	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    +	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
    +	0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
    +	0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
    +	0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
    +	0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
    +	0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
    +	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
    +	0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
    +	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
    +	0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
    +	0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
    +	0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
    +	0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
    +	0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
    +	0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
    +	0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
    +	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
    +	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
    +	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
    +	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
    +	0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
    +	0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
    +	0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
    +	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
    +	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
    +	0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
    +	0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
    +	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    +	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
    +	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
    +	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
    +	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
    +	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
    +	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
    +	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
    +	0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
    +	0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
    +	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
    +	0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
    +	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
    +	0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
    +	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
    +	0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
    +	0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
    +	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
    +	0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
    +	0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
    +	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
    +	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
    +	0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
    +	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
    +	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
    +	0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
    +	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
    +	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
    +	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
    +	0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
    +	0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
    +	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
    +	0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
    +	0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
    +	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    +	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
    +	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
    +	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
    +	0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
    +	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
    +	0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
    +	0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    +	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    +	0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
    +	0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
    +	0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
    +	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
    +	0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
    +	0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
    +	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
    +	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
    +	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
    +	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
    +	0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
    +	0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
    +	0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    +	0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
    +	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
    +	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
    +	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
    +	0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
    +	0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
    +	0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
    +	0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
    +	0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
    +	0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
    +	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
    +	0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
    +	0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
    +	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
    +	0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
    +	0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
    +	0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
    +	0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
    +	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
    +	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
    +	0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
    +	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
    +	0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
    +	0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
    +	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
    +	0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
    +	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
    +	0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
    +	0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
    +	0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
    +	0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
    +	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
    +	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    +	0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
    +	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
    +	0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
    +	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
    +	0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    +	0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
    +	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
    +	0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
    +	0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
    +	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
    +	0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    +	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
    +	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
    +	0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
    +	0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
    +	0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
    +	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
    +	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
    +	0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
    +	0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
    +	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    +	0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
    +	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
    +	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
    +	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
    +	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
    +	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
    +	0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
    +	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
    +	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
    +	0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
    +	0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
    +	0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
    +	0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
    +	0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
    +	0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    +	0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
     	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
     	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
     	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
    @@ -4665,288 +4860,145 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
     	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
     	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
     	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
    -	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70,
    -	0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50,
    -	0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49,
    -	0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e,
    -	0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
    -	0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03,
    -	0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    -	0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f,
    -	0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
    -	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
    -	0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c,
    -	0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65,
    -	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f,
    -	0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c,
    -	0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
    -	0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a,
    -	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
    -	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
    -	0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79,
    -	0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79,
    -	0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c,
    -	0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
    -	0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08,
    -	0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
    -	0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43,
    -	0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
    -	0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    +	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
    +	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
    +	0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    +	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
    +	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    +	0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
    +	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    +	0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
    +	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
    +	0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
    +	0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    +	0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
    +	0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
    +	0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
     	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
     	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
     	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
     	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
     	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
    -	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05,
    -	0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04,
    -	0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
    -	0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e,
    -	0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b,
    -	0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64,
    -	0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
    -	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
    -	0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41,
    -	0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a,
    -	0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04,
    -	0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
    -	0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
    -	0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
    -	0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
    -	0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
    -	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77,
    -	0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
    -	0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f,
    -	0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
    -	0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74,
    -	0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20,
    -	0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
    -	0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a,
    -	0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e,
    -	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    -	0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07,
    -	0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69,
    -	0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28,
    -	0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    -	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52,
    -	0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73,
    -	0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01,
    -	0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52,
    -	0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
    +	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
    +	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
    +	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
    +	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
    +	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
    +	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
    +	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
     	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
     	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
     	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
     	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
     	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
    -	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
    -	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    -	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
    -	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
    -	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
    -	0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49,
    -	0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10,
    -	0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02,
    -	0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53,
    -	0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f,
    -	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
    -	0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45,
    -	0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
    -	0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52,
    -	0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45,
    -	0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c,
    -	0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54,
    -	0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
    -	0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10,
    -	0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45,
    -	0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
    -	0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47,
    -	0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
    -	0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11,
    -	0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c,
    -	0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
    -	0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41,
    -	0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06,
    -	0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
    -	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13,
    -	0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
    -	0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f,
    -	0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08,
    -	0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04,
    -	0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
    -	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
    -	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    -	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
    -	0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69,
    -	0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41,
    -	0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
    -	0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
    -	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64,
    -	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79,
    -	0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
    -	0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52,
    -	0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63,
    -	0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69,
    -	0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
    -	0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
    -	0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14,
    -	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
    -	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    -	0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
    -	0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d,
    -	0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a,
    -	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
    -	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
    -	0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
    -	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
    -	0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c,
    -	0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01,
    -	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67,
    -	0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
    -	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
    -	0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
    -	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
    -	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    -	0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e,
    -	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37,
    -	0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b,
    -	0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
    -	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
    -	0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
    -	0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58,
    -	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
    -	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    -	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
    -	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    -	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
    -	0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
    -	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
    -	0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
    -	0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11,
    -	0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65,
    -	0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    -	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
    -	0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
    -	0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69,
    -	0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
    -	0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28,
    -	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08,
    -	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
    -	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    -	0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    -	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
    -	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
    -	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
    -	0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63,
    -	0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
    -	0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
    -	0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43,
    -	0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
    -	0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22,
    -	0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
    -	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
    -	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
    -	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65,
    -	0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64,
    -	0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03,
    -	0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
    -	0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
    -	0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
    -	0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61,
    -	0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
    -	0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52,
    -	0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
    -	0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
    -	0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56,
    -	0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76,
    -	0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69,
    -	0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65,
    -	0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
    -	0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
    -	0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09,
    -	0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52,
    -	0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f,
    -	0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52,
    -	0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a,
    -	0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e,
    -	0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01,
    -	0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
    -	0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42,
    -	0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
    -	0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49,
    -	0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
    -	0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c,
    -	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75,
    -	0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67,
    +	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
    +	0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    +	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
    +	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
    +	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
    +	0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
    +	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
    +	0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
    +	0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
    +	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
    +	0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
    +	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
     	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
    -	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
    -	0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b,
    -	0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04,
    -	0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70,
    -	0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66,
    -	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20,
    -	0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
    -	0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
    -	0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01,
    -	0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6,
    -	0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52,
    -	0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
    -	0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76,
    -	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32,
    -	0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66,
    -	0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01,
    -	0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18,
    -	0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07,
    -	0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    -	0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
    -	0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    +	0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
    +	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
    +	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
    +	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
    +	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
    +	0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
    +	0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
    +	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
    +	0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
    +	0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
    +	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
    +	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
    +	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    +	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
    +	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
    +	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
    +	0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
    +	0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
    +	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
    +	0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
    +	0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
    +	0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
    +	0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
    +	0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
    +	0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
    +	0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
    +	0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
    +	0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
    +	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
    +	0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
    +	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
    +	0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
    +	0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
    +	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
    +	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
    +	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
    +	0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
    +	0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    +	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
    +	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
    +	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
    +	0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
    +	0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
    +	0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
    +	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
    +	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    +	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
    +	0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
    +	0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
    +	0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
    +	0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
    +	0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
    +	0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
     	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45,
    -	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98,
    -	0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
    -	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61,
    -	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73,
    -	0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32,
    -	0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    -	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f,
    -	0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98,
    -	0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59,
    -	0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2,
    -	0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73,
    +	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
    +	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
    +	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
    +	0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
    +	0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
    +	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
    +	0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
    +	0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
    +	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
    +	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
    +	0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
    +	0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
    +	0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
    +	0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
    +	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    +	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
    +	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
    +	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
    +	0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
    +	0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
    +	0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
    +	0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
    +	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
    +	0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
    +	0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
    +	0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
    +	0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
     	0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
     	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
     	0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
    @@ -4962,113 +5014,122 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
     	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
     	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
     	0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
    -	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
    +	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
     	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
     	0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
     	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
    -	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f,
    -	0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12,
    -	0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
    -	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a,
    -	0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44,
    -	0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10,
    -	0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
    -	0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55,
    -	0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f,
    -	0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45,
    -	0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07,
    -	0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07,
    -	0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e,
    -	0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12,
    -	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c,
    -	0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01,
    -	0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
    -	0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
    -	0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75,
    -	0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f,
    -	0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
    -	0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
    -	0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
    -	0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69,
    -	0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
    -	0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69,
    -	0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
    -	0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12,
    -	0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e,
    -	0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74,
    -	0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
    -	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    -	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
    -	0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a,
    -	0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
    -	0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28,
    -	0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e,
    -	0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63,
    -	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
    -	0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
    -	0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70,
    -	0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70,
    -	0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f,
    -	0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65,
    -	0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a,
    -	0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
    -	0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
    -	0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65,
    -	0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63,
    -	0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c,
    -	0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f,
    -	0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72,
    -	0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a,
    -	0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
    -	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
    -	0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
    -	0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a,
    -	0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
    -	0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
    -	0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c,
    -	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46,
    -	0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01,
    -	0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64,
    -	0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73,
    -	0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e,
    -	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    +	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
    +	0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
    +	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
    +	0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
    +	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
    +	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
    +	0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
    +	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
    +	0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
    +	0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
    +	0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
    +	0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
    +	0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
    +	0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
    +	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
    +	0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
    +	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
    +	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
    +	0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
    +	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
    +	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
    +	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
    +	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
    +	0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    +	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
    +	0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
    +	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
    +	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
    +	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
    +	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
    +	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    +	0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
    +	0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
    +	0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
    +	0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
    +	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    +	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
    +	0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
    +	0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
    +	0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
    +	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
    +	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
    +	0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
    +	0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
    +	0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
    +	0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
    +	0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
    +	0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
    +	0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
    +	0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
    +	0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
    +	0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
    +	0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
    +	0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
    +	0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
    +	0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
    +	0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
     	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
    -	0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d,
    -	0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22,
    -	0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e,
    -	0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09,
    -	0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64,
    -	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
    -	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44,
    -	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12,
    -	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f,
    -	0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
    -	0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49,
    -	0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44,
    -	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
    -	0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32,
    -	0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17,
    -	0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45,
    -	0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45,
    -	0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53,
    -	0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44,
    -	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54,
    -	0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49,
    -	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e,
    -	0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
    -	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    -	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
    -	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63,
    -	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50,
    -	0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
    +	0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
    +	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
    +	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
    +	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    +	0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
    +	0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
    +	0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
    +	0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
    +	0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
    +	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
    +	0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
    +	0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
    +	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
    +	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
    +	0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
    +	0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
    +	0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
    +	0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
    +	0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
    +	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
    +	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
    +	0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
    +	0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
    +	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
    +	0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
    +	0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
    +	0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
    +	0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
    +	0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
    +	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
    +	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
    +	0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
    +	0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
    +	0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
    +	0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
    +	0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
    +	0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
    +	0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    +	0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
    +	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
    +	0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    +	0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
    +	0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
     }
     
     var (
    @@ -5084,8 +5145,8 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
     }
     
     var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
    -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
    -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
    +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
    +var file_google_protobuf_descriptor_proto_goTypes = []any{
     	(Edition)(0), // 0: google.protobuf.Edition
     	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
     	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
    @@ -5131,10 +5192,11 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
     	(*ExtensionRangeOptions_Declaration)(nil),           // 42: google.protobuf.ExtensionRangeOptions.Declaration
     	(*EnumDescriptorProto_EnumReservedRange)(nil),       // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
     	(*FieldOptions_EditionDefault)(nil),                 // 44: google.protobuf.FieldOptions.EditionDefault
    -	(*UninterpretedOption_NamePart)(nil),                // 45: google.protobuf.UninterpretedOption.NamePart
    -	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    -	(*SourceCodeInfo_Location)(nil),                     // 47: google.protobuf.SourceCodeInfo.Location
    -	(*GeneratedCodeInfo_Annotation)(nil),                // 48: google.protobuf.GeneratedCodeInfo.Annotation
    +	(*FieldOptions_FeatureSupport)(nil),                 // 45: google.protobuf.FieldOptions.FeatureSupport
    +	(*UninterpretedOption_NamePart)(nil),                // 46: google.protobuf.UninterpretedOption.NamePart
    +	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    +	(*SourceCodeInfo_Location)(nil),                     // 48: google.protobuf.SourceCodeInfo.Location
    +	(*GeneratedCodeInfo_Annotation)(nil),                // 49: google.protobuf.GeneratedCodeInfo.Annotation
     }
     var file_google_protobuf_descriptor_proto_depIdxs = []int32{
     	18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
    @@ -5179,40 +5241,46 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{
     	8,  // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
     	44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
     	36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	9,  // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
    -	36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
    -	35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    -	45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
    -	10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
    -	11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
    -	12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
    -	13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
    -	14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
    -	15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
    -	46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    -	0,  // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
    -	0,  // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
    -	47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
    -	48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
    -	20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
    -	0,  // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
    -	0,  // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
    -	36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet
    -	16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
    -	71, // [71:71] is the sub-list for method output_type
    -	71, // [71:71] is the sub-list for method input_type
    -	71, // [71:71] is the sub-list for extension type_name
    -	71, // [71:71] is the sub-list for extension extendee
    -	0,  // [0:71] is the sub-list for field type_name
    +	45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
    +	35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
    +	35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
    +	35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
    +	45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
    +	35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
    +	35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
    +	36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
    +	35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
    +	46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
    +	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
    +	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
    +	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
    +	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
    +	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
    +	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
    +	47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
    +	0,  // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
    +	0,  // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
    +	48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
    +	49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
    +	20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
    +	0,  // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
    +	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
    +	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
    +	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
    +	0,  // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
    +	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
    +	36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
    +	16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
    +	77, // [77:77] is the sub-list for method output_type
    +	77, // [77:77] is the sub-list for method input_type
    +	77, // [77:77] is the sub-list for extension type_name
    +	77, // [77:77] is the sub-list for extension extendee
    +	0,  // [0:77] is the sub-list for field type_name
     }
     
     func init() { file_google_protobuf_descriptor_proto_init() }
    @@ -5220,419 +5288,13 @@ func file_google_protobuf_descriptor_proto_init() {
     	if File_google_protobuf_descriptor_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FileDescriptorSet); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FileDescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*DescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ExtensionRangeOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FieldDescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*OneofDescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*EnumDescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*EnumValueDescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ServiceDescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*MethodDescriptorProto); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FileOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*MessageOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FieldOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*OneofOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*EnumOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*EnumValueOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ServiceOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*MethodOptions); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*UninterpretedOption); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FeatureSet); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			case 3:
    -				return &v.extensionFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FeatureSetDefaults); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*SourceCodeInfo); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*GeneratedCodeInfo); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*DescriptorProto_ExtensionRange); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*DescriptorProto_ReservedRange); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ExtensionRangeOptions_Declaration); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FieldOptions_EditionDefault); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*UninterpretedOption_NamePart); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*SourceCodeInfo_Location); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*GeneratedCodeInfo_Annotation); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
     			RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
     			NumEnums:      17,
    -			NumMessages:   32,
    +			NumMessages:   33,
     			NumExtensions: 0,
     			NumServices:   0,
     		},
    diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
    index 25de5ae008..e0b72eaf92 100644
    --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
    @@ -6,9 +6,9 @@
     // https://developers.google.com/open-source/licenses/bsd
     
     // Code generated by protoc-gen-go. DO NOT EDIT.
    -// source: reflect/protodesc/proto/go_features.proto
    +// source: google/protobuf/go_features.proto
     
    -package proto
    +package gofeaturespb
     
     import (
     	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    @@ -18,22 +18,150 @@ import (
     	sync "sync"
     )
     
    -type GoFeatures struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    +type GoFeatures_APILevel int32
    +
    +const (
    +	// API_LEVEL_UNSPECIFIED results in selecting the OPEN API,
    +	// but needs to be a separate value to distinguish between
    +	// an explicitly set api level or a missing api level.
    +	GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0
    +	GoFeatures_API_OPEN              GoFeatures_APILevel = 1
    +	GoFeatures_API_HYBRID            GoFeatures_APILevel = 2
    +	GoFeatures_API_OPAQUE            GoFeatures_APILevel = 3
    +)
    +
    +// Enum value maps for GoFeatures_APILevel.
    +var (
    +	GoFeatures_APILevel_name = map[int32]string{
    +		0: "API_LEVEL_UNSPECIFIED",
    +		1: "API_OPEN",
    +		2: "API_HYBRID",
    +		3: "API_OPAQUE",
    +	}
    +	GoFeatures_APILevel_value = map[string]int32{
    +		"API_LEVEL_UNSPECIFIED": 0,
    +		"API_OPEN":              1,
    +		"API_HYBRID":            2,
    +		"API_OPAQUE":            3,
    +	}
    +)
    +
    +func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel {
    +	p := new(GoFeatures_APILevel)
    +	*p = x
    +	return p
    +}
    +
    +func (x GoFeatures_APILevel) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor()
    +}
    +
    +func (GoFeatures_APILevel) Type() protoreflect.EnumType {
    +	return &file_google_protobuf_go_features_proto_enumTypes[0]
    +}
    +
    +func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Do not use.
    +func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error {
    +	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
    +	if err != nil {
    +		return err
    +	}
    +	*x = GoFeatures_APILevel(num)
    +	return nil
    +}
    +
    +// Deprecated: Use GoFeatures_APILevel.Descriptor instead.
    +func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) {
    +	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0}
    +}
    +
    +type GoFeatures_StripEnumPrefix int32
    +
    +const (
    +	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED   GoFeatures_StripEnumPrefix = 0
    +	GoFeatures_STRIP_ENUM_PREFIX_KEEP          GoFeatures_StripEnumPrefix = 1
    +	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2
    +	GoFeatures_STRIP_ENUM_PREFIX_STRIP         GoFeatures_StripEnumPrefix = 3
    +)
    +
    +// Enum value maps for GoFeatures_StripEnumPrefix.
    +var (
    +	GoFeatures_StripEnumPrefix_name = map[int32]string{
    +		0: "STRIP_ENUM_PREFIX_UNSPECIFIED",
    +		1: "STRIP_ENUM_PREFIX_KEEP",
    +		2: "STRIP_ENUM_PREFIX_GENERATE_BOTH",
    +		3: "STRIP_ENUM_PREFIX_STRIP",
    +	}
    +	GoFeatures_StripEnumPrefix_value = map[string]int32{
    +		"STRIP_ENUM_PREFIX_UNSPECIFIED":   0,
    +		"STRIP_ENUM_PREFIX_KEEP":          1,
    +		"STRIP_ENUM_PREFIX_GENERATE_BOTH": 2,
    +		"STRIP_ENUM_PREFIX_STRIP":         3,
    +	}
    +)
    +
    +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix {
    +	p := new(GoFeatures_StripEnumPrefix)
    +	*p = x
    +	return p
    +}
    +
    +func (x GoFeatures_StripEnumPrefix) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor()
    +}
    +
    +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType {
    +	return &file_google_protobuf_go_features_proto_enumTypes[1]
    +}
    +
    +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Do not use.
    +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error {
    +	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
    +	if err != nil {
    +		return err
    +	}
    +	*x = GoFeatures_StripEnumPrefix(num)
    +	return nil
    +}
     
    +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead.
    +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) {
    +	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1}
    +}
    +
    +type GoFeatures struct {
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Whether or not to generate the deprecated UnmarshalJSON method for enums.
    +	// Can only be true for proto using the Open Struct api.
     	LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
    +	// One of OPEN, HYBRID or OPAQUE.
    +	ApiLevel        *GoFeatures_APILevel        `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"`
    +	StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"`
    +	unknownFields   protoimpl.UnknownFields
    +	sizeCache       protoimpl.SizeCache
     }
     
     func (x *GoFeatures) Reset() {
     	*x = GoFeatures{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *GoFeatures) String() string {
    @@ -43,8 +171,8 @@ func (x *GoFeatures) String() string {
     func (*GoFeatures) ProtoMessage() {}
     
     func (x *GoFeatures) ProtoReflect() protoreflect.Message {
    -	mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -56,7 +184,7 @@ func (x *GoFeatures) ProtoReflect() protoreflect.Message {
     
     // Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead.
     func (*GoFeatures) Descriptor() ([]byte, []int) {
    -	return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0}
    +	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0}
     }
     
     func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
    @@ -66,112 +194,152 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
     	return false
     }
     
    -var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
    +func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel {
    +	if x != nil && x.ApiLevel != nil {
    +		return *x.ApiLevel
    +	}
    +	return GoFeatures_API_LEVEL_UNSPECIFIED
    +}
    +
    +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix {
    +	if x != nil && x.StripEnumPrefix != nil {
    +		return *x.StripEnumPrefix
    +	}
    +	return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED
    +}
    +
    +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
     	{
     		ExtendedType:  (*descriptorpb.FeatureSet)(nil),
     		ExtensionType: (*GoFeatures)(nil),
     		Field:         1002,
    -		Name:          "google.protobuf.go",
    +		Name:          "pb.go",
     		Tag:           "bytes,1002,opt,name=go",
    -		Filename:      "reflect/protodesc/proto/go_features.proto",
    +		Filename:      "google/protobuf/go_features.proto",
     	},
     }
     
     // Extension fields to descriptorpb.FeatureSet.
     var (
    -	// optional google.protobuf.GoFeatures go = 1002;
    -	E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0]
    +	// optional pb.GoFeatures go = 1002;
    +	E_Go = &file_google_protobuf_go_features_proto_extTypes[0]
     )
     
    -var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor
    -
    -var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{
    -	0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64,
    -	0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61,
    -	0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
    -	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f,
    -	0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65,
    -	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a,
    -	0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a,
    -	0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c,
    -	0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
    -	0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75,
    -	0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7,
    -	0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68,
    -	0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f,
    -	0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    -	0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07,
    -	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    -	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    -	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
    +
    +var file_google_protobuf_go_features_proto_rawDesc = []byte{
    +	0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
    +	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
    +	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    +	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f,
    +	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
    +	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
    +	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
    +	0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
    +	0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
    +	0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
    +	0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
    +	0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
    +	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
    +	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
    +	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
    +	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
    +	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69,
    +	0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70,
    +	0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49,
    +	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01,
    +	0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
    +	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f,
    +	0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2,
    +	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
    +	0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72,
    +	0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e,
    +	0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70,
    +	0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98,
    +	0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52,
    +	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
    +	0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74,
    +	0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a,
    +	0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49,
    +	0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
    +	0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e,
    +	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44,
    +	0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45,
    +	0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d,
    +	0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f,
    +	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50,
    +	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52,
    +	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
    +	0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45,
    +	0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52,
    +	0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54,
    +	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
    +	0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    +	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28,
    +	0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
    +	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
     	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    -	0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74,
    -	0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    +	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
    +	0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
     }
     
     var (
    -	file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once
    -	file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc
    +	file_google_protobuf_go_features_proto_rawDescOnce sync.Once
    +	file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc
     )
     
    -func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte {
    -	file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() {
    -		file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData)
    +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
    +	file_google_protobuf_go_features_proto_rawDescOnce.Do(func() {
    +		file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData)
     	})
    -	return file_reflect_protodesc_proto_go_features_proto_rawDescData
    +	return file_google_protobuf_go_features_proto_rawDescData
     }
     
    -var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    -var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{
    -	(*GoFeatures)(nil),              // 0: google.protobuf.GoFeatures
    -	(*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet
    +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
    +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    +var file_google_protobuf_go_features_proto_goTypes = []any{
    +	(GoFeatures_APILevel)(0),        // 0: pb.GoFeatures.APILevel
    +	(GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix
    +	(*GoFeatures)(nil),              // 2: pb.GoFeatures
    +	(*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet
     }
    -var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{
    -	1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet
    -	0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures
    -	2, // [2:2] is the sub-list for method output_type
    -	2, // [2:2] is the sub-list for method input_type
    -	1, // [1:2] is the sub-list for extension type_name
    -	0, // [0:1] is the sub-list for extension extendee
    -	0, // [0:0] is the sub-list for field type_name
    +var file_google_protobuf_go_features_proto_depIdxs = []int32{
    +	0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel
    +	1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix
    +	3, // 2: pb.go:extendee -> google.protobuf.FeatureSet
    +	2, // 3: pb.go:type_name -> pb.GoFeatures
    +	4, // [4:4] is the sub-list for method output_type
    +	4, // [4:4] is the sub-list for method input_type
    +	3, // [3:4] is the sub-list for extension type_name
    +	2, // [2:3] is the sub-list for extension extendee
    +	0, // [0:2] is the sub-list for field type_name
     }
     
    -func init() { file_reflect_protodesc_proto_go_features_proto_init() }
    -func file_reflect_protodesc_proto_go_features_proto_init() {
    -	if File_reflect_protodesc_proto_go_features_proto != nil {
    +func init() { file_google_protobuf_go_features_proto_init() }
    +func file_google_protobuf_go_features_proto_init() {
    +	if File_google_protobuf_go_features_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*GoFeatures); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
     			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
    -			RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc,
    -			NumEnums:      0,
    +			RawDescriptor: file_google_protobuf_go_features_proto_rawDesc,
    +			NumEnums:      2,
     			NumMessages:   1,
     			NumExtensions: 1,
     			NumServices:   0,
     		},
    -		GoTypes:           file_reflect_protodesc_proto_go_features_proto_goTypes,
    -		DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs,
    -		MessageInfos:      file_reflect_protodesc_proto_go_features_proto_msgTypes,
    -		ExtensionInfos:    file_reflect_protodesc_proto_go_features_proto_extTypes,
    +		GoTypes:           file_google_protobuf_go_features_proto_goTypes,
    +		DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
    +		EnumInfos:         file_google_protobuf_go_features_proto_enumTypes,
    +		MessageInfos:      file_google_protobuf_go_features_proto_msgTypes,
    +		ExtensionInfos:    file_google_protobuf_go_features_proto_extTypes,
     	}.Build()
    -	File_reflect_protodesc_proto_go_features_proto = out.File
    -	file_reflect_protodesc_proto_go_features_proto_rawDesc = nil
    -	file_reflect_protodesc_proto_go_features_proto_goTypes = nil
    -	file_reflect_protodesc_proto_go_features_proto_depIdxs = nil
    +	File_google_protobuf_go_features_proto = out.File
    +	file_google_protobuf_go_features_proto_rawDesc = nil
    +	file_google_protobuf_go_features_proto_goTypes = nil
    +	file_google_protobuf_go_features_proto_depIdxs = nil
     }
    diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto
    deleted file mode 100644
    index d246571296..0000000000
    --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -// Protocol Buffers - Google's data interchange format
    -// Copyright 2023 Google Inc.  All rights reserved.
    -//
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file or at
    -// https://developers.google.com/open-source/licenses/bsd
    -
    -syntax = "proto2";
    -
    -package google.protobuf;
    -
    -import "google/protobuf/descriptor.proto";
    -
    -option go_package = "google.golang.org/protobuf/types/gofeaturespb";
    -
    -extend google.protobuf.FeatureSet {
    -  optional GoFeatures go = 1002;
    -}
    -
    -message GoFeatures {
    -  // Whether or not to generate the deprecated UnmarshalJSON method for enums.
    -  optional bool legacy_unmarshal_json_enum = 1 [
    -    retention = RETENTION_RUNTIME,
    -    targets = TARGET_TYPE_ENUM,
    -    edition_defaults = { edition: EDITION_PROTO2, value: "true" },
    -    edition_defaults = { edition: EDITION_PROTO3, value: "false" }
    -  ];
    -}
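The vendored gofeaturespb package is regenerated from google/protobuf/go_features.proto (proto package `pb`) instead of reflect/protodesc/proto/go_features.proto, and the descriptor above adds the APILevel and StripEnumPrefix feature enums alongside legacy_unmarshal_json_enum. A minimal sketch of reading the `go` extension (field 1002 on google.protobuf.FeatureSet) through the regenerated E_Go extension type; the empty FeatureSet literal is illustrative only and not part of this change:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	// Illustrative FeatureSet; in practice this comes from a resolved
	// file or message descriptor rather than a literal.
	fs := &descriptorpb.FeatureSet{}
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{})

	// E_Go is the regenerated extension on google.protobuf.FeatureSet;
	// the accessors correspond to the new api_level and strip_enum_prefix fields.
	goFeatures := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
	fmt.Println(goFeatures.GetApiLevel(), goFeatures.GetStripEnumPrefix())
}
```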
    diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
    index 9de51be540..191552cce0 100644
    --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
    @@ -210,10 +210,7 @@ import (
     //	  "value": "1.212s"
     //	}
     type Any struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// A URL/resource name that uniquely identifies the type of the serialized
     	// protocol buffer message. This string must contain at least
     	// one "/" character. The last segment of the URL's path must represent
    @@ -244,7 +241,9 @@ type Any struct {
     	// used with implementation specific semantics.
     	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
     	// Must be a valid serialized protocol buffer of the above specified type.
    -	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // New marshals src into a new Any instance.
    @@ -368,11 +367,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
     
     func (x *Any) Reset() {
     	*x = Any{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_any_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_any_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Any) String() string {
    @@ -383,7 +380,7 @@ func (*Any) ProtoMessage() {}
     
     func (x *Any) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_any_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -445,7 +442,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte {
     }
     
     var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    -var file_google_protobuf_any_proto_goTypes = []interface{}{
    +var file_google_protobuf_any_proto_goTypes = []any{
     	(*Any)(nil), // 0: google.protobuf.Any
     }
     var file_google_protobuf_any_proto_depIdxs = []int32{
    @@ -461,20 +458,6 @@ func file_google_protobuf_any_proto_init() {
     	if File_google_protobuf_any_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Any); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
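As with the other regenerated files below, anypb drops the protoimpl.UnsafeEnabled exporter fallback, tags the message struct with `protogen:"open.v1"`, and moves unknownFields/sizeCache below the declared fields; the exported Any API is untouched. A small sketch, using wrapperspb.StringValue purely as an example payload:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Pack a message into Any and unpack it again; the regenerated
	// struct layout above does not change this behavior.
	a, err := anypb.New(wrapperspb.String("hello"))
	if err != nil {
		panic(err)
	}
	m, err := a.UnmarshalNew()
	if err != nil {
		panic(err)
	}
	fmt.Println(m.(*wrapperspb.StringValue).GetValue()) // hello
}
```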
    diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
    index df709a8dd4..34d76e6cd9 100644
    --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
    @@ -141,10 +141,7 @@ import (
     // be expressed in JSON format as "3.000000001s", and 3 seconds and 1
     // microsecond should be expressed in JSON format as "3.000001s".
     type Duration struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Signed seconds of the span of time. Must be from -315,576,000,000
     	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
     	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
    @@ -155,7 +152,9 @@ type Duration struct {
     	// of one second or more, a non-zero value for the `nanos` field must be
     	// of the same sign as the `seconds` field. Must be from -999,999,999
     	// to +999,999,999 inclusive.
    -	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
    +	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // New constructs a new Duration from the provided time.Duration.
    @@ -245,11 +244,9 @@ func (x *Duration) check() uint {
     
     func (x *Duration) Reset() {
     	*x = Duration{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_duration_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_duration_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Duration) String() string {
    @@ -260,7 +257,7 @@ func (*Duration) ProtoMessage() {}
     
     func (x *Duration) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_duration_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -323,7 +320,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
     }
     
     var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    -var file_google_protobuf_duration_proto_goTypes = []interface{}{
    +var file_google_protobuf_duration_proto_goTypes = []any{
     	(*Duration)(nil), // 0: google.protobuf.Duration
     }
     var file_google_protobuf_duration_proto_depIdxs = []int32{
    @@ -339,20 +336,6 @@ func file_google_protobuf_duration_proto_init() {
     	if File_google_protobuf_duration_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Duration); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
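durationpb gets the same mechanical regeneration; the helpers described in the retained doc comments (New, AsDuration, the validity checks) behave as before. A short sketch for reference:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Round-trip a Go time.Duration through the Duration message.
	d := durationpb.New(1500 * time.Millisecond)
	if err := d.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1.5s
}
```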
    diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
    index 9a7277ba39..7fcdd382cc 100644
    --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
    @@ -48,18 +48,16 @@ import (
     //	  rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
     //	}
     type Empty struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    +	state         protoimpl.MessageState `protogen:"open.v1"`
     	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     func (x *Empty) Reset() {
     	*x = Empty{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_empty_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_empty_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Empty) String() string {
    @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {}
     
     func (x *Empty) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_empty_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -115,7 +113,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
     }
     
     var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    -var file_google_protobuf_empty_proto_goTypes = []interface{}{
    +var file_google_protobuf_empty_proto_goTypes = []any{
     	(*Empty)(nil), // 0: google.protobuf.Empty
     }
     var file_google_protobuf_empty_proto_depIdxs = []int32{
    @@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() {
     	if File_google_protobuf_empty_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Empty); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
    diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
    index e8789cb331..e5d7da38c2 100644
    --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
    @@ -284,12 +284,11 @@ import (
     // request should verify the included field paths, and return an
     // `INVALID_ARGUMENT` error if any path is unmappable.
     type FieldMask struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The set of field mask paths.
    -	Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
    +	Paths         []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // New constructs a field mask from a list of paths and verifies that
    @@ -467,11 +466,9 @@ func rangeFields(path string, f func(field string) bool) bool {
     
     func (x *FieldMask) Reset() {
     	*x = FieldMask{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FieldMask) String() string {
    @@ -482,7 +479,7 @@ func (*FieldMask) ProtoMessage() {}
     
     func (x *FieldMask) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -537,7 +534,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
     }
     
     var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
    +var file_google_protobuf_field_mask_proto_goTypes = []any{
     	(*FieldMask)(nil), // 0: google.protobuf.FieldMask
     }
     var file_google_protobuf_field_mask_proto_depIdxs = []int32{
    @@ -553,20 +550,6 @@ func file_google_protobuf_field_mask_proto_init() {
     	if File_google_protobuf_field_mask_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FieldMask); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
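fieldmaskpb likewise only changes in generated plumbing; New still verifies each path against the target message's descriptor, as its doc comment above states. A sketch that uses wrapperspb.StringValue only as a convenient stand-in message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/fieldmaskpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Build a mask and have New check the path against the message descriptor;
	// "value" is a real field of StringValue, chosen here for illustration.
	mask, err := fieldmaskpb.New(&wrapperspb.StringValue{}, "value")
	if err != nil {
		panic(err)
	}
	fmt.Println(mask.GetPaths()) // [value]
}
```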
    diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
    index d2bac8b88e..f2c53ea337 100644
    --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
    @@ -49,11 +49,11 @@
     // The standard Go "encoding/json" package has functionality to serialize
     // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
     // ListValue.AsSlice methods can convert the protobuf message representation into
    -// a form represented by interface{}, map[string]interface{}, and []interface{}.
    +// a form represented by any, map[string]any, and []any.
     // This form can be used with other packages that operate on such data structures
     // and also directly with the standard json package.
     //
    -// In order to convert the interface{}, map[string]interface{}, and []interface{}
    +// In order to convert the any, map[string]any, and []any
     // forms back as Value, Struct, and ListValue messages, use the NewStruct,
     // NewList, and NewValue constructor functions.
     //
    @@ -88,28 +88,28 @@
     //
     // To construct a Value message representing the above JSON object:
     //
    -//	m, err := structpb.NewValue(map[string]interface{}{
    +//	m, err := structpb.NewValue(map[string]any{
     //		"firstName": "John",
     //		"lastName":  "Smith",
     //		"isAlive":   true,
     //		"age":       27,
    -//		"address": map[string]interface{}{
    +//		"address": map[string]any{
     //			"streetAddress": "21 2nd Street",
     //			"city":          "New York",
     //			"state":         "NY",
     //			"postalCode":    "10021-3100",
     //		},
    -//		"phoneNumbers": []interface{}{
    -//			map[string]interface{}{
    +//		"phoneNumbers": []any{
    +//			map[string]any{
     //				"type":   "home",
     //				"number": "212 555-1234",
     //			},
    -//			map[string]interface{}{
    +//			map[string]any{
     //				"type":   "office",
     //				"number": "646 555-4567",
     //			},
     //		},
    -//		"children": []interface{}{},
    +//		"children": []any{},
     //		"spouse":   nil,
     //	})
     //	if err != nil {
    @@ -120,6 +120,7 @@ package structpb
     
     import (
     	base64 "encoding/base64"
    +	json "encoding/json"
     	protojson "google.golang.org/protobuf/encoding/protojson"
     	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
     	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    @@ -186,18 +187,17 @@ func (NullValue) EnumDescriptor() ([]byte, []int) {
     //
     // The JSON representation for `Struct` is JSON object.
     type Struct struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Unordered map of dynamically typed values.
    -	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    +	Fields        map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // NewStruct constructs a Struct from a general-purpose Go map.
     // The map keys must be valid UTF-8.
     // The map values are converted using NewValue.
    -func NewStruct(v map[string]interface{}) (*Struct, error) {
    +func NewStruct(v map[string]any) (*Struct, error) {
     	x := &Struct{Fields: make(map[string]*Value, len(v))}
     	for k, v := range v {
     		if !utf8.ValidString(k) {
    @@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) {
     
     // AsMap converts x to a general-purpose Go map.
     // The map values are converted by calling Value.AsInterface.
    -func (x *Struct) AsMap() map[string]interface{} {
    +func (x *Struct) AsMap() map[string]any {
     	f := x.GetFields()
    -	vs := make(map[string]interface{}, len(f))
    +	vs := make(map[string]any, len(f))
     	for k, v := range f {
     		vs[k] = v.AsInterface()
     	}
    @@ -233,11 +233,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error {
     
     func (x *Struct) Reset() {
     	*x = Struct{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_struct_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_struct_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Struct) String() string {
    @@ -248,7 +246,7 @@ func (*Struct) ProtoMessage() {}
     
     func (x *Struct) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_struct_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -277,13 +275,10 @@ func (x *Struct) GetFields() map[string]*Value {
     //
     // The JSON representation for `Value` is JSON value.
     type Value struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The kind of value.
     	//
    -	// Types that are assignable to Kind:
    +	// Types that are valid to be assigned to Kind:
     	//
     	//	*Value_NullValue
     	//	*Value_NumberValue
    @@ -291,28 +286,31 @@ type Value struct {
     	//	*Value_BoolValue
     	//	*Value_StructValue
     	//	*Value_ListValue
    -	Kind isValue_Kind `protobuf_oneof:"kind"`
    +	Kind          isValue_Kind `protobuf_oneof:"kind"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // NewValue constructs a Value from a general-purpose Go interface.
     //
    -//	╔════════════════════════╤════════════════════════════════════════════╗
    -//	║ Go type                │ Conversion                                 ║
    -//	╠════════════════════════╪════════════════════════════════════════════╣
    -//	║ nil                    │ stored as NullValue                        ║
    -//	║ bool                   │ stored as BoolValue                        ║
    -//	║ int, int32, int64      │ stored as NumberValue                      ║
    -//	║ uint, uint32, uint64   │ stored as NumberValue                      ║
    -//	║ float32, float64       │ stored as NumberValue                      ║
    -//	║ string                 │ stored as StringValue; must be valid UTF-8 ║
    -//	║ []byte                 │ stored as StringValue; base64-encoded      ║
    -//	║ map[string]interface{} │ stored as StructValue                      ║
    -//	║ []interface{}          │ stored as ListValue                        ║
    -//	╚════════════════════════╧════════════════════════════════════════════╝
    +//	╔═══════════════════════════════════════╤════════════════════════════════════════════╗
    +//	║ Go type                               │ Conversion                                 ║
    +//	╠═══════════════════════════════════════╪════════════════════════════════════════════╣
    +//	║ nil                                   │ stored as NullValue                        ║
    +//	║ bool                                  │ stored as BoolValue                        ║
    +//	║ int, int8, int16, int32, int64        │ stored as NumberValue                      ║
    +//	║ uint, uint8, uint16, uint32, uint64   │ stored as NumberValue                      ║
    +//	║ float32, float64                      │ stored as NumberValue                      ║
    +//	║ json.Number                           │ stored as NumberValue                      ║
    +//	║ string                                │ stored as StringValue; must be valid UTF-8 ║
    +//	║ []byte                                │ stored as StringValue; base64-encoded      ║
    +//	║ map[string]any                        │ stored as StructValue                      ║
    +//	║ []any                                 │ stored as ListValue                        ║
    +//	╚═══════════════════════════════════════╧════════════════════════════════════════════╝
     //
     // When converting an int64 or uint64 to a NumberValue, numeric precision loss
     // is possible since they are stored as a float64.
    -func NewValue(v interface{}) (*Value, error) {
    +func NewValue(v any) (*Value, error) {
     	switch v := v.(type) {
     	case nil:
     		return NewNullValue(), nil
    @@ -320,12 +318,20 @@ func NewValue(v interface{}) (*Value, error) {
     		return NewBoolValue(v), nil
     	case int:
     		return NewNumberValue(float64(v)), nil
    +	case int8:
    +		return NewNumberValue(float64(v)), nil
    +	case int16:
    +		return NewNumberValue(float64(v)), nil
     	case int32:
     		return NewNumberValue(float64(v)), nil
     	case int64:
     		return NewNumberValue(float64(v)), nil
     	case uint:
     		return NewNumberValue(float64(v)), nil
    +	case uint8:
    +		return NewNumberValue(float64(v)), nil
    +	case uint16:
    +		return NewNumberValue(float64(v)), nil
     	case uint32:
     		return NewNumberValue(float64(v)), nil
     	case uint64:
    @@ -334,6 +340,12 @@ func NewValue(v interface{}) (*Value, error) {
     		return NewNumberValue(float64(v)), nil
     	case float64:
     		return NewNumberValue(float64(v)), nil
    +	case json.Number:
    +		n, err := v.Float64()
    +		if err != nil {
    +			return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
    +		}
    +		return NewNumberValue(n), nil
     	case string:
     		if !utf8.ValidString(v) {
     			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
    @@ -342,13 +354,13 @@ func NewValue(v interface{}) (*Value, error) {
     	case []byte:
     		s := base64.StdEncoding.EncodeToString(v)
     		return NewStringValue(s), nil
    -	case map[string]interface{}:
    +	case map[string]any:
     		v2, err := NewStruct(v)
     		if err != nil {
     			return nil, err
     		}
     		return NewStructValue(v2), nil
    -	case []interface{}:
    +	case []any:
     		v2, err := NewList(v)
     		if err != nil {
     			return nil, err
    @@ -396,7 +408,7 @@ func NewListValue(v *ListValue) *Value {
     //
     // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
     // converted as strings to remain compatible with MarshalJSON.
    -func (x *Value) AsInterface() interface{} {
    +func (x *Value) AsInterface() any {
     	switch v := x.GetKind().(type) {
     	case *Value_NumberValue:
     		if v != nil {
    @@ -441,11 +453,9 @@ func (x *Value) UnmarshalJSON(b []byte) error {
     
     func (x *Value) Reset() {
     	*x = Value{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_struct_proto_msgTypes[1]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_struct_proto_msgTypes[1]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Value) String() string {
    @@ -456,7 +466,7 @@ func (*Value) ProtoMessage() {}
     
     func (x *Value) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_struct_proto_msgTypes[1]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -471,51 +481,63 @@ func (*Value) Descriptor() ([]byte, []int) {
     	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
     }
     
    -func (m *Value) GetKind() isValue_Kind {
    -	if m != nil {
    -		return m.Kind
    +func (x *Value) GetKind() isValue_Kind {
    +	if x != nil {
    +		return x.Kind
     	}
     	return nil
     }
     
     func (x *Value) GetNullValue() NullValue {
    -	if x, ok := x.GetKind().(*Value_NullValue); ok {
    -		return x.NullValue
    +	if x != nil {
    +		if x, ok := x.Kind.(*Value_NullValue); ok {
    +			return x.NullValue
    +		}
     	}
     	return NullValue_NULL_VALUE
     }
     
     func (x *Value) GetNumberValue() float64 {
    -	if x, ok := x.GetKind().(*Value_NumberValue); ok {
    -		return x.NumberValue
    +	if x != nil {
    +		if x, ok := x.Kind.(*Value_NumberValue); ok {
    +			return x.NumberValue
    +		}
     	}
     	return 0
     }
     
     func (x *Value) GetStringValue() string {
    -	if x, ok := x.GetKind().(*Value_StringValue); ok {
    -		return x.StringValue
    +	if x != nil {
    +		if x, ok := x.Kind.(*Value_StringValue); ok {
    +			return x.StringValue
    +		}
     	}
     	return ""
     }
     
     func (x *Value) GetBoolValue() bool {
    -	if x, ok := x.GetKind().(*Value_BoolValue); ok {
    -		return x.BoolValue
    +	if x != nil {
    +		if x, ok := x.Kind.(*Value_BoolValue); ok {
    +			return x.BoolValue
    +		}
     	}
     	return false
     }
     
     func (x *Value) GetStructValue() *Struct {
    -	if x, ok := x.GetKind().(*Value_StructValue); ok {
    -		return x.StructValue
    +	if x != nil {
    +		if x, ok := x.Kind.(*Value_StructValue); ok {
    +			return x.StructValue
    +		}
     	}
     	return nil
     }
     
     func (x *Value) GetListValue() *ListValue {
    -	if x, ok := x.GetKind().(*Value_ListValue); ok {
    -		return x.ListValue
    +	if x != nil {
    +		if x, ok := x.Kind.(*Value_ListValue); ok {
    +			return x.ListValue
    +		}
     	}
     	return nil
     }
    @@ -570,17 +592,16 @@ func (*Value_ListValue) isValue_Kind() {}
     //
     // The JSON representation for `ListValue` is JSON array.
     type ListValue struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Repeated field of dynamically typed values.
    -	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
    +	Values        []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // NewList constructs a ListValue from a general-purpose Go slice.
     // The slice elements are converted using NewValue.
    -func NewList(v []interface{}) (*ListValue, error) {
    +func NewList(v []any) (*ListValue, error) {
     	x := &ListValue{Values: make([]*Value, len(v))}
     	for i, v := range v {
     		var err error
    @@ -594,9 +615,9 @@ func NewList(v []interface{}) (*ListValue, error) {
     
     // AsSlice converts x to a general-purpose Go slice.
     // The slice elements are converted by calling Value.AsInterface.
    -func (x *ListValue) AsSlice() []interface{} {
    +func (x *ListValue) AsSlice() []any {
     	vals := x.GetValues()
    -	vs := make([]interface{}, len(vals))
    +	vs := make([]any, len(vals))
     	for i, v := range vals {
     		vs[i] = v.AsInterface()
     	}
    @@ -613,11 +634,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error {
     
     func (x *ListValue) Reset() {
     	*x = ListValue{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_struct_proto_msgTypes[2]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_struct_proto_msgTypes[2]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *ListValue) String() string {
    @@ -628,7 +647,7 @@ func (*ListValue) ProtoMessage() {}
     
     func (x *ListValue) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_struct_proto_msgTypes[2]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -716,7 +735,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
     
     var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
     var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
    -var file_google_protobuf_struct_proto_goTypes = []interface{}{
    +var file_google_protobuf_struct_proto_goTypes = []any{
     	(NullValue)(0),    // 0: google.protobuf.NullValue
     	(*Struct)(nil),    // 1: google.protobuf.Struct
     	(*Value)(nil),     // 2: google.protobuf.Value
    @@ -742,45 +761,7 @@ func file_google_protobuf_struct_proto_init() {
     	if File_google_protobuf_struct_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Struct); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Value); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*ListValue); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
    -	file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
    +	file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
     		(*Value_NullValue)(nil),
     		(*Value_NumberValue)(nil),
     		(*Value_StringValue)(nil),
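Beyond the interface{} to any rename in the docs and signatures, structpb.NewValue now also accepts json.Number and the narrower int8/int16/uint8/uint16 widths (hence the new encoding/json import above). A usage sketch; the printed JSON is indicative only, since key order is not guaranteed:

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// json.Number (and the smaller int/uint widths) are now accepted by
	// NewValue; they are still stored as a NumberValue (float64).
	v, err := structpb.NewValue(map[string]any{
		"count": json.Number("42"),
		"tags":  []any{"a", "b"},
	})
	if err != nil {
		panic(err)
	}
	b, _ := protojson.Marshal(v)
	fmt.Println(string(b)) // e.g. {"count":42,"tags":["a","b"]}
}
```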
    diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
    index 81511a3363..9550109aa3 100644
    --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
    @@ -170,10 +170,7 @@ import (
     // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
     // ) to obtain a formatter capable of generating timestamps in this format.
     type Timestamp struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// Represents seconds of UTC time since Unix epoch
     	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
     	// 9999-12-31T23:59:59Z inclusive.
    @@ -182,7 +179,9 @@ type Timestamp struct {
     	// second values with fractions must still have non-negative nanos values
     	// that count forward in time. Must be from 0 to 999,999,999
     	// inclusive.
    -	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
    +	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // Now constructs a new Timestamp from the current time.
    @@ -254,11 +253,9 @@ func (x *Timestamp) check() uint {
     
     func (x *Timestamp) Reset() {
     	*x = Timestamp{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Timestamp) String() string {
    @@ -269,7 +266,7 @@ func (*Timestamp) ProtoMessage() {}
     
     func (x *Timestamp) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -332,7 +329,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
     }
     
     var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
    -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
    +var file_google_protobuf_timestamp_proto_goTypes = []any{
     	(*Timestamp)(nil), // 0: google.protobuf.Timestamp
     }
     var file_google_protobuf_timestamp_proto_depIdxs = []int32{
    @@ -348,20 +345,6 @@ func file_google_protobuf_timestamp_proto_init() {
     	if File_google_protobuf_timestamp_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Timestamp); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
    diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
    index 762a87130f..15b424ec12 100644
    --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
    +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
    @@ -54,12 +54,11 @@ import (
     //
     // The JSON representation for `DoubleValue` is JSON number.
     type DoubleValue struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The double value.
    -	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // Double stores v in a new DoubleValue and returns a pointer to it.
    @@ -69,11 +68,9 @@ func Double(v float64) *DoubleValue {
     
     func (x *DoubleValue) Reset() {
     	*x = DoubleValue{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *DoubleValue) String() string {
    @@ -84,7 +81,7 @@ func (*DoubleValue) ProtoMessage() {}
     
     func (x *DoubleValue) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -110,12 +107,11 @@ func (x *DoubleValue) GetValue() float64 {
     //
     // The JSON representation for `FloatValue` is JSON number.
     type FloatValue struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The float value.
    -	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // Float stores v in a new FloatValue and returns a pointer to it.
    @@ -125,11 +121,9 @@ func Float(v float32) *FloatValue {
     
     func (x *FloatValue) Reset() {
     	*x = FloatValue{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *FloatValue) String() string {
    @@ -140,7 +134,7 @@ func (*FloatValue) ProtoMessage() {}
     
     func (x *FloatValue) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -166,12 +160,11 @@ func (x *FloatValue) GetValue() float32 {
     //
     // The JSON representation for `Int64Value` is JSON string.
     type Int64Value struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The int64 value.
    -	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // Int64 stores v in a new Int64Value and returns a pointer to it.
    @@ -181,11 +174,9 @@ func Int64(v int64) *Int64Value {
     
     func (x *Int64Value) Reset() {
     	*x = Int64Value{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Int64Value) String() string {
    @@ -196,7 +187,7 @@ func (*Int64Value) ProtoMessage() {}
     
     func (x *Int64Value) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -222,12 +213,11 @@ func (x *Int64Value) GetValue() int64 {
     //
     // The JSON representation for `UInt64Value` is JSON string.
     type UInt64Value struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The uint64 value.
    -	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // UInt64 stores v in a new UInt64Value and returns a pointer to it.
    @@ -237,11 +227,9 @@ func UInt64(v uint64) *UInt64Value {
     
     func (x *UInt64Value) Reset() {
     	*x = UInt64Value{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *UInt64Value) String() string {
    @@ -252,7 +240,7 @@ func (*UInt64Value) ProtoMessage() {}
     
     func (x *UInt64Value) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -278,12 +266,11 @@ func (x *UInt64Value) GetValue() uint64 {
     //
     // The JSON representation for `Int32Value` is JSON number.
     type Int32Value struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The int32 value.
    -	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // Int32 stores v in a new Int32Value and returns a pointer to it.
    @@ -293,11 +280,9 @@ func Int32(v int32) *Int32Value {
     
     func (x *Int32Value) Reset() {
     	*x = Int32Value{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *Int32Value) String() string {
    @@ -308,7 +293,7 @@ func (*Int32Value) ProtoMessage() {}
     
     func (x *Int32Value) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -334,12 +319,11 @@ func (x *Int32Value) GetValue() int32 {
     //
     // The JSON representation for `UInt32Value` is JSON number.
     type UInt32Value struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The uint32 value.
    -	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // UInt32 stores v in a new UInt32Value and returns a pointer to it.
    @@ -349,11 +333,9 @@ func UInt32(v uint32) *UInt32Value {
     
     func (x *UInt32Value) Reset() {
     	*x = UInt32Value{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *UInt32Value) String() string {
    @@ -364,7 +346,7 @@ func (*UInt32Value) ProtoMessage() {}
     
     func (x *UInt32Value) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -390,12 +372,11 @@ func (x *UInt32Value) GetValue() uint32 {
     //
     // The JSON representation for `BoolValue` is JSON `true` and `false`.
     type BoolValue struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The bool value.
    -	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // Bool stores v in a new BoolValue and returns a pointer to it.
    @@ -405,11 +386,9 @@ func Bool(v bool) *BoolValue {
     
     func (x *BoolValue) Reset() {
     	*x = BoolValue{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *BoolValue) String() string {
    @@ -420,7 +399,7 @@ func (*BoolValue) ProtoMessage() {}
     
     func (x *BoolValue) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -446,12 +425,11 @@ func (x *BoolValue) GetValue() bool {
     //
     // The JSON representation for `StringValue` is JSON string.
     type StringValue struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The string value.
    -	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // String stores v in a new StringValue and returns a pointer to it.
    @@ -461,11 +439,9 @@ func String(v string) *StringValue {
     
     func (x *StringValue) Reset() {
     	*x = StringValue{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *StringValue) String() string {
    @@ -476,7 +452,7 @@ func (*StringValue) ProtoMessage() {}
     
     func (x *StringValue) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -502,12 +478,11 @@ func (x *StringValue) GetValue() string {
     //
     // The JSON representation for `BytesValue` is JSON string.
     type BytesValue struct {
    -	state         protoimpl.MessageState
    -	sizeCache     protoimpl.SizeCache
    -	unknownFields protoimpl.UnknownFields
    -
    +	state protoimpl.MessageState `protogen:"open.v1"`
     	// The bytes value.
    -	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
    +	Value         []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
    +	unknownFields protoimpl.UnknownFields
    +	sizeCache     protoimpl.SizeCache
     }
     
     // Bytes stores v in a new BytesValue and returns a pointer to it.
    @@ -517,11 +492,9 @@ func Bytes(v []byte) *BytesValue {
     
     func (x *BytesValue) Reset() {
     	*x = BytesValue{}
    -	if protoimpl.UnsafeEnabled {
    -		mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
    -		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    -		ms.StoreMessageInfo(mi)
    -	}
    +	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
    +	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +	ms.StoreMessageInfo(mi)
     }
     
     func (x *BytesValue) String() string {
    @@ -532,7 +505,7 @@ func (*BytesValue) ProtoMessage() {}
     
     func (x *BytesValue) ProtoReflect() protoreflect.Message {
     	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
    -	if protoimpl.UnsafeEnabled && x != nil {
    +	if x != nil {
     		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     		if ms.LoadMessageInfo() == nil {
     			ms.StoreMessageInfo(mi)
    @@ -605,7 +578,7 @@ func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
     }
     
     var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
    -var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
    +var file_google_protobuf_wrappers_proto_goTypes = []any{
     	(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
     	(*FloatValue)(nil),  // 1: google.protobuf.FloatValue
     	(*Int64Value)(nil),  // 2: google.protobuf.Int64Value
    @@ -629,116 +602,6 @@ func file_google_protobuf_wrappers_proto_init() {
     	if File_google_protobuf_wrappers_proto != nil {
     		return
     	}
    -	if !protoimpl.UnsafeEnabled {
    -		file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*DoubleValue); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*FloatValue); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Int64Value); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*UInt64Value); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*Int32Value); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*UInt32Value); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*BoolValue); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*StringValue); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -		file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
    -			switch v := v.(*BytesValue); i {
    -			case 0:
    -				return &v.state
    -			case 1:
    -				return &v.sizeCache
    -			case 2:
    -				return &v.unknownFields
    -			default:
    -				return nil
    -			}
    -		}
    -	}
     	type x struct{}
     	out := protoimpl.TypeBuilder{
     		File: protoimpl.DescBuilder{
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore
    new file mode 100644
    index 0000000000..b7ed7f956d
    --- /dev/null
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore
    @@ -0,0 +1,6 @@
    +# editor and IDE paraphernalia
    +.idea
    +.vscode
    +
    +# macOS paraphernalia
    +.DS_Store
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE
    new file mode 100644
    index 0000000000..df76d7d771
    --- /dev/null
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE
    @@ -0,0 +1,25 @@
    +Copyright (c) 2014, Evan Phoenix
    +All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without 
    +modification, are permitted provided that the following conditions are met:
    +
    +* Redistributions of source code must retain the above copyright notice, this
    +  list of conditions and the following disclaimer.
    +* Redistributions in binary form must reproduce the above copyright notice,
    +  this list of conditions and the following disclaimer in the documentation
    +  and/or other materials provided with the distribution.
    +* Neither the name of the Evan Phoenix nor the names of its contributors 
    +  may be used to endorse or promote products derived from this software 
    +  without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
    +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
    +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 
    +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 
    +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
    +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
    +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
    +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
    +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
    new file mode 100644
    index 0000000000..28e3516937
    --- /dev/null
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
    @@ -0,0 +1,317 @@
    +# JSON-Patch
    +`jsonpatch` is a library which provides functionality for both applying
    +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
    +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
    +
    +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch)
    +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch)
    +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch)
    +
    +# Get It!
    +
    +**Latest and greatest**: 
    +```bash
    +go get -u github.com/evanphx/json-patch/v5
    +```
    +
    +**Stable Versions**:
    +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
    +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
    +
    +(previous versions below `v3` are unavailable)
    +
    +# Use It!
    +* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
    +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
    +* [Comparing JSON documents](#comparing-json-documents)
    +* [Combine merge patches](#combine-merge-patches)
    +
    +
    +# Configuration
    +
    +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
    +  This defaults to `true` and enables the non-standard practice of allowing
    +  negative indices to mean indices starting at the end of an array. This
    +  functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
    +  false`.
    +
    +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
    +  which limits the total size increase in bytes caused by "copy" operations in a
    +  patch. It defaults to 0, which means there is no limit.
    +
    +These global variables control the behavior of `jsonpatch.Apply`.
    +
    +An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior
    +is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
    +
    +The `jsonpatch.ApplyOptions` structure includes the configuration options above
    +and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
    +
    +When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
    +`remove` operations whose `path` points to a non-existent location in the JSON document.
    +`AllowMissingPathOnRemove` defaults to `false`, which causes `jsonpatch.ApplyWithOptions`
    +to return an error when it hits a missing `path` on `remove`.
    +
    +When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
    +that `add` operations produce all the `path` elements that are missing from the target object.
    +
    +Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
    +whose values are populated from the global configuration variables.
    +
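    +As a quick illustration of the global-variable path described above, here is a
    +minimal sketch. It only uses names defined by this package (the import path
    +matches the examples below); the limit value is arbitrary and for illustration:
    +
    +```go
    +package main
    +
    +import (
    +	"fmt"
    +
    +	jsonpatch "github.com/evanphx/json-patch"
    +)
    +
    +func main() {
    +	// Tighten the global knobs before calling Apply.
    +	jsonpatch.SupportNegativeIndices = false     // reject negative array indices
    +	jsonpatch.AccumulatedCopySizeLimit = 1 << 20 // cap "copy" growth at 1 MiB
    +
    +	patchJSON := []byte(`[
    +		{"op": "copy", "from": "/name", "path": "/nickname"}
    +	]`)
    +
    +	patch, err := jsonpatch.DecodePatch(patchJSON)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	modified, err := patch.Apply([]byte(`{"name": "John"}`))
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	fmt.Printf("modified document: %s\n", modified)
    +}
    +```
    +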
    +## Create and apply a merge patch
    +Given both an original JSON document and a modified JSON document, you can create
    +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. 
    +
    +It can describe the changes needed to convert from the original to the 
    +modified JSON document.
    +
    +Once you have a merge patch, you can apply it to other JSON documents using the
    +`jsonpatch.MergePatch(document, patch)` function.
    +
    +```go
    +package main
    +
    +import (
    +	"fmt"
    +
    +	jsonpatch "github.com/evanphx/json-patch"
    +)
    +
    +func main() {
    +	// Let's create a merge patch from these two documents...
    +	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
    +	target := []byte(`{"name": "Jane", "age": 24}`)
    +
    +	patch, err := jsonpatch.CreateMergePatch(original, target)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	// Now lets apply the patch against a different JSON document...
    +
    +	alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
    +	modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
    +
    +	fmt.Printf("patch document:   %s\n", patch)
    +	fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
    +}
    +```
    +
    +When run, you get the following output:
    +
    +```bash
    +$ go run main.go
    +patch document:   {"height":null,"name":"Jane"}
    +updated alternative doc: {"age":28,"name":"Jane"}
    +```
    +
    +## Create and apply a JSON Patch
    +You can create patch objects using `DecodePatch([]byte)`, which can then 
    +be applied against JSON documents.
    +
    +The following is an example of creating a patch from two operations, and
    +applying it against a JSON document.
    +
    +```go
    +package main
    +
    +import (
    +	"fmt"
    +
    +	jsonpatch "github.com/evanphx/json-patch"
    +)
    +
    +func main() {
    +	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
    +	patchJSON := []byte(`[
    +		{"op": "replace", "path": "/name", "value": "Jane"},
    +		{"op": "remove", "path": "/height"}
    +	]`)
    +
    +	patch, err := jsonpatch.DecodePatch(patchJSON)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	modified, err := patch.Apply(original)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	fmt.Printf("Original document: %s\n", original)
    +	fmt.Printf("Modified document: %s\n", modified)
    +}
    +```
    +
    +When run, you get the following output:
    +
    +```bash
    +$ go run main.go
    +Original document: {"name": "John", "age": 24, "height": 3.21}
    +Modified document: {"age":24,"name":"Jane"}
    +```
    +
    +## Comparing JSON documents
    +Due to potential whitespace and ordering differences, one cannot simply compare
    +JSON strings or byte-arrays directly. 
    +
    +As such, you can instead use `jsonpatch.Equal(document1, document2)` to 
    +determine if two JSON documents are _structurally_ equal. This ignores
    +whitespace differences and key-value ordering.
    +
    +```go
    +package main
    +
    +import (
    +	"fmt"
    +
    +	jsonpatch "github.com/evanphx/json-patch"
    +)
    +
    +func main() {
    +	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
    +	similar := []byte(`
    +		{
    +			"age": 24,
    +			"height": 3.21,
    +			"name": "John"
    +		}
    +	`)
    +	different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
    +
    +	if jsonpatch.Equal(original, similar) {
    +		fmt.Println(`"original" is structurally equal to "similar"`)
    +	}
    +
    +	if !jsonpatch.Equal(original, different) {
    +		fmt.Println(`"original" is _not_ structurally equal to "different"`)
    +	}
    +}
    +```
    +
    +When run, you get the following output:
    +```bash
    +$ go run main.go
    +"original" is structurally equal to "similar"
    +"original" is _not_ structurally equal to "different"
    +```
    +
    +## Combine merge patches
    +Given two JSON merge patch documents, it is possible to combine them into a
    +single merge patch which can describe both sets of changes.
    +
    +The resulting merge patch can be used such that applying it results in a
    +document structurally similar to the one obtained by merging each merge patch
    +into the document in succession.
    +
    +```go
    +package main
    +
    +import (
    +	"fmt"
    +
    +	jsonpatch "github.com/evanphx/json-patch"
    +)
    +
    +func main() {
    +	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
    +
    +	nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
    +	ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
    +
    +	// Let's combine these merge patch documents...
    +	combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	// Apply each patch individually against the original document
    +	withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	// Apply the combined patch against the original document
    +
    +	withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	// Do both result in the same thing? They should!
    +	if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
    +		fmt.Println("Both JSON documents are structurally the same!")
    +	}
    +
    +	fmt.Printf("combined merge patch: %s", combinedPatch)
    +}
    +```
    +
    +When run, you get the following output:
    +```bash
    +$ go run main.go
    +Both JSON documents are structurally the same!
    +combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
    +```
    +
    +# CLI for comparing JSON documents
    +You can install the commandline program `json-patch`.
    +
    +This program takes multiple JSON patch documents as arguments and reads a
    +JSON document from `stdin`. It applies the patch(es) against the document
    +and outputs the modified document.
    +
    +**patch.1.json**
    +```json
    +[
    +    {"op": "replace", "path": "/name", "value": "Jane"},
    +    {"op": "remove", "path": "/height"}
    +]
    +```
    +
    +**patch.2.json**
    +```json
    +[
    +    {"op": "add", "path": "/address", "value": "123 Main St"},
    +    {"op": "replace", "path": "/age", "value": "21"}
    +]
    +```
    +
    +**document.json**
    +```json
    +{
    +    "name": "John",
    +    "age": 24,
    +    "height": 3.21
    +}
    +```
    +
    +You can then run:
    +
    +```bash
    +$ go install github.com/evanphx/json-patch/cmd/json-patch
    +$ cat document.json | json-patch -p patch.1.json -p patch.2.json
    +{"address":"123 Main St","age":"21","name":"Jane"}
    +```
    +
    +# Help It!
    +Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
    +or [create a PR](https://github.com/evanphx/json-patch/compare).
    +
    +
    +Before creating a pull request, we'd ask that you make sure tests are passing
    +and that you have added new tests when applicable.
    +
    +Contributors can run tests using:
    +
    +```bash
    +go test -cover ./...
    +```
    +
    +Builds for pull requests are tested automatically 
    +using [TravisCI](https://travis-ci.org/evanphx/json-patch).
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/errors.go b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go
    new file mode 100644
    index 0000000000..75304b4437
    --- /dev/null
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go
    @@ -0,0 +1,38 @@
    +package jsonpatch
    +
    +import "fmt"
    +
    +// AccumulatedCopySizeError is an error type returned when the accumulated size
    +// increase caused by copy operations in a patch operation has exceeded the
    +// limit.
    +type AccumulatedCopySizeError struct {
    +	limit       int64
    +	accumulated int64
    +}
    +
    +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
    +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
    +	return &AccumulatedCopySizeError{limit: l, accumulated: a}
    +}
    +
    +// Error implements the error interface.
    +func (a *AccumulatedCopySizeError) Error() string {
    +	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
    +}
    +
    +// ArraySizeError is an error type returned when the array size has exceeded
    +// the limit.
    +type ArraySizeError struct {
    +	limit int
    +	size  int
    +}
    +
    +// NewArraySizeError returns an ArraySizeError.
    +func NewArraySizeError(l, s int) *ArraySizeError {
    +	return &ArraySizeError{limit: l, size: s}
    +}
    +
    +// Error implements the error interface.
    +func (a *ArraySizeError) Error() string {
    +	return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
    +}
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/merge.go b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go
    new file mode 100644
    index 0000000000..ad88d40181
    --- /dev/null
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go
    @@ -0,0 +1,389 @@
    +package jsonpatch
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"fmt"
    +	"reflect"
    +)
    +
    +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
    +	curDoc, err := cur.intoDoc()
    +
    +	if err != nil {
    +		pruneNulls(patch)
    +		return patch
    +	}
    +
    +	patchDoc, err := patch.intoDoc()
    +
    +	if err != nil {
    +		return patch
    +	}
    +
    +	mergeDocs(curDoc, patchDoc, mergeMerge)
    +
    +	return cur
    +}
    +
    +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
    +	for k, v := range *patch {
    +		if v == nil {
    +			if mergeMerge {
    +				(*doc)[k] = nil
    +			} else {
    +				delete(*doc, k)
    +			}
    +		} else {
    +			cur, ok := (*doc)[k]
    +
    +			if !ok || cur == nil {
    +				if !mergeMerge {
    +					pruneNulls(v)
    +				}
    +
    +				(*doc)[k] = v
    +			} else {
    +				(*doc)[k] = merge(cur, v, mergeMerge)
    +			}
    +		}
    +	}
    +}
    +
    +func pruneNulls(n *lazyNode) {
    +	sub, err := n.intoDoc()
    +
    +	if err == nil {
    +		pruneDocNulls(sub)
    +	} else {
    +		ary, err := n.intoAry()
    +
    +		if err == nil {
    +			pruneAryNulls(ary)
    +		}
    +	}
    +}
    +
    +func pruneDocNulls(doc *partialDoc) *partialDoc {
    +	for k, v := range *doc {
    +		if v == nil {
    +			delete(*doc, k)
    +		} else {
    +			pruneNulls(v)
    +		}
    +	}
    +
    +	return doc
    +}
    +
    +func pruneAryNulls(ary *partialArray) *partialArray {
    +	newAry := []*lazyNode{}
    +
    +	for _, v := range *ary {
    +		if v != nil {
    +			pruneNulls(v)
    +		}
    +		newAry = append(newAry, v)
    +	}
    +
    +	*ary = newAry
    +
    +	return ary
    +}
    +
    +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
    +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
    +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
    +
    +// MergeMergePatches merges two merge patches together, such that
    +// applying this resulting merged merge patch to a document yields the same
    +// as merging each merge patch to the document in succession.
    +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
    +	return doMergePatch(patch1Data, patch2Data, true)
    +}
    +
    +// MergePatch merges the patchData into the docData.
    +func MergePatch(docData, patchData []byte) ([]byte, error) {
    +	return doMergePatch(docData, patchData, false)
    +}
    +
    +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
    +	doc := &partialDoc{}
    +
    +	docErr := json.Unmarshal(docData, doc)
    +
    +	patch := &partialDoc{}
    +
    +	patchErr := json.Unmarshal(patchData, patch)
    +
    +	if _, ok := docErr.(*json.SyntaxError); ok {
    +		return nil, ErrBadJSONDoc
    +	}
    +
    +	if _, ok := patchErr.(*json.SyntaxError); ok {
    +		return nil, ErrBadJSONPatch
    +	}
    +
    +	if docErr == nil && *doc == nil {
    +		return nil, ErrBadJSONDoc
    +	}
    +
    +	if patchErr == nil && *patch == nil {
    +		return nil, ErrBadJSONPatch
    +	}
    +
    +	if docErr != nil || patchErr != nil {
    +		// Not an error, just not a doc, so we turn straight into the patch
    +		if patchErr == nil {
    +			if mergeMerge {
    +				doc = patch
    +			} else {
    +				doc = pruneDocNulls(patch)
    +			}
    +		} else {
    +			patchAry := &partialArray{}
    +			patchErr = json.Unmarshal(patchData, patchAry)
    +
    +			if patchErr != nil {
    +				return nil, ErrBadJSONPatch
    +			}
    +
    +			pruneAryNulls(patchAry)
    +
    +			out, patchErr := json.Marshal(patchAry)
    +
    +			if patchErr != nil {
    +				return nil, ErrBadJSONPatch
    +			}
    +
    +			return out, nil
    +		}
    +	} else {
    +		mergeDocs(doc, patch, mergeMerge)
    +	}
    +
    +	return json.Marshal(doc)
    +}
    +
    +// resemblesJSONArray indicates whether the byte-slice "appears" to be
    +// a JSON array or not.
    +// False-positives are possible, as this function does not check the internal
    +// structure of the array. It only checks that the outer syntax is present and
    +// correct.
    +func resemblesJSONArray(input []byte) bool {
    +	input = bytes.TrimSpace(input)
    +
    +	hasPrefix := bytes.HasPrefix(input, []byte("["))
    +	hasSuffix := bytes.HasSuffix(input, []byte("]"))
    +
    +	return hasPrefix && hasSuffix
    +}
    +
    +// CreateMergePatch will return a merge patch document capable of converting
    +// the original document(s) to the modified document(s).
    +// The parameters can be bytes of either two JSON Documents, or two arrays of
    +// JSON documents.
    +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
    +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
    +	originalResemblesArray := resemblesJSONArray(originalJSON)
    +	modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
    +
    +	// Do both byte-slices seem like JSON arrays?
    +	if originalResemblesArray && modifiedResemblesArray {
    +		return createArrayMergePatch(originalJSON, modifiedJSON)
    +	}
    +
    +	// Are both byte-slices not arrays? Then they are likely JSON objects...
    +	if !originalResemblesArray && !modifiedResemblesArray {
    +		return createObjectMergePatch(originalJSON, modifiedJSON)
    +	}
    +
    +	// None of the above? Then return an error because of mismatched types.
    +	return nil, errBadMergeTypes
    +}
    +
    +// createObjectMergePatch will return a merge-patch document capable of
    +// converting the original document to the modified document.
    +func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
    +	originalDoc := map[string]interface{}{}
    +	modifiedDoc := map[string]interface{}{}
    +
    +	err := json.Unmarshal(originalJSON, &originalDoc)
    +	if err != nil {
    +		return nil, ErrBadJSONDoc
    +	}
    +
    +	err = json.Unmarshal(modifiedJSON, &modifiedDoc)
    +	if err != nil {
    +		return nil, ErrBadJSONDoc
    +	}
    +
    +	dest, err := getDiff(originalDoc, modifiedDoc)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return json.Marshal(dest)
    +}
    +
    +// createArrayMergePatch will return an array of merge-patch documents capable
    +// of converting the original document to the modified document for each
    +// pair of JSON documents provided in the arrays.
    +// Arrays of mismatched sizes will result in an error.
    +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
    +	originalDocs := []json.RawMessage{}
    +	modifiedDocs := []json.RawMessage{}
    +
    +	err := json.Unmarshal(originalJSON, &originalDocs)
    +	if err != nil {
    +		return nil, ErrBadJSONDoc
    +	}
    +
    +	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
    +	if err != nil {
    +		return nil, ErrBadJSONDoc
    +	}
    +
    +	total := len(originalDocs)
    +	if len(modifiedDocs) != total {
    +		return nil, ErrBadJSONDoc
    +	}
    +
    +	result := []json.RawMessage{}
    +	for i := 0; i < len(originalDocs); i++ {
    +		original := originalDocs[i]
    +		modified := modifiedDocs[i]
    +
    +		patch, err := createObjectMergePatch(original, modified)
    +		if err != nil {
    +			return nil, err
    +		}
    +
    +		result = append(result, json.RawMessage(patch))
    +	}
    +
    +	return json.Marshal(result)
    +}
    +
    +// Returns true if the arrays match (must be json types).
    +// As is idiomatic for go, an empty array is not the same as a nil array.
    +func matchesArray(a, b []interface{}) bool {
    +	if len(a) != len(b) {
    +		return false
    +	}
    +	if (a == nil && b != nil) || (a != nil && b == nil) {
    +		return false
    +	}
    +	for i := range a {
    +		if !matchesValue(a[i], b[i]) {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +// Returns true if the values match (must be json types)
    +// The types of the values must match, otherwise it will always return false
    +// If two map[string]interface{} are given, all elements must match.
    +func matchesValue(av, bv interface{}) bool {
    +	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
    +		return false
    +	}
    +	switch at := av.(type) {
    +	case string:
    +		bt := bv.(string)
    +		if bt == at {
    +			return true
    +		}
    +	case float64:
    +		bt := bv.(float64)
    +		if bt == at {
    +			return true
    +		}
    +	case bool:
    +		bt := bv.(bool)
    +		if bt == at {
    +			return true
    +		}
    +	case nil:
    +		// Both nil, fine.
    +		return true
    +	case map[string]interface{}:
    +		bt := bv.(map[string]interface{})
    +		if len(bt) != len(at) {
    +			return false
    +		}
    +		for key := range bt {
    +			av, aOK := at[key]
    +			bv, bOK := bt[key]
    +			if aOK != bOK {
    +				return false
    +			}
    +			if !matchesValue(av, bv) {
    +				return false
    +			}
    +		}
    +		return true
    +	case []interface{}:
    +		bt := bv.([]interface{})
    +		return matchesArray(at, bt)
    +	}
    +	return false
    +}
    +
    +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
    +func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
    +	into := map[string]interface{}{}
    +	for key, bv := range b {
    +		av, ok := a[key]
    +		// value was added
    +		if !ok {
    +			into[key] = bv
    +			continue
    +		}
    +		// If types have changed, replace completely
    +		if reflect.TypeOf(av) != reflect.TypeOf(bv) {
    +			into[key] = bv
    +			continue
    +		}
    +		// Types are the same, compare values
    +		switch at := av.(type) {
    +		case map[string]interface{}:
    +			bt := bv.(map[string]interface{})
    +			dst := make(map[string]interface{}, len(bt))
    +			dst, err := getDiff(at, bt)
    +			if err != nil {
    +				return nil, err
    +			}
    +			if len(dst) > 0 {
    +				into[key] = dst
    +			}
    +		case string, float64, bool:
    +			if !matchesValue(av, bv) {
    +				into[key] = bv
    +			}
    +		case []interface{}:
    +			bt := bv.([]interface{})
    +			if !matchesArray(at, bt) {
    +				into[key] = bv
    +			}
    +		case nil:
    +			switch bv.(type) {
    +			case nil:
    +				// Both nil, fine.
    +			default:
    +				into[key] = bv
    +			}
    +		default:
    +			panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
    +		}
    +	}
    +	// Now add all deleted values as nil
    +	for key := range a {
    +		_, found := b[key]
    +		if !found {
    +			into[key] = nil
    +		}
    +	}
    +	return into, nil
    +}
    diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
    new file mode 100644
    index 0000000000..dc2b7e51e6
    --- /dev/null
    +++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
    @@ -0,0 +1,851 @@
    +package jsonpatch
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"fmt"
    +	"strconv"
    +	"strings"
    +
    +	"github.com/pkg/errors"
    +)
    +
    +const (
    +	eRaw = iota
    +	eDoc
    +	eAry
    +)
    +
    +var (
    +	// SupportNegativeIndices decides whether to support non-standard practice of
    +	// allowing negative indices to mean indices starting at the end of an array.
    +	// Default to true.
    +	SupportNegativeIndices bool = true
    +	// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
    +	// "copy" operations in a patch.
    +	AccumulatedCopySizeLimit int64 = 0
    +)
    +
    +var (
    +	ErrTestFailed   = errors.New("test failed")
    +	ErrMissing      = errors.New("missing value")
    +	ErrUnknownType  = errors.New("unknown object type")
    +	ErrInvalid      = errors.New("invalid state detected")
    +	ErrInvalidIndex = errors.New("invalid index referenced")
    +)
    +
    +type lazyNode struct {
    +	raw   *json.RawMessage
    +	doc   partialDoc
    +	ary   partialArray
    +	which int
    +}
    +
    +// Operation is a single JSON-Patch step, such as a single 'add' operation.
    +type Operation map[string]*json.RawMessage
    +
    +// Patch is an ordered collection of Operations.
    +type Patch []Operation
    +
    +type partialDoc map[string]*lazyNode
    +type partialArray []*lazyNode
    +
    +type container interface {
    +	get(key string) (*lazyNode, error)
    +	set(key string, val *lazyNode) error
    +	add(key string, val *lazyNode) error
    +	remove(key string) error
    +}
    +
    +func newLazyNode(raw *json.RawMessage) *lazyNode {
    +	return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
    +}
    +
    +func (n *lazyNode) MarshalJSON() ([]byte, error) {
    +	switch n.which {
    +	case eRaw:
    +		return json.Marshal(n.raw)
    +	case eDoc:
    +		return json.Marshal(n.doc)
    +	case eAry:
    +		return json.Marshal(n.ary)
    +	default:
    +		return nil, ErrUnknownType
    +	}
    +}
    +
    +func (n *lazyNode) UnmarshalJSON(data []byte) error {
    +	dest := make(json.RawMessage, len(data))
    +	copy(dest, data)
    +	n.raw = &dest
    +	n.which = eRaw
    +	return nil
    +}
    +
    +func deepCopy(src *lazyNode) (*lazyNode, int, error) {
    +	if src == nil {
    +		return nil, 0, nil
    +	}
    +	a, err := src.MarshalJSON()
    +	if err != nil {
    +		return nil, 0, err
    +	}
    +	sz := len(a)
    +	ra := make(json.RawMessage, sz)
    +	copy(ra, a)
    +	return newLazyNode(&ra), sz, nil
    +}
    +
    +func (n *lazyNode) intoDoc() (*partialDoc, error) {
    +	if n.which == eDoc {
    +		return &n.doc, nil
    +	}
    +
    +	if n.raw == nil {
    +		return nil, ErrInvalid
    +	}
    +
    +	err := json.Unmarshal(*n.raw, &n.doc)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	n.which = eDoc
    +	return &n.doc, nil
    +}
    +
    +func (n *lazyNode) intoAry() (*partialArray, error) {
    +	if n.which == eAry {
    +		return &n.ary, nil
    +	}
    +
    +	if n.raw == nil {
    +		return nil, ErrInvalid
    +	}
    +
    +	err := json.Unmarshal(*n.raw, &n.ary)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	n.which = eAry
    +	return &n.ary, nil
    +}
    +
    +func (n *lazyNode) compact() []byte {
    +	buf := &bytes.Buffer{}
    +
    +	if n.raw == nil {
    +		return nil
    +	}
    +
    +	err := json.Compact(buf, *n.raw)
    +
    +	if err != nil {
    +		return *n.raw
    +	}
    +
    +	return buf.Bytes()
    +}
    +
    +func (n *lazyNode) tryDoc() bool {
    +	if n.raw == nil {
    +		return false
    +	}
    +
    +	err := json.Unmarshal(*n.raw, &n.doc)
    +
    +	if err != nil {
    +		return false
    +	}
    +
    +	n.which = eDoc
    +	return true
    +}
    +
    +func (n *lazyNode) tryAry() bool {
    +	if n.raw == nil {
    +		return false
    +	}
    +
    +	err := json.Unmarshal(*n.raw, &n.ary)
    +
    +	if err != nil {
    +		return false
    +	}
    +
    +	n.which = eAry
    +	return true
    +}
    +
    +func (n *lazyNode) equal(o *lazyNode) bool {
    +	if n.which == eRaw {
    +		if !n.tryDoc() && !n.tryAry() {
    +			if o.which != eRaw {
    +				return false
    +			}
    +
    +			return bytes.Equal(n.compact(), o.compact())
    +		}
    +	}
    +
    +	if n.which == eDoc {
    +		if o.which == eRaw {
    +			if !o.tryDoc() {
    +				return false
    +			}
    +		}
    +
    +		if o.which != eDoc {
    +			return false
    +		}
    +
    +		if len(n.doc) != len(o.doc) {
    +			return false
    +		}
    +
    +		for k, v := range n.doc {
    +			ov, ok := o.doc[k]
    +
    +			if !ok {
    +				return false
    +			}
    +
    +			if (v == nil) != (ov == nil) {
    +				return false
    +			}
    +
    +			if v == nil && ov == nil {
    +				continue
    +			}
    +
    +			if !v.equal(ov) {
    +				return false
    +			}
    +		}
    +
    +		return true
    +	}
    +
    +	if o.which != eAry && !o.tryAry() {
    +		return false
    +	}
    +
    +	if len(n.ary) != len(o.ary) {
    +		return false
    +	}
    +
    +	for idx, val := range n.ary {
    +		if !val.equal(o.ary[idx]) {
    +			return false
    +		}
    +	}
    +
    +	return true
    +}
    +
    +// Kind reads the "op" field of the Operation.
    +func (o Operation) Kind() string {
    +	if obj, ok := o["op"]; ok && obj != nil {
    +		var op string
    +
    +		err := json.Unmarshal(*obj, &op)
    +
    +		if err != nil {
    +			return "unknown"
    +		}
    +
    +		return op
    +	}
    +
    +	return "unknown"
    +}
    +
    +// Path reads the "path" field of the Operation.
    +func (o Operation) Path() (string, error) {
    +	if obj, ok := o["path"]; ok && obj != nil {
    +		var op string
    +
    +		err := json.Unmarshal(*obj, &op)
    +
    +		if err != nil {
    +			return "unknown", err
    +		}
    +
    +		return op, nil
    +	}
    +
    +	return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
    +}
    +
    +// From reads the "from" field of the Operation.
    +func (o Operation) From() (string, error) {
    +	if obj, ok := o["from"]; ok && obj != nil {
    +		var op string
    +
    +		err := json.Unmarshal(*obj, &op)
    +
    +		if err != nil {
    +			return "unknown", err
    +		}
    +
    +		return op, nil
    +	}
    +
    +	return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
    +}
    +
    +func (o Operation) value() *lazyNode {
    +	if obj, ok := o["value"]; ok {
    +		return newLazyNode(obj)
    +	}
    +
    +	return nil
    +}
    +
    +// ValueInterface decodes the operation value into an interface.
    +func (o Operation) ValueInterface() (interface{}, error) {
    +	if obj, ok := o["value"]; ok && obj != nil {
    +		var v interface{}
    +
    +		err := json.Unmarshal(*obj, &v)
    +
    +		if err != nil {
    +			return nil, err
    +		}
    +
    +		return v, nil
    +	}
    +
    +	return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
    +}
    +
    +func isArray(buf []byte) bool {
    +Loop:
    +	for _, c := range buf {
    +		switch c {
    +		case ' ':
    +		case '\n':
    +		case '\t':
    +			continue
    +		case '[':
    +			return true
    +		default:
    +			break Loop
    +		}
    +	}
    +
    +	return false
    +}
    +
    +func findObject(pd *container, path string) (container, string) {
    +	doc := *pd
    +
    +	split := strings.Split(path, "/")
    +
    +	if len(split) < 2 {
    +		return nil, ""
    +	}
    +
    +	parts := split[1 : len(split)-1]
    +
    +	key := split[len(split)-1]
    +
    +	var err error
    +
    +	for _, part := range parts {
    +
    +		next, ok := doc.get(decodePatchKey(part))
    +
    +		if next == nil || ok != nil {
    +			return nil, ""
    +		}
    +
    +		if isArray(*next.raw) {
    +			doc, err = next.intoAry()
    +
    +			if err != nil {
    +				return nil, ""
    +			}
    +		} else {
    +			doc, err = next.intoDoc()
    +
    +			if err != nil {
    +				return nil, ""
    +			}
    +		}
    +	}
    +
    +	return doc, decodePatchKey(key)
    +}
    +
    +func (d *partialDoc) set(key string, val *lazyNode) error {
    +	(*d)[key] = val
    +	return nil
    +}
    +
    +func (d *partialDoc) add(key string, val *lazyNode) error {
    +	(*d)[key] = val
    +	return nil
    +}
    +
    +func (d *partialDoc) get(key string) (*lazyNode, error) {
    +	return (*d)[key], nil
    +}
    +
    +func (d *partialDoc) remove(key string) error {
    +	_, ok := (*d)[key]
    +	if !ok {
    +		return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
    +	}
    +
    +	delete(*d, key)
    +	return nil
    +}
    +
    +// set should only be used to implement the "replace" operation, so "key" must
    +// be an already existing index in "d".
    +func (d *partialArray) set(key string, val *lazyNode) error {
    +	idx, err := strconv.Atoi(key)
    +	if err != nil {
    +		return err
    +	}
    +
    +	if idx < 0 {
    +		if !SupportNegativeIndices {
    +			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		if idx < -len(*d) {
    +			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		idx += len(*d)
    +	}
    +
    +	(*d)[idx] = val
    +	return nil
    +}
    +
    +func (d *partialArray) add(key string, val *lazyNode) error {
    +	if key == "-" {
    +		*d = append(*d, val)
    +		return nil
    +	}
    +
    +	idx, err := strconv.Atoi(key)
    +	if err != nil {
    +		return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
    +	}
    +
    +	sz := len(*d) + 1
    +
    +	ary := make([]*lazyNode, sz)
    +
    +	cur := *d
    +
    +	if idx >= len(ary) {
    +		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +	}
    +
    +	if idx < 0 {
    +		if !SupportNegativeIndices {
    +			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		if idx < -len(ary) {
    +			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		idx += len(ary)
    +	}
    +
    +	copy(ary[0:idx], cur[0:idx])
    +	ary[idx] = val
    +	copy(ary[idx+1:], cur[idx:])
    +
    +	*d = ary
    +	return nil
    +}
    +
    +func (d *partialArray) get(key string) (*lazyNode, error) {
    +	idx, err := strconv.Atoi(key)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	if idx < 0 {
    +		if !SupportNegativeIndices {
    +			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		if idx < -len(*d) {
    +			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		idx += len(*d)
    +	}
    +
    +	if idx >= len(*d) {
    +		return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +	}
    +
    +	return (*d)[idx], nil
    +}
    +
    +func (d *partialArray) remove(key string) error {
    +	idx, err := strconv.Atoi(key)
    +	if err != nil {
    +		return err
    +	}
    +
    +	cur := *d
    +
    +	if idx >= len(cur) {
    +		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +	}
    +
    +	if idx < 0 {
    +		if !SupportNegativeIndices {
    +			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		if idx < -len(cur) {
    +			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
    +		}
    +		idx += len(cur)
    +	}
    +
    +	ary := make([]*lazyNode, len(cur)-1)
    +
    +	copy(ary[0:idx], cur[0:idx])
    +	copy(ary[idx:], cur[idx+1:])
    +
    +	*d = ary
    +	return nil
    +
    +}
    +
    +func (p Patch) add(doc *container, op Operation) error {
    +	path, err := op.Path()
    +	if err != nil {
    +		return errors.Wrapf(ErrMissing, "add operation failed to decode path")
    +	}
    +
    +	con, key := findObject(doc, path)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
    +	}
    +
    +	err = con.add(key, op.value())
    +	if err != nil {
    +		return errors.Wrapf(err, "error in add for path: '%s'", path)
    +	}
    +
    +	return nil
    +}
    +
    +func (p Patch) remove(doc *container, op Operation) error {
    +	path, err := op.Path()
    +	if err != nil {
    +		return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
    +	}
    +
    +	con, key := findObject(doc, path)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
    +	}
    +
    +	err = con.remove(key)
    +	if err != nil {
    +		return errors.Wrapf(err, "error in remove for path: '%s'", path)
    +	}
    +
    +	return nil
    +}
    +
    +func (p Patch) replace(doc *container, op Operation) error {
    +	path, err := op.Path()
    +	if err != nil {
    +		return errors.Wrapf(err, "replace operation failed to decode path")
    +	}
    +
    +	if path == "" {
    +		val := op.value()
    +
    +		if val.which == eRaw {
    +			if !val.tryDoc() {
    +				if !val.tryAry() {
    +					return errors.Wrapf(err, "replace operation value must be object or array")
    +				}
    +			}
    +		}
    +
    +		switch val.which {
    +		case eAry:
    +			*doc = &val.ary
    +		case eDoc:
    +			*doc = &val.doc
    +		case eRaw:
    +			return errors.Wrapf(err, "replace operation hit impossible case")
    +		}
    +
    +		return nil
    +	}
    +
    +	con, key := findObject(doc, path)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
    +	}
    +
    +	_, ok := con.get(key)
    +	if ok != nil {
    +		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
    +	}
    +
    +	err = con.set(key, op.value())
    +	if err != nil {
    +		return errors.Wrapf(err, "error in remove for path: '%s'", path)
    +	}
    +
    +	return nil
    +}
    +
    +func (p Patch) move(doc *container, op Operation) error {
    +	from, err := op.From()
    +	if err != nil {
    +		return errors.Wrapf(err, "move operation failed to decode from")
    +	}
    +
    +	con, key := findObject(doc, from)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
    +	}
    +
    +	val, err := con.get(key)
    +	if err != nil {
    +		return errors.Wrapf(err, "error in move for path: '%s'", key)
    +	}
    +
    +	err = con.remove(key)
    +	if err != nil {
    +		return errors.Wrapf(err, "error in move for path: '%s'", key)
    +	}
    +
    +	path, err := op.Path()
    +	if err != nil {
    +		return errors.Wrapf(err, "move operation failed to decode path")
    +	}
    +
    +	con, key = findObject(doc, path)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
    +	}
    +
    +	err = con.add(key, val)
    +	if err != nil {
    +		return errors.Wrapf(err, "error in move for path: '%s'", path)
    +	}
    +
    +	return nil
    +}
    +
    +func (p Patch) test(doc *container, op Operation) error {
    +	path, err := op.Path()
    +	if err != nil {
    +		return errors.Wrapf(err, "test operation failed to decode path")
    +	}
    +
    +	if path == "" {
    +		var self lazyNode
    +
    +		switch sv := (*doc).(type) {
    +		case *partialDoc:
    +			self.doc = *sv
    +			self.which = eDoc
    +		case *partialArray:
    +			self.ary = *sv
    +			self.which = eAry
    +		}
    +
    +		if self.equal(op.value()) {
    +			return nil
    +		}
    +
    +		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +	}
    +
    +	con, key := findObject(doc, path)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
    +	}
    +
    +	val, err := con.get(key)
    +	if err != nil {
    +		return errors.Wrapf(err, "error in test for path: '%s'", path)
    +	}
    +
    +	if val == nil {
    +		if op.value().raw == nil {
    +			return nil
    +		}
    +		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +	} else if op.value() == nil {
    +		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +	}
    +
    +	if val.equal(op.value()) {
    +		return nil
    +	}
    +
    +	return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    +}
    +
    +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
    +	from, err := op.From()
    +	if err != nil {
    +		return errors.Wrapf(err, "copy operation failed to decode from")
    +	}
    +
    +	con, key := findObject(doc, from)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
    +	}
    +
    +	val, err := con.get(key)
    +	if err != nil {
    +		return errors.Wrapf(err, "error in copy for from: '%s'", from)
    +	}
    +
    +	path, err := op.Path()
    +	if err != nil {
    +		return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
    +	}
    +
    +	con, key = findObject(doc, path)
    +
    +	if con == nil {
    +		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
    +	}
    +
    +	valCopy, sz, err := deepCopy(val)
    +	if err != nil {
    +		return errors.Wrapf(err, "error while performing deep copy")
    +	}
    +
    +	(*accumulatedCopySize) += int64(sz)
    +	if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
    +		return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
    +	}
    +
    +	err = con.add(key, valCopy)
    +	if err != nil {
    +		return errors.Wrapf(err, "error while adding value during copy")
    +	}
    +
    +	return nil
    +}
    +
    +// Equal indicates if 2 JSON documents have the same structural equality.
    +func Equal(a, b []byte) bool {
    +	ra := make(json.RawMessage, len(a))
    +	copy(ra, a)
    +	la := newLazyNode(&ra)
    +
    +	rb := make(json.RawMessage, len(b))
    +	copy(rb, b)
    +	lb := newLazyNode(&rb)
    +
    +	return la.equal(lb)
    +}
    +
    +// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
    +func DecodePatch(buf []byte) (Patch, error) {
    +	var p Patch
    +
    +	err := json.Unmarshal(buf, &p)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return p, nil
    +}
    +
    +// Apply mutates a JSON document according to the patch, and returns the new
    +// document.
    +func (p Patch) Apply(doc []byte) ([]byte, error) {
    +	return p.ApplyIndent(doc, "")
    +}
    +
    +// ApplyIndent mutates a JSON document according to the patch, and returns the new
    +// document indented.
    +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
    +	if len(doc) == 0 {
    +		return doc, nil
    +	}
    +
    +	var pd container
    +	if doc[0] == '[' {
    +		pd = &partialArray{}
    +	} else {
    +		pd = &partialDoc{}
    +	}
    +
    +	err := json.Unmarshal(doc, pd)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	err = nil
    +
    +	var accumulatedCopySize int64
    +
    +	for _, op := range p {
    +		switch op.Kind() {
    +		case "add":
    +			err = p.add(&pd, op)
    +		case "remove":
    +			err = p.remove(&pd, op)
    +		case "replace":
    +			err = p.replace(&pd, op)
    +		case "move":
    +			err = p.move(&pd, op)
    +		case "test":
    +			err = p.test(&pd, op)
    +		case "copy":
    +			err = p.copy(&pd, op, &accumulatedCopySize)
    +		default:
    +			err = fmt.Errorf("Unexpected kind: %s", op.Kind())
    +		}
    +
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +
    +	if indent != "" {
    +		return json.MarshalIndent(pd, "", indent)
    +	}
    +
    +	return json.Marshal(pd)
    +}
    +
    +// From http://tools.ietf.org/html/rfc6901#section-4 :
    +//
    +// Evaluation of each reference token begins by decoding any escaped
    +// character sequence.  This is performed by first transforming any
    +// occurrence of the sequence '~1' to '/', and then transforming any
    +// occurrence of the sequence '~0' to '~'.
    +
    +var (
    +	rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
    +)
    +
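    +// decodePatchKey unescapes a single RFC 6901 reference token; for example,
    +// decodePatchKey("a~1b") returns "a/b" and decodePatchKey("m~0n") returns "m~n".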
    +func decodePatchKey(k string) string {
    +	return rfc6901Decoder.Replace(k)
    +}
    diff --git a/vendor/k8s.io/api/admission/v1/doc.go b/vendor/k8s.io/api/admission/v1/doc.go
    index cbc6bb59dd..e7df9f629c 100644
    --- a/vendor/k8s.io/api/admission/v1/doc.go
    +++ b/vendor/k8s.io/api/admission/v1/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=false
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=admission.k8s.io
     
     package v1 // import "k8s.io/api/admission/v1"
    diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto
    index 941deb4fb4..9648aa58fb 100644
    --- a/vendor/k8s.io/api/admission/v1/generated.proto
    +++ b/vendor/k8s.io/api/admission/v1/generated.proto
    @@ -38,10 +38,10 @@ message AdmissionRequest {
       optional string uid = 1;
     
       // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
     
       // Resource is the fully-qualified resource being requested (for example, v1.pods)
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
     
       // SubResource is the subresource being requested, if any (for example, "status" or "scale")
       // +optional
    @@ -58,7 +58,7 @@ message AdmissionRequest {
       //
       // See documentation for the "matchPolicy" field in the webhook configuration type for more details.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
     
       // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
       // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
    @@ -71,7 +71,7 @@ message AdmissionRequest {
       //
       // See documentation for the "matchPolicy" field in the webhook configuration type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
     
       // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
       // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
    @@ -93,15 +93,15 @@ message AdmissionRequest {
       optional string operation = 7;
     
       // UserInfo is information about the requesting user
    -  optional k8s.io.api.authentication.v1.UserInfo userInfo = 8;
    +  optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8;
     
       // Object is the object from the incoming request.
       // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
     
       // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
       // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
     
       // DryRun indicates that modifications will definitely not be persisted for this request.
       // Defaults to false.
    @@ -114,7 +114,7 @@ message AdmissionRequest {
  // Operation might be a CREATE, in which case the Options will be a
       // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
       // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
     }
     
     // AdmissionResponse describes an admission response.
    @@ -129,7 +129,7 @@ message AdmissionResponse {
       // Result contains extra details into why an admission request was denied.
       // This field IS NOT consulted in any way if "Allowed" is "true".
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
     
       // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
       // +optional
    diff --git a/vendor/k8s.io/api/admission/v1/types.go b/vendor/k8s.io/api/admission/v1/types.go
    index 556fd1ad54..2def92da5b 100644
    --- a/vendor/k8s.io/api/admission/v1/types.go
    +++ b/vendor/k8s.io/api/admission/v1/types.go
    @@ -24,6 +24,7 @@ import (
     )
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // AdmissionReview describes an admission review request/response.
     type AdmissionReview struct {
    diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..ac81d993c6
    --- /dev/null
    +++ b/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,28 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
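```go
// A minimal sketch of consuming the generated APILifecycleIntroduced accessor;
// the introducedNoLaterThan helper and the cluster version used for comparison
// are illustrative only, not part of the vendored code.
package main

import (
	"fmt"

	admissionv1 "k8s.io/api/admission/v1"
)

// introducedNoLaterThan reports whether an API type introduced at
// (introMajor, introMinor) already exists in a cluster at (major, minor).
func introducedNoLaterThan(major, minor, introMajor, introMinor int) bool {
	if major != introMajor {
		return major > introMajor
	}
	return minor >= introMinor
}

func main() {
	var ar admissionv1.AdmissionReview
	introMajor, introMinor := ar.APILifecycleIntroduced() // 1, 19 per the generated code above
	fmt.Println(introducedNoLaterThan(1, 28, introMajor, introMinor))
}
```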
    diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto
    index ff0fa46d25..d27c05b727 100644
    --- a/vendor/k8s.io/api/admission/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto
    @@ -38,10 +38,10 @@ message AdmissionRequest {
       optional string uid = 1;
     
       // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
     
       // Resource is the fully-qualified resource being requested (for example, v1.pods)
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
     
       // SubResource is the subresource being requested, if any (for example, "status" or "scale")
       // +optional
    @@ -58,7 +58,7 @@ message AdmissionRequest {
       //
       // See documentation for the "matchPolicy" field in the webhook configuration type for more details.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
     
       // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
       // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
    @@ -71,7 +71,7 @@ message AdmissionRequest {
       //
       // See documentation for the "matchPolicy" field in the webhook configuration type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
     
       // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
       // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
    @@ -93,15 +93,15 @@ message AdmissionRequest {
       optional string operation = 7;
     
       // UserInfo is information about the requesting user
    -  optional k8s.io.api.authentication.v1.UserInfo userInfo = 8;
    +  optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8;
     
       // Object is the object from the incoming request.
       // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
     
       // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
       // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
     
       // DryRun indicates that modifications will definitely not be persisted for this request.
       // Defaults to false.
    @@ -114,7 +114,7 @@ message AdmissionRequest {
  // Operation might be a CREATE, in which case the Options will be a
       // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
       // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
     }
     
     // AdmissionResponse describes an admission response.
    @@ -129,7 +129,7 @@ message AdmissionResponse {
       // Result contains extra details into why an admission request was denied.
       // This field IS NOT consulted in any way if "Allowed" is "true".
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
     
       // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
       // +optional
    diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go
    index c3940f090c..ca0086188a 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1/doc.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go
    @@ -17,6 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=admissionregistration.k8s.io
     
     // Package v1 is the v1 version of the API.
    diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto
    index 44589007a2..e856e9eaf2 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto
    +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto
    @@ -156,7 +156,7 @@ message MatchResources {
       //
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
     
       // ObjectSelector decides whether to run the validation based on if the
       // object has matching labels. objectSelector is evaluated against both
    @@ -170,7 +170,7 @@ message MatchResources {
       // users may skip the admission webhook by setting the labels.
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
     
       // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
       // The policy cares about an operation if it matches _any_ Rule.
    @@ -290,7 +290,7 @@ message MutatingWebhook {
       //
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
     
       // ObjectSelector decides whether to run the webhook based on if the
       // object has matching labels. objectSelector is evaluated against both
    @@ -304,7 +304,7 @@ message MutatingWebhook {
       // users may skip the admission webhook by setting the labels.
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
     
       // SideEffects states whether this webhook has side effects.
       // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown).
    @@ -374,7 +374,7 @@ message MutatingWebhook {
     message MutatingWebhookConfiguration {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Webhooks is a list of webhooks and the affected resources and operations.
       // +optional
    @@ -390,7 +390,7 @@ message MutatingWebhookConfigurationList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of MutatingWebhookConfiguration.
       repeated MutatingWebhookConfiguration items = 2;
    @@ -463,7 +463,7 @@ message ParamRef {
       // mutually exclusive properties. If one is set, the other must be unset.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
     
       // `parameterNotFoundAction` controls the behavior of the binding when the resource
       // exists, and name or selector is valid, but there are no parameters
    @@ -570,16 +570,11 @@ message TypeChecking {
       repeated ExpressionWarning expressionWarnings = 1;
     }
     
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +genclient
    -// +genclient:nonNamespaced
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
     // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
     message ValidatingAdmissionPolicy {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the ValidatingAdmissionPolicy.
       optional ValidatingAdmissionPolicySpec spec = 2;
    @@ -606,7 +601,7 @@ message ValidatingAdmissionPolicy {
     message ValidatingAdmissionPolicyBinding {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
       optional ValidatingAdmissionPolicyBindingSpec spec = 2;
    @@ -617,7 +612,7 @@ message ValidatingAdmissionPolicyBindingList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of PolicyBinding.
       repeated ValidatingAdmissionPolicyBinding items = 2;
    @@ -688,14 +683,12 @@ message ValidatingAdmissionPolicyBindingSpec {
       repeated string validationActions = 4;
     }
     
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
     // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.
     message ValidatingAdmissionPolicyList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ValidatingAdmissionPolicy.
       repeated ValidatingAdmissionPolicy items = 2;
    @@ -800,7 +793,7 @@ message ValidatingAdmissionPolicyStatus {
       // +optional
       // +listType=map
       // +listMapKey=type
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
     }
     
     // ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
    @@ -891,7 +884,7 @@ message ValidatingWebhook {
       //
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
     
       // ObjectSelector decides whether to run the webhook based on if the
       // object has matching labels. objectSelector is evaluated against both
    @@ -905,7 +898,7 @@ message ValidatingWebhook {
       // users may skip the admission webhook by setting the labels.
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
     
       // SideEffects states whether this webhook has side effects.
       // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown).
    @@ -957,7 +950,7 @@ message ValidatingWebhook {
     message ValidatingWebhookConfiguration {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Webhooks is a list of webhooks and the affected resources and operations.
       // +optional
    @@ -973,7 +966,7 @@ message ValidatingWebhookConfigurationList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ValidatingWebhookConfiguration.
       repeated ValidatingWebhookConfiguration items = 2;
    diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go
    index 0510712b24..4efeb26748 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1/types.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go
    @@ -137,6 +137,7 @@ const (
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
     // +k8s:prerelease-lifecycle-gen:introduced=1.30
    +
     // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
     type ValidatingAdmissionPolicy struct {
     	metav1.TypeMeta `json:",inline"`
    @@ -195,6 +196,7 @@ type ExpressionWarning struct {
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
     // +k8s:prerelease-lifecycle-gen:introduced=1.30
    +
     // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.
     type ValidatingAdmissionPolicyList struct {
     	metav1.TypeMeta `json:",inline"`
    @@ -203,7 +205,7 @@ type ValidatingAdmissionPolicyList struct {
     	// +optional
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     	// List of ValidatingAdmissionPolicy.
    -	Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
    +	Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
    @@ -464,7 +466,7 @@ type ValidatingAdmissionPolicyBindingList struct {
     	// +optional
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     	// List of PolicyBinding.
    -	Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
    +	Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
    @@ -713,6 +715,7 @@ type NamedRuleWithOperations struct {
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.16
     
 // ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
     type ValidatingWebhookConfiguration struct {
    @@ -730,6 +733,7 @@ type ValidatingWebhookConfiguration struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.16
     
     // ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.
     type ValidatingWebhookConfigurationList struct {
    @@ -745,6 +749,7 @@ type ValidatingWebhookConfigurationList struct {
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.16
     
 // MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects and may change the object.
     type MutatingWebhookConfiguration struct {
    @@ -762,6 +767,7 @@ type MutatingWebhookConfiguration struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.16
     
     // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.
     type MutatingWebhookConfigurationList struct {
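```go
// Hypothetical stand-ins (not the real API types) showing what dropping
// ",omitempty" from the Items tags above changes: an empty list now serializes
// as "items":[] (and a nil slice as "items":null) instead of the key being
// omitted from the JSON entirely.
package main

import (
	"encoding/json"
	"fmt"
)

type listWithOmitEmpty struct {
	Items []string `json:"items,omitempty"`
}

type listAlwaysPresent struct {
	Items []string `json:"items"`
}

func main() {
	a, _ := json.Marshal(listWithOmitEmpty{Items: []string{}})
	b, _ := json.Marshal(listAlwaysPresent{Items: []string{}})
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"items":[]}
}
```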
    diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..0862bb1f2d
    --- /dev/null
    +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,70 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 16
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *MutatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 16
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 30
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 30
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 30
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 30
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 16
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ValidatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 16
    +}
    diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
    index db02dd929f..d5974d5ec4 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
    @@ -156,7 +156,7 @@ message MatchResources {
       //
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
     
       // ObjectSelector decides whether to run the validation based on if the
       // object has matching labels. objectSelector is evaluated against both
    @@ -170,7 +170,7 @@ message MatchResources {
       // users may skip the admission webhook by setting the labels.
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
     
       // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
       // The policy cares about an operation if it matches _any_ Rule.
    @@ -211,7 +211,7 @@ message NamedRuleWithOperations {
       repeated string resourceNames = 1;
     
       // RuleWithOperations is a tuple of Operations and Resources.
    -  optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
    +  optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
     }
     
     // ParamKind is a tuple of Group Kind and Version.
    @@ -267,7 +267,7 @@ message ParamRef {
       // mutually exclusive properties. If one is set, the other must be unset.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
     
       // `parameterNotFoundAction` controls the behavior of the binding when the resource
       // exists, and name or selector is valid, but there are no parameters
    @@ -295,7 +295,7 @@ message TypeChecking {
     message ValidatingAdmissionPolicy {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the ValidatingAdmissionPolicy.
       optional ValidatingAdmissionPolicySpec spec = 2;
    @@ -322,7 +322,7 @@ message ValidatingAdmissionPolicy {
     message ValidatingAdmissionPolicyBinding {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
       optional ValidatingAdmissionPolicyBindingSpec spec = 2;
    @@ -333,7 +333,7 @@ message ValidatingAdmissionPolicyBindingList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of PolicyBinding.
       repeated ValidatingAdmissionPolicyBinding items = 2;
    @@ -409,7 +409,7 @@ message ValidatingAdmissionPolicyList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ValidatingAdmissionPolicy.
       repeated ValidatingAdmissionPolicy items = 2;
    @@ -514,7 +514,7 @@ message ValidatingAdmissionPolicyStatus {
       // +optional
       // +listType=map
       // +listMapKey=type
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
     }
     
     // Validation specifies the CEL expression which is used to apply the validation.
    diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
    index bd6b17e158..78d918bc72 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
    @@ -142,7 +142,7 @@ type ValidatingAdmissionPolicyList struct {
     	// +optional
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     	// List of ValidatingAdmissionPolicy.
    -	Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
    +	Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
    @@ -404,7 +404,7 @@ type ValidatingAdmissionPolicyBindingList struct {
     	// +optional
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     	// List of PolicyBinding.
    -	Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
    +	Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
    index 91479acc20..30f99f64d0 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
    @@ -157,7 +157,7 @@ message MatchResources {
       //
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
     
       // ObjectSelector decides whether to run the validation based on if the
       // object has matching labels. objectSelector is evaluated against both
    @@ -171,7 +171,7 @@ message MatchResources {
       // users may skip the admission webhook by setting the labels.
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
     
       // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
       // The policy cares about an operation if it matches _any_ Rule.
    @@ -223,7 +223,7 @@ message MutatingWebhook {
       // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
       // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
       // +listType=atomic
    -  repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;
    +  repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;
     
       // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
       // allowed values are Ignore or Fail. Defaults to Ignore.
    @@ -291,7 +291,7 @@ message MutatingWebhook {
       //
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
     
       // ObjectSelector decides whether to run the webhook based on if the
       // object has matching labels. objectSelector is evaluated against both
    @@ -305,7 +305,7 @@ message MutatingWebhook {
       // users may skip the admission webhook by setting the labels.
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
     
       // SideEffects states whether this webhook has side effects.
       // Acceptable values are: Unknown, None, Some, NoneOnDryRun
    @@ -379,7 +379,7 @@ message MutatingWebhook {
     message MutatingWebhookConfiguration {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Webhooks is a list of webhooks and the affected resources and operations.
       // +optional
    @@ -395,7 +395,7 @@ message MutatingWebhookConfigurationList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of MutatingWebhookConfiguration.
       repeated MutatingWebhookConfiguration items = 2;
    @@ -410,7 +410,7 @@ message NamedRuleWithOperations {
       repeated string resourceNames = 1;
     
       // RuleWithOperations is a tuple of Operations and Resources.
    -  optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
    +  optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
     }
     
     // ParamKind is a tuple of Group Kind and Version.
    @@ -468,7 +468,7 @@ message ParamRef {
       // mutually exclusive properties. If one is set, the other must be unset.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
     
       // `parameterNotFoundAction` controls the behavior of the binding when the resource
       // exists, and name or selector is valid, but there are no parameters
    @@ -523,7 +523,7 @@ message TypeChecking {
     message ValidatingAdmissionPolicy {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the ValidatingAdmissionPolicy.
       optional ValidatingAdmissionPolicySpec spec = 2;
    @@ -550,7 +550,7 @@ message ValidatingAdmissionPolicy {
     message ValidatingAdmissionPolicyBinding {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
       optional ValidatingAdmissionPolicyBindingSpec spec = 2;
    @@ -561,7 +561,7 @@ message ValidatingAdmissionPolicyBindingList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of PolicyBinding.
       repeated ValidatingAdmissionPolicyBinding items = 2;
    @@ -639,7 +639,7 @@ message ValidatingAdmissionPolicyList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ValidatingAdmissionPolicy.
       repeated ValidatingAdmissionPolicy items = 2;
    @@ -744,7 +744,7 @@ message ValidatingAdmissionPolicyStatus {
       // +optional
       // +listType=map
       // +listMapKey=type
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
     }
     
     // ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
    @@ -767,7 +767,7 @@ message ValidatingWebhook {
       // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
       // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
       // +listType=atomic
    -  repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;
    +  repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;
     
       // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
       // allowed values are Ignore or Fail. Defaults to Ignore.
    @@ -835,7 +835,7 @@ message ValidatingWebhook {
       //
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
     
       // ObjectSelector decides whether to run the webhook based on if the
       // object has matching labels. objectSelector is evaluated against both
    @@ -849,7 +849,7 @@ message ValidatingWebhook {
       // users may skip the admission webhook by setting the labels.
       // Default to the empty LabelSelector, which matches everything.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
     
       // SideEffects states whether this webhook has side effects.
       // Acceptable values are: Unknown, None, Some, NoneOnDryRun
    @@ -906,7 +906,7 @@ message ValidatingWebhook {
     message ValidatingWebhookConfiguration {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Webhooks is a list of webhooks and the affected resources and operations.
       // +optional
    @@ -922,7 +922,7 @@ message ValidatingWebhookConfigurationList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ValidatingWebhookConfiguration.
       repeated ValidatingWebhookConfiguration items = 2;
    diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
    index cf1e29a6ca..0f59031239 100644
    --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
    +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
    @@ -158,7 +158,7 @@ type ValidatingAdmissionPolicyList struct {
     	// +optional
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     	// List of ValidatingAdmissionPolicy.
    -	Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
    +	Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
    @@ -419,7 +419,7 @@ type ValidatingAdmissionPolicyBindingList struct {
     	// +optional
     	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
     	// List of PolicyBinding.
    -	Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
    +	Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
     
     // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
    diff --git a/vendor/k8s.io/api/apidiscovery/v2/doc.go b/vendor/k8s.io/api/apidiscovery/v2/doc.go
    index d47aa85976..4f3ad5f139 100644
    --- a/vendor/k8s.io/api/apidiscovery/v2/doc.go
    +++ b/vendor/k8s.io/api/apidiscovery/v2/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=apidiscovery.k8s.io
     
     package v2 // import "k8s.io/api/apidiscovery/v2"
    diff --git a/vendor/k8s.io/api/apidiscovery/v2/generated.proto b/vendor/k8s.io/api/apidiscovery/v2/generated.proto
    index fa56318a6d..62f2d7f2c1 100644
    --- a/vendor/k8s.io/api/apidiscovery/v2/generated.proto
    +++ b/vendor/k8s.io/api/apidiscovery/v2/generated.proto
    @@ -38,7 +38,7 @@ message APIGroupDiscovery {
       // name is allowed to be "" to represent the legacy, ungroupified resources.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // versions are the versions supported in this group. They are sorted in descending order of preference,
       // with the preferred version being the first entry.
    @@ -55,7 +55,7 @@ message APIGroupDiscoveryList {
       // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of groups for discovery. The groups are listed in priority order.
       repeated APIGroupDiscovery items = 2;
    @@ -72,7 +72,7 @@ message APIResourceDiscovery {
       // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
  // APIs may return other object types at their discretion, such as error conditions, requests for alternate representations, or other operation-specific behavior.
       // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
     
       // scope indicates the scope of a resource, either Cluster or Namespaced
       optional string scope = 3;
    @@ -112,7 +112,7 @@ message APISubresourceDiscovery {
     
       // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
       // Some subresources do not return normal resources, these will have null or empty return types.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
     
       // acceptedTypes describes the kinds that this endpoint accepts.
       // Subresources may accept the standard content types or define
    @@ -122,7 +122,7 @@ message APISubresourceDiscovery {
       // +listMapKey=group
       // +listMapKey=version
       // +listMapKey=kind
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3;
     
       // verbs is a list of supported API operation types (this includes
       // but is not limited to get, list, watch, create, update, patch,
    diff --git a/vendor/k8s.io/api/apidiscovery/v2/types.go b/vendor/k8s.io/api/apidiscovery/v2/types.go
    index f0e31bcde5..449679b61d 100644
    --- a/vendor/k8s.io/api/apidiscovery/v2/types.go
    +++ b/vendor/k8s.io/api/apidiscovery/v2/types.go
    @@ -21,6 +21,7 @@ import (
     )
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.30
     
     // APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery.
     // This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated
    @@ -37,6 +38,7 @@ type APIGroupDiscoveryList struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.30
     
     // APIGroupDiscovery holds information about which resources are being served for all version of the API Group.
     // It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version.
    diff --git a/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..b7132c647d
    --- /dev/null
    +++ b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v2
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 30
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 30
    +}
    diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto
    index a09af750ba..e9ae88072a 100644
    --- a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto
    +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto
    @@ -38,7 +38,7 @@ message APIGroupDiscovery {
       // name is allowed to be "" to represent the legacy, ungroupified resources.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // versions are the versions supported in this group. They are sorted in descending order of preference,
       // with the preferred version being the first entry.
    @@ -55,7 +55,7 @@ message APIGroupDiscoveryList {
       // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of groups for discovery. The groups are listed in priority order.
       repeated APIGroupDiscovery items = 2;
    @@ -72,7 +72,7 @@ message APIResourceDiscovery {
       // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
  // APIs may return other object types at their discretion, such as error conditions, requests for alternate representations, or other operation-specific behavior.
       // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
     
       // scope indicates the scope of a resource, either Cluster or Namespaced
       optional string scope = 3;
    @@ -112,7 +112,7 @@ message APISubresourceDiscovery {
     
       // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
       // Some subresources do not return normal resources, these will have null or empty return types.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
     
       // acceptedTypes describes the kinds that this endpoint accepts.
       // Subresources may accept the standard content types or define
    @@ -122,7 +122,7 @@ message APISubresourceDiscovery {
       // +listMapKey=group
       // +listMapKey=version
       // +listMapKey=kind
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3;
     
       // verbs is a list of supported API operation types (this includes
       // but is not limited to get, list, watch, create, update, patch,
    diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
    index ef44290480..8a77860720 100644
    --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
    @@ -52,7 +52,7 @@ message ServerStorageVersion {
     // Storage version of a specific resource.
     message StorageVersion {
       // The name is ..
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec is an empty spec. It is here to comply with Kubernetes API style.
       optional StorageVersionSpec spec = 2;
    @@ -77,7 +77,7 @@ message StorageVersionCondition {
       optional int64 observedGeneration = 3;
     
       // Last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
     
       // The reason for the condition's last transition.
       // +required
    @@ -93,7 +93,7 @@ message StorageVersionList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items holds a list of StorageVersion
       repeated StorageVersion items = 2;
    diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go
    index 61dc97bde5..d189e860f2 100644
    --- a/vendor/k8s.io/api/apps/v1/doc.go
    +++ b/vendor/k8s.io/api/apps/v1/doc.go
    @@ -17,5 +17,6 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     package v1 // import "k8s.io/api/apps/v1"
    diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
    index 9001416861..d864f2eebf 100644
    --- a/vendor/k8s.io/api/apps/v1/generated.proto
    +++ b/vendor/k8s.io/api/apps/v1/generated.proto
    @@ -43,10 +43,10 @@ message ControllerRevision {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Data is the serialized representation of the state.
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
     
       // Revision indicates the revision of the state represented by Data.
       optional int64 revision = 3;
    @@ -56,7 +56,7 @@ message ControllerRevision {
     message ControllerRevisionList {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of ControllerRevisions
       repeated ControllerRevision items = 2;
    @@ -67,7 +67,7 @@ message DaemonSet {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // The desired behavior of this daemon set.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -93,7 +93,7 @@ message DaemonSetCondition {
     
       // Last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -109,7 +109,7 @@ message DaemonSetList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // A list of daemon sets.
       repeated DaemonSet items = 2;
    @@ -121,7 +121,7 @@ message DaemonSetSpec {
       // Must match in order to be controlled.
       // It must match the pod template's labels.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
     
       // An object that describes the pod that will be created.
       // The DaemonSet will create exactly one copy of this pod on every node
    @@ -129,7 +129,7 @@ message DaemonSetSpec {
       // selector is specified).
       // The only allowed template.spec.restartPolicy value is "Always".
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 2;
     
       // An update strategy to replace existing DaemonSet pods with new pods.
       // +optional
    @@ -225,7 +225,7 @@ message Deployment {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the Deployment.
       // +optional
    @@ -245,10 +245,10 @@ message DeploymentCondition {
       optional string status = 2;
     
       // The last time this condition was updated.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
     
       // Last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
     
       // The reason for the condition's last transition.
       optional string reason = 4;
    @@ -261,7 +261,7 @@ message DeploymentCondition {
     message DeploymentList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of Deployments.
       repeated Deployment items = 2;
    @@ -277,11 +277,11 @@ message DeploymentSpec {
       // Label selector for pods. Existing ReplicaSets whose pods are
       // selected by this will be the ones affected by this deployment.
       // It must match the pod template's labels.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // Template describes the pods that will be created.
       // The only allowed template.spec.restartPolicy value is "Always".
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     
       // The deployment strategy to use to replace existing pods with new ones.
       // +optional
    @@ -376,7 +376,7 @@ message ReplicaSet {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the specification of the desired behavior of the ReplicaSet.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -402,7 +402,7 @@ message ReplicaSetCondition {
     
       // The last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -418,7 +418,7 @@ message ReplicaSetList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ReplicaSets.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    @@ -444,13 +444,13 @@ message ReplicaSetSpec {
       // Label keys and values that must match in order to be controlled by this replica set.
       // It must match the pod template's labels.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // Template is the object that describes the pod that will be created if
       // insufficient replicas are detected.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
       // +optional
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
    @@ -501,7 +501,7 @@ message RollingUpdateDaemonSet {
       // 70% of original number of DaemonSet pods are available at all times during
       // the update.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
     
       // The maximum number of nodes with an existing available DaemonSet pod that
       // can have an updated DaemonSet pod during during an update.
    @@ -522,7 +522,7 @@ message RollingUpdateDaemonSet {
       // so resource intensive daemonsets should take into account that they may
       // cause evictions during disruption.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
     }
     
     // Spec to control the desired behavior of rolling update.
    @@ -538,7 +538,7 @@ message RollingUpdateDeployment {
       // that the total number of pods available at all times during the update is at
       // least 70% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
     
       // The maximum number of pods that can be scheduled above the desired number of
       // pods.
    @@ -552,7 +552,7 @@ message RollingUpdateDeployment {
       // new ReplicaSet can be scaled up further, ensuring that total number of pods running
       // at any time during the update is at most 130% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
     }
     
     // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
    @@ -572,7 +572,7 @@ message RollingUpdateStatefulSetStrategy {
       // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
       // will be counted towards MaxUnavailable.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
     }
     
     // StatefulSet represents a set of pods with consistent identities.
    @@ -586,7 +586,7 @@ message StatefulSet {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the desired identities of pods in this set.
       // +optional
    @@ -608,7 +608,7 @@ message StatefulSetCondition {
     
       // Last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -624,7 +624,7 @@ message StatefulSetList {
       // Standard list's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of stateful sets.
       repeated StatefulSet items = 2;
    @@ -675,7 +675,7 @@ message StatefulSetSpec {
       // selector is a label query over pods that should match the replica count.
       // It must match the pod template's labels.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // template is the object that describes the pod that will be created if
       // insufficient replicas are detected. Each pod stamped out by the StatefulSet
    @@ -684,7 +684,7 @@ message StatefulSetSpec {
   // <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
       // "web" with index number "3" would be named "web-3".
       // The only allowed template.spec.restartPolicy value is "Always".
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     
       // volumeClaimTemplates is a list of claims that pods are allowed to reference.
       // The StatefulSet controller is responsible for mapping network identities to
    @@ -695,7 +695,7 @@ message StatefulSetSpec {
       // TODO: Define the behavior if a claim already exists with the same name.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
    +  repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
     
       // serviceName is the name of the service that governs this StatefulSet.
       // This service must exist before the StatefulSet, and is responsible for
    @@ -738,14 +738,13 @@ message StatefulSetSpec {
       // policy allows the lifecycle to be altered, for example by deleting persistent
       // volume claims when their stateful set is deleted, or when their pod is scaled
       // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled,
    -  // which is alpha.  +optional
    +  // which is beta.
    +  // +optional
       optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10;
     
       // ordinals controls the numbering of replica indices in a StatefulSet. The
       // default ordinals behavior assigns a "0" index to the first replica and
    -  // increments the index by one for each additional replica requested. Using
    -  // the ordinals field requires the StatefulSetStartOrdinal feature gate to be
    -  // enabled, which is beta.
    +  // increments the index by one for each additional replica requested.
       // +optional
       optional StatefulSetOrdinals ordinals = 11;
     }
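
The RollingUpdateDeployment and RollingUpdateDaemonSet comments in the hunks above describe maxUnavailable and maxSurge as IntOrString values (an absolute count or a percentage). As a minimal sketch, not part of this patch, assuming the vendored k8s.io/api and k8s.io/apimachinery modules from go.mod, such a strategy could be built in Go like this:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// maxUnavailable as a percentage, maxSurge as an absolute count, matching the
	// IntOrString semantics documented in the RollingUpdateDeployment comments.
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromInt32(2)

	strategy := appsv1.DeploymentStrategy{
		Type: appsv1.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDeployment{
			MaxUnavailable: &maxUnavailable,
			MaxSurge:       &maxSurge,
		},
	}
	fmt.Println(strategy.Type, strategy.RollingUpdate.MaxUnavailable, strategy.RollingUpdate.MaxSurge)
}
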
    diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
    index 96ff620986..e942cd526e 100644
    --- a/vendor/k8s.io/api/apps/v1/types.go
    +++ b/vendor/k8s.io/api/apps/v1/types.go
    @@ -37,6 +37,7 @@ const (
     // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // StatefulSet represents a set of pods with consistent identities.
     // Identities are defined as:
    @@ -255,14 +256,13 @@ type StatefulSetSpec struct {
     	// policy allows the lifecycle to be altered, for example by deleting persistent
     	// volume claims when their stateful set is deleted, or when their pod is scaled
     	// down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled,
    -	// which is alpha.  +optional
    +	// which is beta.
    +	// +optional
     	PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"`
     
     	// ordinals controls the numbering of replica indices in a StatefulSet. The
     	// default ordinals behavior assigns a "0" index to the first replica and
    -	// increments the index by one for each additional replica requested. Using
    -	// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
    -	// enabled, which is beta.
    +	// increments the index by one for each additional replica requested.
     	// +optional
     	Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"`
     }
    @@ -335,6 +335,7 @@ type StatefulSetCondition struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // StatefulSetList is a collection of StatefulSets.
     type StatefulSetList struct {
    @@ -353,6 +354,7 @@ type StatefulSetList struct {
     // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // Deployment enables declarative updates for Pods and ReplicaSets.
     type Deployment struct {
    @@ -555,6 +557,7 @@ type DeploymentCondition struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // DeploymentList is a list of Deployments.
     type DeploymentList struct {
    @@ -747,6 +750,7 @@ type DaemonSetCondition struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // DaemonSet represents the configuration of a daemon set.
     type DaemonSet struct {
    @@ -778,6 +782,7 @@ const (
     )
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // DaemonSetList is a collection of daemon sets.
     type DaemonSetList struct {
    @@ -796,6 +801,7 @@ type DaemonSetList struct {
     // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // ReplicaSet ensures that a specified number of pod replicas are running at any given time.
     type ReplicaSet struct {
    @@ -823,6 +829,7 @@ type ReplicaSet struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // ReplicaSetList is a collection of ReplicaSets.
     type ReplicaSetList struct {
    @@ -925,6 +932,7 @@ type ReplicaSetCondition struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // ControllerRevision implements an immutable snapshot of state data. Clients
     // are responsible for serializing and deserializing the objects that contain
    @@ -950,6 +958,7 @@ type ControllerRevision struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.9
     
     // ControllerRevisionList is a resource containing a list of ControllerRevision objects.
     type ControllerRevisionList struct {
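
The comment changes above note that the StatefulSetAutoDeletePVC gate backing persistentVolumeClaimRetentionPolicy is now described as beta, and that ordinals no longer mentions the StatefulSetStartOrdinal gate. A hedged sketch, not part of this patch, of how these two StatefulSetSpec fields might be populated with the vendored apps/v1 types:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sts := appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
		Spec: appsv1.StatefulSetSpec{
			// Delete PVCs when the StatefulSet is deleted, retain them on scale-down;
			// per the updated comment this still requires the StatefulSetAutoDeletePVC gate (beta).
			PersistentVolumeClaimRetentionPolicy: &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
				WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
				WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
			},
			// Start replica ordinals at 1 instead of the default 0; the updated comment
			// no longer ties this field to a feature gate.
			Ordinals: &appsv1.StatefulSetOrdinals{Start: 1},
		},
	}
	fmt.Println(sts.Name, sts.Spec.Ordinals.Start)
}
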
    diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
    index 6676da0640..f3e221a0e9 100644
    --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
    @@ -354,8 +354,8 @@ var map_StatefulSetSpec = map[string]string{
     	"updateStrategy":                       "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
     	"revisionHistoryLimit":                 "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
     	"minReadySeconds":                      "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
    -	"persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.  +optional",
    -	"ordinals":                             "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.",
    +	"persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta.",
    +	"ordinals":                             "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
     }
     
     func (StatefulSetSpec) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..34a036b625
    --- /dev/null
    +++ b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,82 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ControllerRevision) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ControllerRevisionList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *DaemonSet) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *DaemonSetList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Deployment) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *DeploymentList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ReplicaSetList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *StatefulSet) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *StatefulSetList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 9
    +}
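
The new generated file above adds APILifecycleIntroduced methods to the apps/v1 types, each reporting release 1.9. A minimal usage sketch, not part of this patch, assuming the vendored k8s.io/api module:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	var d appsv1.Deployment
	var s appsv1.StatefulSet
	// Both methods come from zz_generated.prerelease-lifecycle.go and return (1, 9).
	dMajor, dMinor := d.APILifecycleIntroduced()
	sMajor, sMinor := s.APILifecycleIntroduced()
	fmt.Printf("Deployment: v%d.%d, StatefulSet: v%d.%d\n", dMajor, dMinor, sMajor, sMinor)
}
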
    diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
    index 8965622257..4b0fa366cf 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
    @@ -45,10 +45,10 @@ message ControllerRevision {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // data is the serialized representation of the state.
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
     
       // revision indicates the revision of the state represented by Data.
       optional int64 revision = 3;
    @@ -58,7 +58,7 @@ message ControllerRevision {
     message ControllerRevisionList {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of ControllerRevisions
       repeated ControllerRevision items = 2;
    @@ -70,7 +70,7 @@ message ControllerRevisionList {
     message Deployment {
       // Standard object metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the Deployment.
       // +optional
    @@ -90,10 +90,10 @@ message DeploymentCondition {
       optional string status = 2;
     
       // The last time this condition was updated.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
     
       // Last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
     
       // The reason for the condition's last transition.
       optional string reason = 4;
    @@ -106,7 +106,7 @@ message DeploymentCondition {
     message DeploymentList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of Deployments.
       repeated Deployment items = 2;
    @@ -136,11 +136,11 @@ message DeploymentSpec {
       // selector is the label selector for pods. Existing ReplicaSets whose pods are
       // selected by this will be the ones affected by this deployment.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // Template describes the pods that will be created.
       // The only allowed template.spec.restartPolicy value is "Always".
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     
       // The deployment strategy to use to replace existing pods with new ones.
       // +optional
    @@ -254,7 +254,7 @@ message RollingUpdateDeployment {
       // that the total number of pods available at all times during the update is at
       // least 70% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
     
       // The maximum number of pods that can be scheduled above the desired number of
       // pods.
    @@ -268,7 +268,7 @@ message RollingUpdateDeployment {
       // new ReplicaSet can be scaled up further, ensuring that total number of pods running
       // at any time during the update is at most 130% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
     }
     
     // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
    @@ -287,14 +287,14 @@ message RollingUpdateStatefulSetStrategy {
       // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
       // will be counted towards MaxUnavailable.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
     }
     
     // Scale represents a scaling request for a resource.
     message Scale {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
       // +optional
    @@ -342,7 +342,7 @@ message ScaleStatus {
     // map to the same storage identity.
     message StatefulSet {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the desired identities of pods in this set.
       // +optional
    @@ -364,7 +364,7 @@ message StatefulSetCondition {
     
       // Last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -378,7 +378,7 @@ message StatefulSetCondition {
     // StatefulSetList is a collection of StatefulSets.
     message StatefulSetList {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       repeated StatefulSet items = 2;
     }
    @@ -429,7 +429,7 @@ message StatefulSetSpec {
       // If empty, defaulted to labels on the pod template.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // template is the object that describes the pod that will be created if
       // insufficient replicas are detected. Each pod stamped out by the StatefulSet
    @@ -437,7 +437,7 @@ message StatefulSetSpec {
       // of the StatefulSet. Each pod will be named with the format
   // <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
       // "web" with index number "3" would be named "web-3".
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     
       // volumeClaimTemplates is a list of claims that pods are allowed to reference.
       // The StatefulSet controller is responsible for mapping network identities to
    @@ -448,7 +448,7 @@ message StatefulSetSpec {
       // TODO: Define the behavior if a claim already exists with the same name.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
    +  repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
     
       // serviceName is the name of the service that governs this StatefulSet.
       // This service must exist before the StatefulSet, and is responsible for
    @@ -493,9 +493,7 @@ message StatefulSetSpec {
     
       // ordinals controls the numbering of replica indices in a StatefulSet. The
       // default ordinals behavior assigns a "0" index to the first replica and
    -  // increments the index by one for each additional replica requested. Using
    -  // the ordinals field requires the StatefulSetStartOrdinal feature gate to be
    -  // enabled, which is beta.
    +  // increments the index by one for each additional replica requested.
       // +optional
       optional StatefulSetOrdinals ordinals = 11;
     }
    diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
    index bdf9f93a9b..07bfa88c5f 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/types.go
    +++ b/vendor/k8s.io/api/apps/v1beta1/types.go
    @@ -297,9 +297,7 @@ type StatefulSetSpec struct {
     
     	// ordinals controls the numbering of replica indices in a StatefulSet. The
     	// default ordinals behavior assigns a "0" index to the first replica and
    -	// increments the index by one for each additional replica requested. Using
    -	// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
    -	// enabled, which is beta.
    +	// increments the index by one for each additional replica requested.
     	// +optional
     	Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"`
     }
    diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
    index a62e9869d6..9e7fb1adc2 100644
    --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
    @@ -259,7 +259,7 @@ var map_StatefulSetSpec = map[string]string{
     	"revisionHistoryLimit":                 "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
     	"minReadySeconds":                      "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
     	"persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.",
    -	"ordinals":                             "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.",
    +	"ordinals":                             "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
     }
     
     func (StatefulSetSpec) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
    index 3ae8a80094..d3db8956e8 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto
    +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
    @@ -45,10 +45,10 @@ message ControllerRevision {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Data is the serialized representation of the state.
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
     
       // Revision indicates the revision of the state represented by Data.
       optional int64 revision = 3;
    @@ -58,7 +58,7 @@ message ControllerRevision {
     message ControllerRevisionList {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of ControllerRevisions
       repeated ControllerRevision items = 2;
    @@ -71,7 +71,7 @@ message DaemonSet {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // The desired behavior of this daemon set.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -97,7 +97,7 @@ message DaemonSetCondition {
     
       // Last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -113,7 +113,7 @@ message DaemonSetList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // A list of daemon sets.
       repeated DaemonSet items = 2;
    @@ -125,7 +125,7 @@ message DaemonSetSpec {
       // Must match in order to be controlled.
       // It must match the pod template's labels.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
     
       // An object that describes the pod that will be created.
       // The DaemonSet will create exactly one copy of this pod on every node
    @@ -133,7 +133,7 @@ message DaemonSetSpec {
       // selector is specified).
       // The only allowed template.spec.restartPolicy value is "Always".
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 2;
     
       // An update strategy to replace existing DaemonSet pods with new pods.
       // +optional
    @@ -230,7 +230,7 @@ message DaemonSetUpdateStrategy {
     message Deployment {
       // Standard object metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the Deployment.
       // +optional
    @@ -250,10 +250,10 @@ message DeploymentCondition {
       optional string status = 2;
     
       // The last time this condition was updated.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
     
       // Last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
     
       // The reason for the condition's last transition.
       optional string reason = 4;
    @@ -266,7 +266,7 @@ message DeploymentCondition {
     message DeploymentList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of Deployments.
       repeated Deployment items = 2;
    @@ -282,11 +282,11 @@ message DeploymentSpec {
       // Label selector for pods. Existing ReplicaSets whose pods are
       // selected by this will be the ones affected by this deployment.
       // It must match the pod template's labels.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // Template describes the pods that will be created.
       // The only allowed template.spec.restartPolicy value is "Always".
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     
       // The deployment strategy to use to replace existing pods with new ones.
       // +optional
    @@ -382,7 +382,7 @@ message ReplicaSet {
       // be the same as the Pod(s) that the ReplicaSet manages.
       // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the specification of the desired behavior of the ReplicaSet.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -408,7 +408,7 @@ message ReplicaSetCondition {
     
       // The last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -424,7 +424,7 @@ message ReplicaSetList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ReplicaSets.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    @@ -450,13 +450,13 @@ message ReplicaSetSpec {
       // Label keys and values that must match in order to be controlled by this replica set.
       // It must match the pod template's labels.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // Template is the object that describes the pod that will be created if
       // insufficient replicas are detected.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
       // +optional
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
    @@ -507,7 +507,7 @@ message RollingUpdateDaemonSet {
       // 70% of original number of DaemonSet pods are available at all times during
       // the update.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
     
       // The maximum number of nodes with an existing available DaemonSet pod that
       // can have an updated DaemonSet pod during during an update.
    @@ -528,7 +528,7 @@ message RollingUpdateDaemonSet {
       // so resource intensive daemonsets should take into account that they may
       // cause evictions during disruption.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
     }
     
     // Spec to control the desired behavior of rolling update.
    @@ -544,7 +544,7 @@ message RollingUpdateDeployment {
       // that the total number of pods available at all times during the update is at
       // least 70% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
     
       // The maximum number of pods that can be scheduled above the desired number of
       // pods.
    @@ -558,7 +558,7 @@ message RollingUpdateDeployment {
       // new ReplicaSet can be scaled up further, ensuring that total number of pods running
       // at any time during the update is at most 130% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
     }
     
     // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
    @@ -578,14 +578,14 @@ message RollingUpdateStatefulSetStrategy {
       // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
       // will be counted towards MaxUnavailable.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
     }
     
     // Scale represents a scaling request for a resource.
     message Scale {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
       // +optional
    @@ -634,7 +634,7 @@ message ScaleStatus {
     // map to the same storage identity.
     message StatefulSet {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the desired identities of pods in this set.
       // +optional
    @@ -656,7 +656,7 @@ message StatefulSetCondition {
     
       // Last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -670,7 +670,7 @@ message StatefulSetCondition {
     // StatefulSetList is a collection of StatefulSets.
     message StatefulSetList {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       repeated StatefulSet items = 2;
     }
    @@ -720,7 +720,7 @@ message StatefulSetSpec {
       // selector is a label query over pods that should match the replica count.
       // It must match the pod template's labels.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // template is the object that describes the pod that will be created if
       // insufficient replicas are detected. Each pod stamped out by the StatefulSet
    @@ -729,7 +729,7 @@ message StatefulSetSpec {
   // <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
       // "web" with index number "3" would be named "web-3".
       // The only allowed template.spec.restartPolicy value is "Always".
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     
       // volumeClaimTemplates is a list of claims that pods are allowed to reference.
       // The StatefulSet controller is responsible for mapping network identities to
    @@ -740,7 +740,7 @@ message StatefulSetSpec {
       // TODO: Define the behavior if a claim already exists with the same name.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
    +  repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
     
       // serviceName is the name of the service that governs this StatefulSet.
       // This service must exist before the StatefulSet, and is responsible for
    @@ -785,9 +785,7 @@ message StatefulSetSpec {
     
       // ordinals controls the numbering of replica indices in a StatefulSet. The
       // default ordinals behavior assigns a "0" index to the first replica and
    -  // increments the index by one for each additional replica requested. Using
    -  // the ordinals field requires the StatefulSetStartOrdinal feature gate to be
    -  // enabled, which is beta.
    +  // increments the index by one for each additional replica requested.
       // +optional
       optional StatefulSetOrdinals ordinals = 11;
     }
    diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
    index 6981c2a175..f93a5bea7e 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/types.go
    +++ b/vendor/k8s.io/api/apps/v1beta2/types.go
    @@ -307,9 +307,7 @@ type StatefulSetSpec struct {
     
     	// ordinals controls the numbering of replica indices in a StatefulSet. The
     	// default ordinals behavior assigns a "0" index to the first replica and
    -	// increments the index by one for each additional replica requested. Using
    -	// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
    -	// enabled, which is beta.
    +	// increments the index by one for each additional replica requested.
     	// +optional
     	Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"`
     }
    diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
    index d7e9209915..0b8fe34af1 100644
    --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
    @@ -383,7 +383,7 @@ var map_StatefulSetSpec = map[string]string{
     	"revisionHistoryLimit":                 "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
     	"minReadySeconds":                      "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
     	"persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.",
    -	"ordinals":                             "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.",
    +	"ordinals":                             "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
     }
     
     func (StatefulSetSpec) SwaggerDoc() map[string]string {
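Editor's note: the three hunks above all drop the "requires the StatefulSetStartOrdinal feature gate" caveat from the ordinals documentation, presumably because that gate has since graduated, leaving the field usable without extra configuration. As a minimal sketch only (not part of this diff), the snippet below shows how a StatefulSet spec might set a custom start ordinal; it uses the stable apps/v1 group rather than the deprecated v1beta2 group vendored here, and the "web" name, labels, image, and start value 5 are placeholders.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative StatefulSet whose replica indices start at 5 instead of 0,
	// i.e. pods web-5, web-6, web-7. All names and values are placeholders.
	sts := appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
		Spec: appsv1.StatefulSetSpec{
			Replicas:    ptr(int32(3)),
			ServiceName: "web",
			Selector:    &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "web", Image: "nginx"}},
				},
			},
			// Ordinals.Start controls the first replica index, per the doc
			// comment updated in the hunks above.
			Ordinals: &appsv1.StatefulSetOrdinals{Start: 5},
		},
	}
	fmt.Println(sts.Name, *sts.Spec.Replicas, sts.Spec.Ordinals.Start)
}

// ptr is a small local helper to take the address of a literal value.
func ptr[T any](v T) *T { return &v }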
    diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go
    index 1614265bdf..3bdc89badc 100644
    --- a/vendor/k8s.io/api/authentication/v1/doc.go
    +++ b/vendor/k8s.io/api/authentication/v1/doc.go
    @@ -18,5 +18,6 @@ limitations under the License.
     // +k8s:protobuf-gen=package
     // +groupName=authentication.k8s.io
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     package v1 // import "k8s.io/api/authentication/v1"
    diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto
    index 1fe2f4f2ce..ae9763576c 100644
    --- a/vendor/k8s.io/api/authentication/v1/generated.proto
    +++ b/vendor/k8s.io/api/authentication/v1/generated.proto
    @@ -63,7 +63,7 @@ message SelfSubjectReview {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Status is filled in by the server with the user attributes.
       optional SelfSubjectReviewStatus status = 2;
    @@ -81,7 +81,7 @@ message TokenRequest {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated
       optional TokenRequestSpec spec = 2;
    @@ -123,7 +123,7 @@ message TokenRequestStatus {
       optional string token = 1;
     
       // ExpirationTimestamp is the time of expiration of the returned token.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2;
     }
     
     // TokenReview attempts to authenticate a token to a known user.
    @@ -133,7 +133,7 @@ message TokenReview {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated
       optional TokenReviewSpec spec = 2;
    diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go
    index 4f4400e305..2dc0707c4f 100644
    --- a/vendor/k8s.io/api/authentication/v1/types.go
    +++ b/vendor/k8s.io/api/authentication/v1/types.go
    @@ -45,6 +45,7 @@ const (
     // +genclient:nonNamespaced
     // +genclient:onlyVerbs=create
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.6
     
     // TokenReview attempts to authenticate a token to a known user.
     // Note: TokenReview requests may be cached by the webhook token authenticator
    @@ -134,6 +135,7 @@ func (t ExtraValue) String() string {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.10
     
     // TokenRequest requests a token for a given service account.
     type TokenRequest struct {
    @@ -206,6 +208,7 @@ type BoundObjectReference struct {
     // +genclient:nonNamespaced
     // +genclient:onlyVerbs=create
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.28
     
     // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request.
     // When using impersonation, users will receive the user info of the user being impersonated.  If impersonation or
    diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..b612bdec48
    --- /dev/null
    +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,40 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 28
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *TokenRequest) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 10
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *TokenReview) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 6
    +}
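Editor's note: the new zz_generated.prerelease-lifecycle.go above adds APILifecycleIntroduced methods to TokenReview, TokenRequest, and SelfSubjectReview. A short illustrative sketch of how that generated metadata could be read follows; the lifecycleIntroduced interface is a local convenience defined here for the example, not an API from the vendored packages.

package main

import (
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
)

// lifecycleIntroduced is satisfied by the generated APILifecycleIntroduced
// methods added in the file above.
type lifecycleIntroduced interface {
	APILifecycleIntroduced() (major, minor int)
}

func main() {
	// Each value reports the Kubernetes release in which its API type was introduced.
	for name, obj := range map[string]lifecycleIntroduced{
		"TokenReview":       &authenticationv1.TokenReview{},
		"TokenRequest":      &authenticationv1.TokenRequest{},
		"SelfSubjectReview": &authenticationv1.SelfSubjectReview{},
	} {
		major, minor := obj.APILifecycleIntroduced()
		fmt.Printf("%s introduced in v%d.%d\n", name, major, minor)
	}
}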
    diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto
    index 51d9252440..4585e5cdd3 100644
    --- a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto
    @@ -36,7 +36,7 @@ message SelfSubjectReview {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Status is filled in by the server with the user attributes.
       optional SelfSubjectReviewStatus status = 2;
    @@ -46,6 +46,6 @@ message SelfSubjectReview {
     message SelfSubjectReviewStatus {
       // User attributes of the user making this request.
       // +optional
    -  optional k8s.io.api.authentication.v1.UserInfo userInfo = 1;
    +  optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1;
     }
     
    diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto
    index 61658245d4..d0f6fe4402 100644
    --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto
    @@ -45,7 +45,7 @@ message SelfSubjectReview {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Status is filled in by the server with the user attributes.
       optional SelfSubjectReviewStatus status = 2;
    @@ -55,7 +55,7 @@ message SelfSubjectReview {
     message SelfSubjectReviewStatus {
       // User attributes of the user making this request.
       // +optional
    -  optional k8s.io.api.authentication.v1.UserInfo userInfo = 1;
    +  optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1;
     }
     
     // TokenReview attempts to authenticate a token to a known user.
    @@ -65,7 +65,7 @@ message TokenReview {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated
       optional TokenReviewSpec spec = 2;
    diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go
    index cf100e6b75..77e5a19c4c 100644
    --- a/vendor/k8s.io/api/authorization/v1/doc.go
    +++ b/vendor/k8s.io/api/authorization/v1/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=authorization.k8s.io
     
     package v1 // import "k8s.io/api/authorization/v1"
    diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go
    index dfa109b424..aed9a3a476 100644
    --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go
    +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go
    @@ -26,6 +26,7 @@ import (
     
     	proto "github.com/gogo/protobuf/proto"
     	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     
     	math "math"
     	math_bits "math/bits"
    @@ -72,10 +73,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
     
    +func (m *FieldSelectorAttributes) Reset()      { *m = FieldSelectorAttributes{} }
    +func (*FieldSelectorAttributes) ProtoMessage() {}
    +func (*FieldSelectorAttributes) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_aafd0e5e70cec678, []int{1}
    +}
    +func (m *FieldSelectorAttributes) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *FieldSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *FieldSelectorAttributes) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_FieldSelectorAttributes.Merge(m, src)
    +}
    +func (m *FieldSelectorAttributes) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *FieldSelectorAttributes) XXX_DiscardUnknown() {
    +	xxx_messageInfo_FieldSelectorAttributes.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_FieldSelectorAttributes proto.InternalMessageInfo
    +
    +func (m *LabelSelectorAttributes) Reset()      { *m = LabelSelectorAttributes{} }
    +func (*LabelSelectorAttributes) ProtoMessage() {}
    +func (*LabelSelectorAttributes) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_aafd0e5e70cec678, []int{2}
    +}
    +func (m *LabelSelectorAttributes) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LabelSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LabelSelectorAttributes) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LabelSelectorAttributes.Merge(m, src)
    +}
    +func (m *LabelSelectorAttributes) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LabelSelectorAttributes) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LabelSelectorAttributes.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LabelSelectorAttributes proto.InternalMessageInfo
    +
     func (m *LocalSubjectAccessReview) Reset()      { *m = LocalSubjectAccessReview{} }
     func (*LocalSubjectAccessReview) ProtoMessage() {}
     func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{1}
    +	return fileDescriptor_aafd0e5e70cec678, []int{3}
     }
     func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -103,7 +160,7 @@ var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo
     func (m *NonResourceAttributes) Reset()      { *m = NonResourceAttributes{} }
     func (*NonResourceAttributes) ProtoMessage() {}
     func (*NonResourceAttributes) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{2}
    +	return fileDescriptor_aafd0e5e70cec678, []int{4}
     }
     func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -131,7 +188,7 @@ var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo
     func (m *NonResourceRule) Reset()      { *m = NonResourceRule{} }
     func (*NonResourceRule) ProtoMessage() {}
     func (*NonResourceRule) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{3}
    +	return fileDescriptor_aafd0e5e70cec678, []int{5}
     }
     func (m *NonResourceRule) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -159,7 +216,7 @@ var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo
     func (m *ResourceAttributes) Reset()      { *m = ResourceAttributes{} }
     func (*ResourceAttributes) ProtoMessage() {}
     func (*ResourceAttributes) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{4}
    +	return fileDescriptor_aafd0e5e70cec678, []int{6}
     }
     func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -187,7 +244,7 @@ var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo
     func (m *ResourceRule) Reset()      { *m = ResourceRule{} }
     func (*ResourceRule) ProtoMessage() {}
     func (*ResourceRule) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{5}
    +	return fileDescriptor_aafd0e5e70cec678, []int{7}
     }
     func (m *ResourceRule) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -215,7 +272,7 @@ var xxx_messageInfo_ResourceRule proto.InternalMessageInfo
     func (m *SelfSubjectAccessReview) Reset()      { *m = SelfSubjectAccessReview{} }
     func (*SelfSubjectAccessReview) ProtoMessage() {}
     func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{6}
    +	return fileDescriptor_aafd0e5e70cec678, []int{8}
     }
     func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -243,7 +300,7 @@ var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo
     func (m *SelfSubjectAccessReviewSpec) Reset()      { *m = SelfSubjectAccessReviewSpec{} }
     func (*SelfSubjectAccessReviewSpec) ProtoMessage() {}
     func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{7}
    +	return fileDescriptor_aafd0e5e70cec678, []int{9}
     }
     func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -271,7 +328,7 @@ var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo
     func (m *SelfSubjectRulesReview) Reset()      { *m = SelfSubjectRulesReview{} }
     func (*SelfSubjectRulesReview) ProtoMessage() {}
     func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{8}
    +	return fileDescriptor_aafd0e5e70cec678, []int{10}
     }
     func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -299,7 +356,7 @@ var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo
     func (m *SelfSubjectRulesReviewSpec) Reset()      { *m = SelfSubjectRulesReviewSpec{} }
     func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
     func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{9}
    +	return fileDescriptor_aafd0e5e70cec678, []int{11}
     }
     func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -327,7 +384,7 @@ var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo
     func (m *SubjectAccessReview) Reset()      { *m = SubjectAccessReview{} }
     func (*SubjectAccessReview) ProtoMessage() {}
     func (*SubjectAccessReview) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{10}
    +	return fileDescriptor_aafd0e5e70cec678, []int{12}
     }
     func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -355,7 +412,7 @@ var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo
     func (m *SubjectAccessReviewSpec) Reset()      { *m = SubjectAccessReviewSpec{} }
     func (*SubjectAccessReviewSpec) ProtoMessage() {}
     func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{11}
    +	return fileDescriptor_aafd0e5e70cec678, []int{13}
     }
     func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -383,7 +440,7 @@ var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo
     func (m *SubjectAccessReviewStatus) Reset()      { *m = SubjectAccessReviewStatus{} }
     func (*SubjectAccessReviewStatus) ProtoMessage() {}
     func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{12}
    +	return fileDescriptor_aafd0e5e70cec678, []int{14}
     }
     func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -411,7 +468,7 @@ var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo
     func (m *SubjectRulesReviewStatus) Reset()      { *m = SubjectRulesReviewStatus{} }
     func (*SubjectRulesReviewStatus) ProtoMessage() {}
     func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_aafd0e5e70cec678, []int{13}
    +	return fileDescriptor_aafd0e5e70cec678, []int{15}
     }
     func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -438,6 +495,8 @@ var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo
     
     func init() {
     	proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue")
    +	proto.RegisterType((*FieldSelectorAttributes)(nil), "k8s.io.api.authorization.v1.FieldSelectorAttributes")
    +	proto.RegisterType((*LabelSelectorAttributes)(nil), "k8s.io.api.authorization.v1.LabelSelectorAttributes")
     	proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview")
     	proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes")
     	proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule")
    @@ -459,78 +518,85 @@ func init() {
     }
     
     var fileDescriptor_aafd0e5e70cec678 = []byte{
    -	// 1126 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45,
    -	0x14, 0xf7, 0xfa, 0x4f, 0x6a, 0x3f, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x11, 0x76, 0xb4, 0x48,
    -	0x90, 0xaa, 0x65, 0x97, 0x58, 0x6d, 0x13, 0x55, 0xaa, 0x90, 0xad, 0x46, 0x28, 0x52, 0x5b, 0xaa,
    -	0x89, 0x12, 0x89, 0x22, 0x10, 0xe3, 0xf5, 0xc4, 0x5e, 0x62, 0xef, 0x2e, 0x3b, 0xbb, 0x0e, 0xe1,
    -	0x54, 0x89, 0x2f, 0xc0, 0x91, 0x03, 0x07, 0xbe, 0x01, 0x17, 0x24, 0x6e, 0x1c, 0x38, 0xa0, 0x1c,
    -	0x7b, 0x2c, 0x12, 0xb2, 0xc8, 0x72, 0xe6, 0x3b, 0xa0, 0x99, 0x1d, 0x7b, 0xd7, 0xc9, 0xda, 0x8d,
    -	0x39, 0xd0, 0x4b, 0x6f, 0xde, 0xf7, 0xfb, 0xbd, 0x37, 0x6f, 0xde, 0xbf, 0x79, 0x86, 0x5b, 0x47,
    -	0xdb, 0x4c, 0xb7, 0x1c, 0x83, 0xb8, 0x96, 0x41, 0x02, 0xbf, 0xe3, 0x78, 0xd6, 0x37, 0xc4, 0xb7,
    -	0x1c, 0xdb, 0xe8, 0x6f, 0x1a, 0x6d, 0x6a, 0x53, 0x8f, 0xf8, 0xb4, 0xa5, 0xbb, 0x9e, 0xe3, 0x3b,
    -	0x68, 0x2d, 0x22, 0xeb, 0xc4, 0xb5, 0xf4, 0x31, 0xb2, 0xde, 0xdf, 0x5c, 0x7d, 0xbf, 0x6d, 0xf9,
    -	0x9d, 0xa0, 0xa9, 0x9b, 0x4e, 0xcf, 0x68, 0x3b, 0x6d, 0xc7, 0x10, 0x3a, 0xcd, 0xe0, 0x50, 0x7c,
    -	0x89, 0x0f, 0xf1, 0x2b, 0xb2, 0xb5, 0x7a, 0x27, 0x3e, 0xb8, 0x47, 0xcc, 0x8e, 0x65, 0x53, 0xef,
    -	0xc4, 0x70, 0x8f, 0xda, 0x5c, 0xc0, 0x8c, 0x1e, 0xf5, 0x49, 0x8a, 0x07, 0xab, 0xc6, 0x24, 0x2d,
    -	0x2f, 0xb0, 0x7d, 0xab, 0x47, 0x2f, 0x28, 0xdc, 0x7b, 0x95, 0x02, 0x33, 0x3b, 0xb4, 0x47, 0xce,
    -	0xeb, 0x69, 0x5b, 0x00, 0x3b, 0x5f, 0xfb, 0x1e, 0x39, 0x20, 0xdd, 0x80, 0xa2, 0x2a, 0x14, 0x2c,
    -	0x9f, 0xf6, 0x98, 0xaa, 0xac, 0xe7, 0x36, 0x4a, 0x8d, 0x52, 0x38, 0xa8, 0x16, 0x76, 0xb9, 0x00,
    -	0x47, 0xf2, 0xfb, 0xc5, 0xef, 0x7f, 0xac, 0x66, 0x9e, 0xff, 0xb9, 0x9e, 0xd1, 0x7e, 0xce, 0x82,
    -	0xfa, 0xc8, 0x31, 0x49, 0x77, 0x2f, 0x68, 0x7e, 0x49, 0x4d, 0xbf, 0x6e, 0x9a, 0x94, 0x31, 0x4c,
    -	0xfb, 0x16, 0x3d, 0x46, 0x5f, 0x40, 0x91, 0xdf, 0xac, 0x45, 0x7c, 0xa2, 0x2a, 0xeb, 0xca, 0x46,
    -	0xb9, 0xf6, 0x81, 0x1e, 0xc7, 0x74, 0xe4, 0xa0, 0xee, 0x1e, 0xb5, 0xb9, 0x80, 0xe9, 0x9c, 0xad,
    -	0xf7, 0x37, 0xf5, 0x8f, 0x85, 0xad, 0xc7, 0xd4, 0x27, 0x0d, 0x74, 0x3a, 0xa8, 0x66, 0xc2, 0x41,
    -	0x15, 0x62, 0x19, 0x1e, 0x59, 0x45, 0x07, 0x90, 0x67, 0x2e, 0x35, 0xd5, 0xac, 0xb0, 0x7e, 0x47,
    -	0x9f, 0x92, 0x31, 0x3d, 0xc5, 0xc3, 0x3d, 0x97, 0x9a, 0x8d, 0xab, 0xf2, 0x84, 0x3c, 0xff, 0xc2,
    -	0xc2, 0x1e, 0xfa, 0x1c, 0xe6, 0x98, 0x4f, 0xfc, 0x80, 0xa9, 0x39, 0x61, 0xf9, 0xde, 0xcc, 0x96,
    -	0x85, 0x76, 0xe3, 0x2d, 0x69, 0x7b, 0x2e, 0xfa, 0xc6, 0xd2, 0xaa, 0xf6, 0x29, 0x2c, 0x3f, 0x71,
    -	0x6c, 0x4c, 0x99, 0x13, 0x78, 0x26, 0xad, 0xfb, 0xbe, 0x67, 0x35, 0x03, 0x9f, 0x32, 0xb4, 0x0e,
    -	0x79, 0x97, 0xf8, 0x1d, 0x11, 0xae, 0x52, 0xec, 0xda, 0x53, 0xe2, 0x77, 0xb0, 0x40, 0x38, 0xa3,
    -	0x4f, 0xbd, 0xa6, 0xb8, 0x72, 0x82, 0x71, 0x40, 0xbd, 0x26, 0x16, 0x88, 0xf6, 0x15, 0x2c, 0x24,
    -	0x8c, 0xe3, 0xa0, 0x2b, 0x32, 0xca, 0xa1, 0xb1, 0x8c, 0x72, 0x0d, 0x86, 0x23, 0x39, 0x7a, 0x00,
    -	0x0b, 0x76, 0xac, 0xb3, 0x8f, 0x1f, 0x31, 0x35, 0x2b, 0xa8, 0x4b, 0xe1, 0xa0, 0x9a, 0x34, 0xc7,
    -	0x21, 0x7c, 0x9e, 0xab, 0xfd, 0x9a, 0x05, 0x94, 0x72, 0x1b, 0x03, 0x4a, 0x36, 0xe9, 0x51, 0xe6,
    -	0x12, 0x93, 0xca, 0x2b, 0x5d, 0x93, 0x0e, 0x97, 0x9e, 0x0c, 0x01, 0x1c, 0x73, 0x5e, 0x7d, 0x39,
    -	0xf4, 0x0e, 0x14, 0xda, 0x9e, 0x13, 0xb8, 0x22, 0x31, 0xa5, 0xc6, 0xbc, 0xa4, 0x14, 0x3e, 0xe2,
    -	0x42, 0x1c, 0x61, 0xe8, 0x26, 0x5c, 0xe9, 0x53, 0x8f, 0x59, 0x8e, 0xad, 0xe6, 0x05, 0x6d, 0x41,
    -	0xd2, 0xae, 0x1c, 0x44, 0x62, 0x3c, 0xc4, 0xd1, 0x6d, 0x28, 0x7a, 0xd2, 0x71, 0xb5, 0x20, 0xb8,
    -	0x8b, 0x92, 0x5b, 0x1c, 0x45, 0x70, 0xc4, 0x40, 0x77, 0xa1, 0xcc, 0x82, 0xe6, 0x48, 0x61, 0x4e,
    -	0x28, 0x2c, 0x49, 0x85, 0xf2, 0x5e, 0x0c, 0xe1, 0x24, 0x8f, 0x5f, 0x8b, 0xdf, 0x51, 0xbd, 0x32,
    -	0x7e, 0x2d, 0x1e, 0x02, 0x2c, 0x10, 0xed, 0x37, 0x05, 0xae, 0xce, 0x96, 0xb1, 0x5b, 0x50, 0x22,
    -	0xae, 0x25, 0xae, 0x3d, 0xcc, 0xd5, 0x3c, 0x8f, 0x6b, 0xfd, 0xe9, 0x6e, 0x24, 0xc4, 0x31, 0xce,
    -	0xc9, 0x43, 0x67, 0x78, 0x49, 0x8f, 0xc8, 0xc3, 0x23, 0x19, 0x8e, 0x71, 0xb4, 0x05, 0xf3, 0xc3,
    -	0x0f, 0x91, 0x24, 0x35, 0x2f, 0x14, 0xae, 0x85, 0x83, 0xea, 0x3c, 0x4e, 0x02, 0x78, 0x9c, 0xa7,
    -	0xfd, 0x92, 0x85, 0x95, 0x3d, 0xda, 0x3d, 0x7c, 0x3d, 0xb3, 0xe0, 0xd9, 0xd8, 0x2c, 0xd8, 0x9e,
    -	0xde, 0xb1, 0xe9, 0x5e, 0xbe, 0xb6, 0x79, 0xf0, 0x43, 0x16, 0xd6, 0xa6, 0xf8, 0x84, 0x8e, 0x01,
    -	0x79, 0x17, 0xda, 0x4b, 0xc6, 0xd1, 0x98, 0xea, 0xcb, 0xc5, 0xae, 0x6c, 0x5c, 0x0f, 0x07, 0xd5,
    -	0x94, 0x6e, 0xc5, 0x29, 0x47, 0xa0, 0x6f, 0x15, 0x58, 0xb6, 0xd3, 0x26, 0x95, 0x0c, 0x73, 0x6d,
    -	0xea, 0xe1, 0xa9, 0x33, 0xae, 0x71, 0x23, 0x1c, 0x54, 0xd3, 0xc7, 0x1f, 0x4e, 0x3f, 0x8b, 0xbf,
    -	0x32, 0xd7, 0x13, 0xe1, 0xe1, 0x0d, 0xf2, 0xff, 0xd5, 0xd5, 0x27, 0x63, 0x75, 0xb5, 0x75, 0xd9,
    -	0xba, 0x4a, 0x38, 0x39, 0xb1, 0xac, 0x3e, 0x3b, 0x57, 0x56, 0x77, 0x2f, 0x53, 0x56, 0x49, 0xc3,
    -	0xd3, 0xab, 0xea, 0x31, 0xac, 0x4e, 0x76, 0x68, 0xe6, 0xe1, 0xac, 0xfd, 0x94, 0x85, 0xa5, 0x37,
    -	0xcf, 0xfc, 0x2c, 0x6d, 0xfd, 0x7b, 0x1e, 0x56, 0xde, 0xb4, 0xf4, 0xa4, 0x45, 0x27, 0x60, 0xd4,
    -	0x93, 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x7a, 0x58, 0x20, 0x48, 0x83, 0xb9, 0x76, 0xf4, 0xba,
    -	0x45, 0xef, 0x0f, 0xf0, 0x00, 0xcb, 0xa7, 0x4d, 0x22, 0xa8, 0x05, 0x05, 0xca, 0xf7, 0x56, 0xb5,
    -	0xb0, 0x9e, 0xdb, 0x28, 0xd7, 0x3e, 0xfc, 0x2f, 0x95, 0xa1, 0x8b, 0xcd, 0x77, 0xc7, 0xf6, 0xbd,
    -	0x93, 0x78, 0x9d, 0x10, 0x32, 0x1c, 0x19, 0x47, 0x6f, 0x43, 0x2e, 0xb0, 0x5a, 0xf2, 0xb5, 0x2f,
    -	0x4b, 0x4a, 0x6e, 0x7f, 0xf7, 0x21, 0xe6, 0xf2, 0x55, 0x22, 0x97, 0x67, 0x61, 0x02, 0x2d, 0x42,
    -	0xee, 0x88, 0x9e, 0x44, 0x0d, 0x85, 0xf9, 0x4f, 0xf4, 0x00, 0x0a, 0x7d, 0xbe, 0x57, 0xcb, 0xf8,
    -	0xbe, 0x37, 0xd5, 0xc9, 0x78, 0x0d, 0xc7, 0x91, 0xd6, 0xfd, 0xec, 0xb6, 0xa2, 0xfd, 0xa1, 0xc0,
    -	0x8d, 0x89, 0xe5, 0xc7, 0xd7, 0x1d, 0xd2, 0xed, 0x3a, 0xc7, 0xb4, 0x25, 0x8e, 0x2d, 0xc6, 0xeb,
    -	0x4e, 0x3d, 0x12, 0xe3, 0x21, 0x8e, 0xde, 0x85, 0xb9, 0x16, 0xb5, 0x2d, 0xda, 0x12, 0x8b, 0x51,
    -	0x31, 0xae, 0xdc, 0x87, 0x42, 0x8a, 0x25, 0xca, 0x79, 0x1e, 0x25, 0xcc, 0xb1, 0xe5, 0x2a, 0x36,
    -	0xe2, 0x61, 0x21, 0xc5, 0x12, 0x45, 0x75, 0x58, 0xa0, 0xdc, 0x4d, 0xe1, 0xff, 0x8e, 0xe7, 0x39,
    -	0xc3, 0x8c, 0xae, 0x48, 0x85, 0x85, 0x9d, 0x71, 0x18, 0x9f, 0xe7, 0x6b, 0xff, 0x64, 0x41, 0x9d,
    -	0x34, 0xda, 0xd0, 0x61, 0xbc, 0x8b, 0x08, 0x50, 0xac, 0x43, 0xe5, 0xda, 0xcd, 0x4b, 0x35, 0x08,
    -	0xd7, 0x68, 0x2c, 0x4b, 0x47, 0xe6, 0x93, 0xd2, 0xc4, 0xea, 0x22, 0x3e, 0x91, 0x07, 0x8b, 0xf6,
    -	0xf8, 0xce, 0x1c, 0x2d, 0x55, 0xe5, 0xda, 0xed, 0xcb, 0xb6, 0x83, 0x38, 0x4d, 0x95, 0xa7, 0x2d,
    -	0x9e, 0x03, 0x18, 0xbe, 0x60, 0x1f, 0xd5, 0x00, 0x2c, 0xdb, 0x74, 0x7a, 0x6e, 0x97, 0xfa, 0x54,
    -	0x84, 0xad, 0x18, 0xcf, 0xc1, 0xdd, 0x11, 0x82, 0x13, 0xac, 0xb4, 0x78, 0xe7, 0x67, 0x8b, 0x77,
    -	0xa3, 0x7e, 0x7a, 0x56, 0xc9, 0xbc, 0x38, 0xab, 0x64, 0x5e, 0x9e, 0x55, 0x32, 0xcf, 0xc3, 0x8a,
    -	0x72, 0x1a, 0x56, 0x94, 0x17, 0x61, 0x45, 0x79, 0x19, 0x56, 0x94, 0xbf, 0xc2, 0x8a, 0xf2, 0xdd,
    -	0xdf, 0x95, 0xcc, 0xb3, 0xb5, 0x29, 0xff, 0x94, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x45, 0x6f,
    -	0xe0, 0x61, 0x47, 0x0f, 0x00, 0x00,
    +	// 1247 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5,
    +	0x17, 0xf7, 0xfa, 0x47, 0x62, 0x8f, 0xe3, 0x6f, 0xd2, 0xc9, 0x37, 0xcd, 0x36, 0x11, 0x76, 0x64,
    +	0x24, 0x48, 0xd5, 0xb2, 0x26, 0x51, 0xdb, 0x44, 0x95, 0x0a, 0xf2, 0xaa, 0x01, 0x45, 0x4a, 0x4b,
    +	0x35, 0x51, 0x22, 0x51, 0x04, 0x62, 0xbc, 0x9e, 0xd8, 0x4b, 0xec, 0xdd, 0xed, 0xcc, 0xac, 0xd3,
    +	0x70, 0xaa, 0xc4, 0x3f, 0xc0, 0x91, 0x43, 0x0f, 0xfc, 0x07, 0x5c, 0x90, 0xb8, 0x73, 0x40, 0x11,
    +	0xa7, 0x1e, 0x8b, 0x84, 0x2c, 0x62, 0xce, 0xfc, 0x0f, 0x68, 0x66, 0xc7, 0xde, 0xdd, 0xc4, 0x76,
    +	0x6d, 0x0e, 0x94, 0x43, 0x6f, 0x9e, 0xf7, 0x79, 0xbf, 0xe7, 0xbd, 0xb7, 0x6f, 0x0c, 0x6e, 0x1c,
    +	0x6f, 0x33, 0xc3, 0x76, 0x2b, 0xd8, 0xb3, 0x2b, 0xd8, 0xe7, 0x4d, 0x97, 0xda, 0x5f, 0x63, 0x6e,
    +	0xbb, 0x4e, 0xa5, 0xb3, 0x51, 0x69, 0x10, 0x87, 0x50, 0xcc, 0x49, 0xdd, 0xf0, 0xa8, 0xcb, 0x5d,
    +	0xb8, 0x1a, 0x30, 0x1b, 0xd8, 0xb3, 0x8d, 0x18, 0xb3, 0xd1, 0xd9, 0x58, 0x79, 0xaf, 0x61, 0xf3,
    +	0xa6, 0x5f, 0x33, 0x2c, 0xb7, 0x5d, 0x69, 0xb8, 0x0d, 0xb7, 0x22, 0x65, 0x6a, 0xfe, 0x91, 0x3c,
    +	0xc9, 0x83, 0xfc, 0x15, 0xe8, 0x5a, 0xb9, 0x15, 0x1a, 0x6e, 0x63, 0xab, 0x69, 0x3b, 0x84, 0x9e,
    +	0x56, 0xbc, 0xe3, 0x86, 0x20, 0xb0, 0x4a, 0x9b, 0x70, 0x3c, 0xc4, 0x83, 0x95, 0xca, 0x28, 0x29,
    +	0xea, 0x3b, 0xdc, 0x6e, 0x93, 0x4b, 0x02, 0x77, 0x5e, 0x25, 0xc0, 0xac, 0x26, 0x69, 0xe3, 0x8b,
    +	0x72, 0xe5, 0x2d, 0x00, 0x76, 0x9e, 0x72, 0x8a, 0x0f, 0x71, 0xcb, 0x27, 0xb0, 0x04, 0x32, 0x36,
    +	0x27, 0x6d, 0xa6, 0x6b, 0x6b, 0xa9, 0xf5, 0x9c, 0x99, 0xeb, 0x75, 0x4b, 0x99, 0x5d, 0x41, 0x40,
    +	0x01, 0xfd, 0x6e, 0xf6, 0xbb, 0xef, 0x4b, 0x89, 0x67, 0xbf, 0xaf, 0x25, 0xca, 0xbf, 0x6a, 0x60,
    +	0xf9, 0x23, 0x9b, 0xb4, 0xea, 0xfb, 0xa4, 0x45, 0x2c, 0xee, 0xd2, 0x2a, 0xe7, 0xd4, 0xae, 0xf9,
    +	0x9c, 0x30, 0x78, 0x1b, 0xe4, 0x29, 0x3e, 0xe9, 0x03, 0xba, 0xb6, 0xa6, 0xad, 0xe7, 0xcc, 0xc5,
    +	0xb3, 0x6e, 0x29, 0xd1, 0xeb, 0x96, 0xf2, 0x28, 0x84, 0x50, 0x94, 0x0f, 0x3e, 0x05, 0x73, 0x94,
    +	0x3c, 0xf1, 0x6d, 0x4a, 0xda, 0xc4, 0xe1, 0x4c, 0x4f, 0xae, 0xa5, 0xd6, 0xf3, 0x9b, 0x1f, 0x18,
    +	0xe1, 0x6d, 0x0c, 0x42, 0x33, 0xbc, 0xe3, 0x86, 0x20, 0x30, 0x43, 0x64, 0xd0, 0xe8, 0x6c, 0x18,
    +	0x31, 0x5f, 0x50, 0xa8, 0xc6, 0xfc, 0xbf, 0xb2, 0x3b, 0x17, 0x21, 0x32, 0x14, 0xb3, 0x24, 0x83,
    +	0xd9, 0xc3, 0x35, 0xd2, 0xfa, 0x8f, 0x04, 0x13, 0xf3, 0x65, 0xda, 0x60, 0x7e, 0x4c, 0x02, 0x7d,
    +	0xcf, 0xb5, 0x70, 0x6b, 0xdf, 0xaf, 0x7d, 0x45, 0x2c, 0x5e, 0xb5, 0x2c, 0xc2, 0x18, 0x22, 0x1d,
    +	0x9b, 0x9c, 0xc0, 0x2f, 0x41, 0x56, 0x18, 0xa9, 0x63, 0x8e, 0x65, 0x28, 0xf9, 0xcd, 0xf7, 0x27,
    +	0x73, 0xe9, 0x13, 0xa9, 0xeb, 0x01, 0xe1, 0xd8, 0x84, 0xca, 0x09, 0x10, 0xd2, 0xd0, 0x40, 0x2b,
    +	0x3c, 0x04, 0x69, 0xe6, 0x11, 0x4b, 0x4f, 0x4a, 0xed, 0xb7, 0x8c, 0x31, 0xbd, 0x64, 0x0c, 0xf1,
    +	0x70, 0xdf, 0x23, 0x96, 0x39, 0xa7, 0x2c, 0xa4, 0xc5, 0x09, 0x49, 0x7d, 0xf0, 0x0b, 0x30, 0xc3,
    +	0x38, 0xe6, 0x3e, 0xd3, 0x53, 0x52, 0xf3, 0x9d, 0xa9, 0x35, 0x4b, 0x69, 0xf3, 0x7f, 0x4a, 0xf7,
    +	0x4c, 0x70, 0x46, 0x4a, 0x6b, 0xf9, 0x33, 0xb0, 0xf4, 0xd0, 0x75, 0x10, 0x61, 0xae, 0x4f, 0x2d,
    +	0x12, 0x29, 0x80, 0x35, 0x90, 0xf6, 0x30, 0x6f, 0xaa, 0x9b, 0x1f, 0xb8, 0xf6, 0x08, 0xf3, 0x26,
    +	0x92, 0x88, 0xe0, 0xe8, 0x10, 0x5a, 0x93, 0x21, 0x47, 0x38, 0x0e, 0x09, 0xad, 0x21, 0x89, 0x94,
    +	0x9f, 0x80, 0xf9, 0x88, 0x72, 0xe4, 0xb7, 0x64, 0xaf, 0x09, 0x28, 0xd6, 0x6b, 0x42, 0x82, 0xa1,
    +	0x80, 0x0e, 0xef, 0x81, 0x79, 0x27, 0x94, 0x39, 0x40, 0x7b, 0x41, 0x11, 0xe5, 0xcc, 0xc5, 0x5e,
    +	0xb7, 0x14, 0x55, 0x27, 0x20, 0x74, 0x91, 0xb7, 0xfc, 0x3c, 0x0d, 0xe0, 0x90, 0x68, 0x2a, 0x20,
    +	0xe7, 0xe0, 0x36, 0x61, 0x1e, 0xb6, 0x88, 0x0a, 0xe9, 0x8a, 0x72, 0x38, 0xf7, 0xb0, 0x0f, 0xa0,
    +	0x90, 0xe7, 0xd5, 0xc1, 0xc1, 0xb7, 0x41, 0xa6, 0x41, 0x5d, 0xdf, 0x93, 0x17, 0x93, 0x33, 0x0b,
    +	0x8a, 0x25, 0xf3, 0xb1, 0x20, 0xa2, 0x00, 0x83, 0xd7, 0xc1, 0x6c, 0x87, 0x50, 0x66, 0xbb, 0x8e,
    +	0x9e, 0x96, 0x6c, 0xf3, 0x8a, 0x6d, 0xf6, 0x30, 0x20, 0xa3, 0x3e, 0x0e, 0x6f, 0x82, 0x2c, 0x55,
    +	0x8e, 0xeb, 0x19, 0xc9, 0xbb, 0xa0, 0x78, 0xb3, 0x83, 0x0c, 0x0e, 0x38, 0x44, 0x7f, 0x32, 0xbf,
    +	0x36, 0x10, 0x98, 0x89, 0xf7, 0xe7, 0x7e, 0x08, 0xa1, 0x28, 0x9f, 0x08, 0x4b, 0xc4, 0xa8, 0xcf,
    +	0xc6, 0xc3, 0x12, 0x29, 0x40, 0x12, 0x81, 0x6d, 0x50, 0x38, 0x8a, 0x0e, 0x15, 0x3d, 0x3b, 0x41,
    +	0x45, 0x8f, 0x18, 0x89, 0xe6, 0x95, 0x5e, 0xb7, 0x54, 0x88, 0xcf, 0xa8, 0xb8, 0x76, 0x61, 0xae,
    +	0x15, 0x6d, 0x7b, 0x3d, 0x37, 0x81, 0xb9, 0x11, 0x43, 0x2b, 0x30, 0x17, 0x9f, 0x22, 0x71, 0xed,
    +	0xe5, 0x9f, 0x35, 0x30, 0x37, 0x5d, 0x3d, 0xde, 0x00, 0x39, 0xec, 0xd9, 0xf2, 0x52, 0xfb, 0x95,
    +	0x58, 0x10, 0x55, 0x53, 0x7d, 0xb4, 0x1b, 0x10, 0x51, 0x88, 0x0b, 0xe6, 0x7e, 0xaa, 0x45, 0xc3,
    +	0x0e, 0x98, 0xfb, 0x26, 0x19, 0x0a, 0x71, 0xb8, 0x05, 0x0a, 0xfd, 0x83, 0x2c, 0x41, 0x3d, 0x2d,
    +	0x05, 0x64, 0x10, 0x28, 0x0a, 0xa0, 0x38, 0x5f, 0xf9, 0xa7, 0x24, 0x58, 0xde, 0x27, 0xad, 0xa3,
    +	0xd7, 0x33, 0xe9, 0x1e, 0xc7, 0x26, 0xdd, 0xf6, 0xf8, 0x79, 0x34, 0xdc, 0xcb, 0xd7, 0x36, 0xed,
    +	0x9e, 0x27, 0xc1, 0xea, 0x18, 0x9f, 0xe0, 0x09, 0x80, 0xf4, 0xd2, 0xf0, 0x50, 0x79, 0xac, 0x8c,
    +	0xf5, 0xe5, 0xf2, 0xcc, 0x31, 0xaf, 0xf6, 0xba, 0xa5, 0x21, 0xb3, 0x08, 0x0d, 0x31, 0x01, 0xbf,
    +	0xd1, 0xc0, 0x92, 0x33, 0x6c, 0x0e, 0xab, 0x34, 0x6f, 0x8e, 0x35, 0x3e, 0x74, 0x82, 0x9b, 0xd7,
    +	0x7a, 0xdd, 0xd2, 0xf0, 0xe1, 0x8e, 0x86, 0xdb, 0x12, 0xdf, 0xd0, 0xab, 0x91, 0xf4, 0x88, 0x06,
    +	0xf9, 0xf7, 0xea, 0xea, 0xd3, 0x58, 0x5d, 0x6d, 0x4d, 0x5a, 0x57, 0x11, 0x27, 0x47, 0x96, 0xd5,
    +	0xe7, 0x17, 0xca, 0xea, 0xf6, 0x24, 0x65, 0x15, 0x55, 0x3c, 0xbe, 0xaa, 0x1e, 0x80, 0x95, 0xd1,
    +	0x0e, 0x4d, 0xfd, 0xe9, 0x29, 0xff, 0x90, 0x04, 0x8b, 0x6f, 0x96, 0x98, 0x69, 0xda, 0xfa, 0x97,
    +	0x34, 0x58, 0x7e, 0xd3, 0xd2, 0xa3, 0xd6, 0x38, 0x9f, 0x11, 0xaa, 0x96, 0x94, 0xc1, 0xe5, 0x1c,
    +	0x30, 0x42, 0x91, 0x44, 0x60, 0x19, 0xcc, 0x34, 0x82, 0xaf, 0x5b, 0xf0, 0xfd, 0x01, 0x22, 0xc1,
    +	0xea, 0xd3, 0xa6, 0x10, 0x58, 0x07, 0x19, 0x22, 0xde, 0x4b, 0x7a, 0x46, 0xee, 0xf3, 0x1f, 0xfe,
    +	0x93, 0xca, 0x30, 0xe4, 0x8b, 0x6b, 0xc7, 0xe1, 0xf4, 0x34, 0x5c, 0x96, 0x24, 0x0d, 0x05, 0xca,
    +	0xe1, 0x5b, 0x20, 0xe5, 0xdb, 0x75, 0xb5, 0xcb, 0xe4, 0x15, 0x4b, 0xea, 0x60, 0xf7, 0x3e, 0x12,
    +	0xf4, 0x15, 0xac, 0x1e, 0x6d, 0x52, 0x05, 0x5c, 0x00, 0xa9, 0x63, 0x72, 0x1a, 0x34, 0x14, 0x12,
    +	0x3f, 0xe1, 0x3d, 0x90, 0xe9, 0x88, 0xf7, 0x9c, 0xca, 0xef, 0xbb, 0x63, 0x9d, 0x0c, 0x9f, 0x7f,
    +	0x28, 0x90, 0xba, 0x9b, 0xdc, 0xd6, 0xca, 0xbf, 0x69, 0xe0, 0xda, 0xc8, 0xf2, 0x13, 0xcb, 0x1c,
    +	0x6e, 0xb5, 0xdc, 0x13, 0x52, 0x97, 0x66, 0xb3, 0xe1, 0x32, 0x57, 0x0d, 0xc8, 0xa8, 0x8f, 0xc3,
    +	0x77, 0xc0, 0x4c, 0x9d, 0x38, 0x36, 0xa9, 0xcb, 0xb5, 0x2f, 0x1b, 0x56, 0xee, 0x7d, 0x49, 0x45,
    +	0x0a, 0x15, 0x7c, 0x94, 0x60, 0xe6, 0x3a, 0x6a, 0xd1, 0x1c, 0xf0, 0x21, 0x49, 0x45, 0x0a, 0x85,
    +	0x55, 0x30, 0x4f, 0x84, 0x9b, 0xd2, 0xff, 0x1d, 0x4a, 0xdd, 0xfe, 0x8d, 0x2e, 0x2b, 0x81, 0xf9,
    +	0x9d, 0x38, 0x8c, 0x2e, 0xf2, 0x97, 0xff, 0x4a, 0x02, 0x7d, 0xd4, 0x68, 0x83, 0x47, 0xe1, 0x2e,
    +	0x22, 0x41, 0xb9, 0x0e, 0xe5, 0x37, 0xaf, 0x4f, 0xd4, 0x20, 0x42, 0xc2, 0x5c, 0x52, 0x8e, 0x14,
    +	0xa2, 0xd4, 0xc8, 0xea, 0x22, 0x8f, 0x90, 0x82, 0x05, 0x27, 0xfe, 0x22, 0xe8, 0xbf, 0x11, 0x6f,
    +	0x4e, 0xda, 0x0e, 0xd2, 0x9a, 0xae, 0xac, 0x2d, 0x5c, 0x00, 0x18, 0xba, 0xa4, 0x1f, 0x6e, 0x02,
    +	0x60, 0x3b, 0x96, 0xdb, 0xf6, 0x5a, 0x84, 0x13, 0x99, 0xb6, 0x6c, 0x38, 0x07, 0x77, 0x07, 0x08,
    +	0x8a, 0x70, 0x0d, 0xcb, 0x77, 0x7a, 0xba, 0x7c, 0x9b, 0xd5, 0xb3, 0xf3, 0x62, 0xe2, 0xc5, 0x79,
    +	0x31, 0xf1, 0xf2, 0xbc, 0x98, 0x78, 0xd6, 0x2b, 0x6a, 0x67, 0xbd, 0xa2, 0xf6, 0xa2, 0x57, 0xd4,
    +	0x5e, 0xf6, 0x8a, 0xda, 0x1f, 0xbd, 0xa2, 0xf6, 0xed, 0x9f, 0xc5, 0xc4, 0xe3, 0xd5, 0x31, 0xff,
    +	0xd0, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x8c, 0x77, 0x0f, 0xbf, 0x11, 0x00, 0x00,
     }
     
     func (m ExtraValue) Marshal() (dAtA []byte, err error) {
    @@ -565,6 +631,90 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *FieldSelectorAttributes) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *FieldSelectorAttributes) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *FieldSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Requirements) > 0 {
    +		for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.RawSelector)
    +	copy(dAtA[i:], m.RawSelector)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *LabelSelectorAttributes) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LabelSelectorAttributes) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LabelSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Requirements) > 0 {
    +		for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.RawSelector)
    +	copy(dAtA[i:], m.RawSelector)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -712,6 +862,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.LabelSelector != nil {
    +		{
    +			size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x4a
    +	}
    +	if m.FieldSelector != nil {
    +		{
    +			size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x42
    +	}
     	i -= len(m.Name)
     	copy(dAtA[i:], m.Name)
     	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    @@ -1277,6 +1451,40 @@ func (m ExtraValue) Size() (n int) {
     	return n
     }
     
    +func (m *FieldSelectorAttributes) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.RawSelector)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Requirements) > 0 {
    +		for _, e := range m.Requirements {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *LabelSelectorAttributes) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.RawSelector)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Requirements) > 0 {
    +		for _, e := range m.Requirements {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
     func (m *LocalSubjectAccessReview) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -1346,6 +1554,14 @@ func (m *ResourceAttributes) Size() (n int) {
     	n += 1 + l + sovGenerated(uint64(l))
     	l = len(m.Name)
     	n += 1 + l + sovGenerated(uint64(l))
    +	if m.FieldSelector != nil {
    +		l = m.FieldSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.LabelSelector != nil {
    +		l = m.LabelSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -1536,6 +1752,38 @@ func sovGenerated(x uint64) (n int) {
     func sozGenerated(x uint64) (n int) {
     	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
     }
    +func (this *FieldSelectorAttributes) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForRequirements := "[]FieldSelectorRequirement{"
    +	for _, f := range this.Requirements {
    +		repeatedStringForRequirements += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForRequirements += "}"
    +	s := strings.Join([]string{`&FieldSelectorAttributes{`,
    +		`RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`,
    +		`Requirements:` + repeatedStringForRequirements + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *LabelSelectorAttributes) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForRequirements := "[]LabelSelectorRequirement{"
    +	for _, f := range this.Requirements {
    +		repeatedStringForRequirements += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForRequirements += "}"
    +	s := strings.Join([]string{`&LabelSelectorAttributes{`,
    +		`RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`,
    +		`Requirements:` + repeatedStringForRequirements + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *LocalSubjectAccessReview) String() string {
     	if this == nil {
     		return "nil"
    @@ -1582,6 +1830,8 @@ func (this *ResourceAttributes) String() string {
     		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
     		`Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`,
     		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`FieldSelector:` + strings.Replace(this.FieldSelector.String(), "FieldSelectorAttributes", "FieldSelectorAttributes", 1) + `,`,
    +		`LabelSelector:` + strings.Replace(this.LabelSelector.String(), "LabelSelectorAttributes", "LabelSelectorAttributes", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -1807,6 +2057,238 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *FieldSelectorAttributes) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: FieldSelectorAttributes: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: FieldSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.RawSelector = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requirements = append(m.Requirements, v1.FieldSelectorRequirement{})
    +			if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *LabelSelectorAttributes) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LabelSelectorAttributes: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LabelSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.RawSelector = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requirements = append(m.Requirements, v1.LabelSelectorRequirement{})
    +			if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    @@ -2437,6 +2919,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error {
     			}
     			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    +		case 8:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.FieldSelector == nil {
    +				m.FieldSelector = &FieldSelectorAttributes{}
    +			}
    +			if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 9:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.LabelSelector == nil {
    +				m.LabelSelector = &LabelSelectorAttributes{}
    +			}
    +			if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
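Editor's note: the generated.pb.go changes above introduce FieldSelectorAttributes and LabelSelectorAttributes and wire them into ResourceAttributes as proto fields 8 and 9; the generated.proto diff that follows documents them as alpha fields behind the AuthorizeWithSelectors feature gate. The sketch below is an illustrative (not authoritative) client-side SubjectAccessReview carrying both selector attributes; the user, verb, and selector values are placeholders.

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative SubjectAccessReview asking whether "jane" may list pods,
	// narrowed by a field selector (raw form) and a label selector
	// (parsed requirements form). Selectors can only limit access, not broaden it.
	sar := authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: "jane",
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb:     "list",
				Resource: "pods",
				// New in this vendored revision: selector-limited access checks.
				FieldSelector: &authorizationv1.FieldSelectorAttributes{
					RawSelector: "spec.nodeName=node-1",
				},
				LabelSelector: &authorizationv1.LabelSelectorAttributes{
					Requirements: []metav1.LabelSelectorRequirement{{
						Key:      "app",
						Operator: metav1.LabelSelectorOpIn,
						Values:   []string{"web"},
					}},
				},
			},
		},
	}
	fmt.Println(sar.Spec.ResourceAttributes.FieldSelector.RawSelector)
}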
    diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto
    index 83283d0bdb..37b05b8552 100644
    --- a/vendor/k8s.io/api/authorization/v1/generated.proto
    +++ b/vendor/k8s.io/api/authorization/v1/generated.proto
    @@ -37,6 +37,60 @@ message ExtraValue {
       repeated string items = 1;
     }
     
    +// FieldSelectorAttributes indicates a field limited access.
    +// Webhook authors are encouraged to
    +// * ensure rawSelector and requirements are not both set
    +// * consider the requirements field if set
    +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details.
    +// For the *SubjectAccessReview endpoints of the kube-apiserver:
    +// * If rawSelector is empty and requirements are empty, the request is not limited.
    +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds.
    +// * If rawSelector is empty and requirements are present, the requirements should be honored
    +// * If rawSelector is present and requirements are present, the request is invalid.
    +message FieldSelectorAttributes {
    +  // rawSelector is the serialization of a field selector that would be included in a query parameter.
    +  // Webhook implementations are encouraged to ignore rawSelector.
    +  // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.
    +  // +optional
    +  optional string rawSelector = 1;
    +
    +  // requirements is the parsed interpretation of a field selector.
    +  // All requirements must be met for a resource instance to match the selector.
    +  // Webhook implementations should handle requirements, but how to handle them is up to the webhook.
    +  // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements
    +  // are not understood.
    +  // +optional
    +  // +listType=atomic
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement requirements = 2;
    +}
    +
    +// LabelSelectorAttributes indicates a label limited access.
    +// Webhook authors are encouraged to
    +// * ensure rawSelector and requirements are not both set
    +// * consider the requirements field if set
    +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details.
    +// For the *SubjectAccessReview endpoints of the kube-apiserver:
    +// * If rawSelector is empty and requirements are empty, the request is not limited.
    +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds.
    +// * If rawSelector is empty and requirements are present, the requirements should be honored
    +// * If rawSelector is present and requirements are present, the request is invalid.
    +message LabelSelectorAttributes {
    +  // rawSelector is the serialization of a field selector that would be included in a query parameter.
    +  // Webhook implementations are encouraged to ignore rawSelector.
    +  // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.
    +  // +optional
    +  optional string rawSelector = 1;
    +
    +  // requirements is the parsed interpretation of a label selector.
    +  // All requirements must be met for a resource instance to match the selector.
    +  // Webhook implementations should handle requirements, but how to handle them is up to the webhook.
    +  // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements
    +  // are not understood.
    +  // +optional
    +  // +listType=atomic
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement requirements = 2;
    +}
    +
     // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
     // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
     // checking.
    @@ -44,7 +98,7 @@ message LocalSubjectAccessReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace
       // you made the request against.  If empty, it is defaulted.
    @@ -111,6 +165,20 @@ message ResourceAttributes {
       // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
       // +optional
       optional string name = 7;
    +
    +  // fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
    +  //
    +  // This field  is alpha-level. To use this field, you must enable the
    +  // `AuthorizeWithSelectors` feature gate (disabled by default).
    +  // +optional
    +  optional FieldSelectorAttributes fieldSelector = 8;
    +
    +  // labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
    +  //
    +  // This field  is alpha-level. To use this field, you must enable the
    +  // `AuthorizeWithSelectors` feature gate (disabled by default).
    +  // +optional
    +  optional LabelSelectorAttributes labelSelector = 9;
     }
     
     // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
    @@ -145,7 +213,7 @@ message SelfSubjectAccessReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated.  user and groups must be empty
       optional SelfSubjectAccessReviewSpec spec = 2;
    @@ -177,7 +245,7 @@ message SelfSubjectRulesReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated.
       optional SelfSubjectRulesReviewSpec spec = 2;
    @@ -198,7 +266,7 @@ message SubjectAccessReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated
       optional SubjectAccessReviewSpec spec = 2;
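
The selector messages above spell out a contract for webhook authorizers: ignore rawSelector, evaluate requirements, and treat the request as unlimited whenever the requirements are not understood. A minimal sketch of that evaluation in Go, assuming the metav1.FieldSelectorRequirement type and its operator constants from apimachinery v0.31+; the helper names fieldPredicate and has are illustrative, not part of any API:

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// fieldPredicate turns the parsed requirements into a predicate over a resource's
// field map. rawSelector is deliberately never consulted, and any operator the
// webhook does not recognize makes it fall back to an unlimited (always-true) match.
func fieldPredicate(attrs *authorizationv1.FieldSelectorAttributes) func(map[string]string) bool {
	unlimited := func(map[string]string) bool { return true }
	if attrs == nil || len(attrs.Requirements) == 0 {
		return unlimited
	}
	for _, r := range attrs.Requirements {
		switch r.Operator {
		case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn,
			metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist:
		default:
			return unlimited // requirements not understood: safe to authorize as unlimited
		}
	}
	reqs := attrs.Requirements
	return func(fields map[string]string) bool {
		for _, r := range reqs {
			v, ok := fields[r.Key]
			switch r.Operator {
			case metav1.FieldSelectorOpIn:
				if !ok || !has(r.Values, v) {
					return false
				}
			case metav1.FieldSelectorOpNotIn:
				if ok && has(r.Values, v) {
					return false
				}
			case metav1.FieldSelectorOpExists:
				if !ok {
					return false
				}
			case metav1.FieldSelectorOpDoesNotExist:
				if ok {
					return false
				}
			}
		}
		return true
	}
}

func has(vs []string, s string) bool {
	for _, v := range vs {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	attrs := &authorizationv1.FieldSelectorAttributes{
		RawSelector: "metadata.name=ignored", // ignored on purpose, per the guidance above
		Requirements: []metav1.FieldSelectorRequirement{
			{Key: "metadata.name", Operator: metav1.FieldSelectorOpIn, Values: []string{"collector"}},
		},
	}
	allow := fieldPredicate(attrs)
	fmt.Println(allow(map[string]string{"metadata.name": "collector"})) // true
	fmt.Println(allow(map[string]string{"metadata.name": "other"}))     // false
}
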
    diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go
    index 3b42956f89..36f5fa4107 100644
    --- a/vendor/k8s.io/api/authorization/v1/types.go
    +++ b/vendor/k8s.io/api/authorization/v1/types.go
    @@ -26,6 +26,7 @@ import (
     // +genclient:nonNamespaced
     // +genclient:onlyVerbs=create
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.6
     
     // SubjectAccessReview checks whether or not a user or group can perform an action.
     type SubjectAccessReview struct {
    @@ -47,6 +48,7 @@ type SubjectAccessReview struct {
     // +genclient:nonNamespaced
     // +genclient:onlyVerbs=create
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
      // SelfSubjectAccessReview checks whether or not the current user can perform an action.  Not filling in a
     // spec.namespace means "in all namespaces".  Self is a special case, because users should always be able
    @@ -69,6 +71,7 @@ type SelfSubjectAccessReview struct {
     // +genclient
     // +genclient:onlyVerbs=create
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
     // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
    @@ -115,6 +118,72 @@ type ResourceAttributes struct {
     	// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
     	// +optional
     	Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
    +	// fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
    +	//
    +	// This field  is alpha-level. To use this field, you must enable the
    +	// `AuthorizeWithSelectors` feature gate (disabled by default).
    +	// +optional
    +	FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"`
    +	// labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
    +	//
    +	// This field  is alpha-level. To use this field, you must enable the
    +	// `AuthorizeWithSelectors` feature gate (disabled by default).
    +	// +optional
    +	LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"`
    +}
    +
    +// LabelSelectorAttributes indicates a label limited access.
    +// Webhook authors are encouraged to
    +// * ensure rawSelector and requirements are not both set
    +// * consider the requirements field if set
    +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details.
    +// For the *SubjectAccessReview endpoints of the kube-apiserver:
    +// * If rawSelector is empty and requirements are empty, the request is not limited.
    +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds.
    +// * If rawSelector is empty and requirements are present, the requirements should be honored
    +// * If rawSelector is present and requirements are present, the request is invalid.
    +type LabelSelectorAttributes struct {
    +	// rawSelector is the serialization of a field selector that would be included in a query parameter.
    +	// Webhook implementations are encouraged to ignore rawSelector.
    +	// The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.
    +	// +optional
    +	RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"`
    +
    +	// requirements is the parsed interpretation of a label selector.
    +	// All requirements must be met for a resource instance to match the selector.
    +	// Webhook implementations should handle requirements, but how to handle them is up to the webhook.
    +	// Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements
    +	// are not understood.
    +	// +optional
    +	// +listType=atomic
    +	Requirements []metav1.LabelSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"`
    +}
    +
    +// FieldSelectorAttributes indicates a field limited access.
    +// Webhook authors are encouraged to
    +// * ensure rawSelector and requirements are not both set
    +// * consider the requirements field if set
    +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details.
    +// For the *SubjectAccessReview endpoints of the kube-apiserver:
    +// * If rawSelector is empty and requirements are empty, the request is not limited.
    +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds.
    +// * If rawSelector is empty and requirements are present, the requirements should be honored
    +// * If rawSelector is present and requirements are present, the request is invalid.
    +type FieldSelectorAttributes struct {
    +	// rawSelector is the serialization of a field selector that would be included in a query parameter.
    +	// Webhook implementations are encouraged to ignore rawSelector.
    +	// The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.
    +	// +optional
    +	RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"`
    +
    +	// requirements is the parsed interpretation of a field selector.
    +	// All requirements must be met for a resource instance to match the selector.
    +	// Webhook implementations should handle requirements, but how to handle them is up to the webhook.
    +	// Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements
    +	// are not understood.
    +	// +optional
    +	// +listType=atomic
    +	Requirements []metav1.FieldSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"`
     }
     
     // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
    @@ -198,6 +267,7 @@ type SubjectAccessReviewStatus struct {
     // +genclient:nonNamespaced
     // +genclient:onlyVerbs=create
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
     // The returned list of actions may be incomplete depending on the server's authorization mode,
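
With the Go types in place, a caller can attach the new selector attributes to a SubjectAccessReview through the usual client-go create call. A sketch under stated assumptions: the function name canListPodsOnNode and its arguments are hypothetical, and the selector fields are only honored when the apiserver runs with the alpha AuthorizeWithSelectors feature gate enabled (otherwise they are ignored):

package example

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// canListPodsOnNode asks the apiserver whether the given user may list pods when the
// list is limited to a single node via a field selector on spec.nodeName.
func canListPodsOnNode(ctx context.Context, cs kubernetes.Interface, user, node string) (bool, error) {
	sar := &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: user,
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb:     "list",
				Version:  "v1",
				Resource: "pods",
				FieldSelector: &authorizationv1.FieldSelectorAttributes{
					Requirements: []metav1.FieldSelectorRequirement{
						{Key: "spec.nodeName", Operator: metav1.FieldSelectorOpIn, Values: []string{node}},
					},
				},
			},
		},
	}
	resp, err := cs.AuthorizationV1().SubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, fmt.Errorf("creating SubjectAccessReview: %w", err)
	}
	return resp.Status.Allowed, nil
}
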
    diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
    index 93229485cc..dc6b8a89ec 100644
    --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
    @@ -27,6 +27,26 @@ package v1
     // Those methods can be generated by using hack/update-codegen.sh
     
     // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
    +var map_FieldSelectorAttributes = map[string]string{
    +	"":             "FieldSelectorAttributes indicates a field limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.",
    +	"rawSelector":  "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.",
    +	"requirements": "requirements is the parsed interpretation of a field selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.",
    +}
    +
    +func (FieldSelectorAttributes) SwaggerDoc() map[string]string {
    +	return map_FieldSelectorAttributes
    +}
    +
    +var map_LabelSelectorAttributes = map[string]string{
    +	"":             "LabelSelectorAttributes indicates a label limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.",
    +	"rawSelector":  "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.",
    +	"requirements": "requirements is the parsed interpretation of a label selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.",
    +}
    +
    +func (LabelSelectorAttributes) SwaggerDoc() map[string]string {
    +	return map_LabelSelectorAttributes
    +}
    +
     var map_LocalSubjectAccessReview = map[string]string{
     	"":         "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
     	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    @@ -59,14 +79,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string {
     }
     
     var map_ResourceAttributes = map[string]string{
    -	"":            "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface",
    -	"namespace":   "Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
    -	"verb":        "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
    -	"group":       "Group is the API Group of the Resource.  \"*\" means all.",
    -	"version":     "Version is the API Version of the Resource.  \"*\" means all.",
    -	"resource":    "Resource is one of the existing resource types.  \"*\" means all.",
    -	"subresource": "Subresource is one of the existing resource types.  \"\" means none.",
    -	"name":        "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
    +	"":              "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface",
    +	"namespace":     "Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
    +	"verb":          "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
    +	"group":         "Group is the API Group of the Resource.  \"*\" means all.",
    +	"version":       "Version is the API Version of the Resource.  \"*\" means all.",
    +	"resource":      "Resource is one of the existing resource types.  \"*\" means all.",
    +	"subresource":   "Subresource is one of the existing resource types.  \"\" means none.",
    +	"name":          "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
    +	"fieldSelector": "fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.\n\nThis field  is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
    +	"labelSelector": "labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.\n\nThis field  is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
     }
     
     func (ResourceAttributes) SwaggerDoc() map[string]string {
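
The generated SwaggerDoc maps are plain map[string]string values keyed by JSON field name, so the new fieldSelector and labelSelector descriptions are also available programmatically. A trivial example, assuming the vendored k8s.io/api/authorization/v1 package:

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
)

func main() {
	// Look up the generated description for the new alpha field by its JSON name.
	docs := authorizationv1.ResourceAttributes{}.SwaggerDoc()
	fmt.Println(docs["fieldSelector"])
}
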
    diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go
    index f1d49eb386..7f040f5c56 100644
    --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go
    @@ -22,6 +22,7 @@ limitations under the License.
     package v1
     
     import (
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	runtime "k8s.io/apimachinery/pkg/runtime"
     )
     
    @@ -45,6 +46,52 @@ func (in ExtraValue) DeepCopy() ExtraValue {
     	return *out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *FieldSelectorAttributes) DeepCopyInto(out *FieldSelectorAttributes) {
    +	*out = *in
    +	if in.Requirements != nil {
    +		in, out := &in.Requirements, &out.Requirements
    +		*out = make([]metav1.FieldSelectorRequirement, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorAttributes.
    +func (in *FieldSelectorAttributes) DeepCopy() *FieldSelectorAttributes {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(FieldSelectorAttributes)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LabelSelectorAttributes) DeepCopyInto(out *LabelSelectorAttributes) {
    +	*out = *in
    +	if in.Requirements != nil {
    +		in, out := &in.Requirements, &out.Requirements
    +		*out = make([]metav1.LabelSelectorRequirement, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorAttributes.
    +func (in *LabelSelectorAttributes) DeepCopy() *LabelSelectorAttributes {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LabelSelectorAttributes)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
     	*out = *in
    @@ -118,6 +165,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) {
     	*out = *in
    +	if in.FieldSelector != nil {
    +		in, out := &in.FieldSelector, &out.FieldSelector
    +		*out = new(FieldSelectorAttributes)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	if in.LabelSelector != nil {
    +		in, out := &in.LabelSelector, &out.LabelSelector
    +		*out = new(LabelSelectorAttributes)
    +		(*in).DeepCopyInto(*out)
    +	}
     	return
     }
     
    @@ -201,7 +258,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview
     	if in.ResourceAttributes != nil {
     		in, out := &in.ResourceAttributes, &out.ResourceAttributes
     		*out = new(ResourceAttributes)
    -		**out = **in
    +		(*in).DeepCopyInto(*out)
     	}
     	if in.NonResourceAttributes != nil {
     		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
    @@ -299,7 +356,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) {
     	if in.ResourceAttributes != nil {
     		in, out := &in.ResourceAttributes, &out.ResourceAttributes
     		*out = new(ResourceAttributes)
    -		**out = **in
    +		(*in).DeepCopyInto(*out)
     	}
     	if in.NonResourceAttributes != nil {
     		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
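
The switch from **out = **in to (*in).DeepCopyInto(*out) in the two review specs is the substantive part of this hunk: ResourceAttributes now carries pointer and slice fields, so a plain value copy would alias the selector data between the original and the copy. A small illustration with hypothetical values:

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &authorizationv1.ResourceAttributes{
		Resource: "pods",
		FieldSelector: &authorizationv1.FieldSelectorAttributes{
			Requirements: []metav1.FieldSelectorRequirement{
				{Key: "metadata.name", Operator: metav1.FieldSelectorOpIn, Values: []string{"a"}},
			},
		},
	}

	shallow := *orig        // copies only the FieldSelector pointer
	deep := orig.DeepCopy() // allocates new FieldSelectorAttributes and Requirements

	orig.FieldSelector.Requirements[0].Values[0] = "mutated"

	fmt.Println(shallow.FieldSelector.Requirements[0].Values[0]) // "mutated": aliased
	fmt.Println(deep.FieldSelector.Requirements[0].Values[0])    // "a": independent copy
}
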
    diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..b0c0475b48
    --- /dev/null
    +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,46 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LocalSubjectAccessReview) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *SelfSubjectAccessReview) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *SelfSubjectRulesReview) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *SubjectAccessReview) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 6
    +}
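
The generated lifecycle hooks make the introduction release queryable at runtime. A short example using the values returned by the functions above:

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
)

func main() {
	var sar authorizationv1.SubjectAccessReview
	var selfSAR authorizationv1.SelfSubjectAccessReview

	major, minor := sar.APILifecycleIntroduced()
	fmt.Printf("SubjectAccessReview introduced in %d.%d\n", major, minor) // 1.6

	major, minor = selfSAR.APILifecycleIntroduced()
	fmt.Printf("SelfSubjectAccessReview introduced in %d.%d\n", major, minor) // 1.19
}
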
    diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
    index 28642ba638..5007d1b496 100644
    --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
    @@ -26,6 +26,7 @@ import (
     
     	proto "github.com/gogo/protobuf/proto"
     	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
    +	v11 "k8s.io/api/authorization/v1"
     
     	math "math"
     	math_bits "math/bits"
    @@ -459,78 +460,82 @@ func init() {
     }
     
     var fileDescriptor_8eab727787743457 = []byte{
    -	// 1130 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcf, 0x6f, 0x1b, 0xc5,
    -	0x17, 0xf7, 0xfa, 0x47, 0x62, 0x3f, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xe6, 0x2b, 0x6c, 0xcb,
    -	0x48, 0x28, 0x88, 0xb2, 0xdb, 0x44, 0x85, 0x94, 0x40, 0x0f, 0xb1, 0x12, 0x50, 0xa4, 0xb6, 0x54,
    -	0x13, 0x25, 0x07, 0x2a, 0x01, 0xe3, 0xf5, 0xc4, 0x5e, 0x62, 0xef, 0x2e, 0x3b, 0xb3, 0x0e, 0x41,
    -	0x1c, 0x7a, 0xe4, 0xc8, 0x91, 0x23, 0x27, 0xfe, 0x07, 0x2e, 0x48, 0x70, 0xca, 0xb1, 0xc7, 0x20,
    -	0x21, 0x8b, 0x2c, 0x7f, 0x04, 0x57, 0x34, 0xb3, 0x63, 0xef, 0x3a, 0xd9, 0xc4, 0x49, 0x0e, 0xf4,
    -	0xd2, 0x9b, 0xe7, 0x7d, 0x3e, 0xef, 0xcd, 0x7b, 0x6f, 0xde, 0x7b, 0xfb, 0x0c, 0xf7, 0x0f, 0x1e,
    -	0x32, 0xc3, 0x76, 0x4d, 0xe2, 0xd9, 0x26, 0x09, 0x78, 0xc7, 0xf5, 0xed, 0x6f, 0x09, 0xb7, 0x5d,
    -	0xc7, 0xec, 0xaf, 0x34, 0x29, 0x27, 0x2b, 0x66, 0x9b, 0x3a, 0xd4, 0x27, 0x9c, 0xb6, 0x0c, 0xcf,
    -	0x77, 0xb9, 0x8b, 0x6a, 0x91, 0x86, 0x41, 0x3c, 0xdb, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xd2, 0xbb,
    -	0x6d, 0x9b, 0x77, 0x82, 0xa6, 0x61, 0xb9, 0x3d, 0xb3, 0xed, 0xb6, 0x5d, 0x53, 0x2a, 0x36, 0x83,
    -	0x7d, 0x79, 0x92, 0x07, 0xf9, 0x2b, 0x32, 0xb8, 0xf4, 0x20, 0x76, 0xa1, 0x47, 0xac, 0x8e, 0xed,
    -	0x50, 0xff, 0xc8, 0xf4, 0x0e, 0xda, 0x42, 0xc0, 0xcc, 0x1e, 0xe5, 0xc4, 0xec, 0x9f, 0x73, 0x63,
    -	0xc9, 0xbc, 0x48, 0xcb, 0x0f, 0x1c, 0x6e, 0xf7, 0xe8, 0x39, 0x85, 0xf7, 0x27, 0x29, 0x30, 0xab,
    -	0x43, 0x7b, 0xe4, 0xac, 0x5e, 0x7d, 0x0d, 0x60, 0xeb, 0x1b, 0xee, 0x93, 0x3d, 0xd2, 0x0d, 0x28,
    -	0xaa, 0x42, 0xc1, 0xe6, 0xb4, 0xc7, 0x74, 0xad, 0x96, 0x5b, 0x2e, 0x35, 0x4a, 0xe1, 0xa0, 0x5a,
    -	0xd8, 0x16, 0x02, 0x1c, 0xc9, 0xd7, 0x8b, 0x3f, 0xfe, 0x54, 0xcd, 0xbc, 0xf8, 0xb3, 0x96, 0xa9,
    -	0xff, 0x9a, 0x05, 0xfd, 0xb1, 0x6b, 0x91, 0xee, 0x4e, 0xd0, 0xfc, 0x8a, 0x5a, 0x7c, 0xc3, 0xb2,
    -	0x28, 0x63, 0x98, 0xf6, 0x6d, 0x7a, 0x88, 0xbe, 0x84, 0xa2, 0x88, 0xac, 0x45, 0x38, 0xd1, 0xb5,
    -	0x9a, 0xb6, 0x5c, 0x5e, 0xbd, 0x6f, 0xc4, 0x89, 0x1d, 0x39, 0x68, 0x78, 0x07, 0x6d, 0x21, 0x60,
    -	0x86, 0x60, 0x1b, 0xfd, 0x15, 0xe3, 0x53, 0x69, 0xeb, 0x09, 0xe5, 0xa4, 0x81, 0x8e, 0x07, 0xd5,
    -	0x4c, 0x38, 0xa8, 0x42, 0x2c, 0xc3, 0x23, 0xab, 0xe8, 0x39, 0xe4, 0x99, 0x47, 0x2d, 0x3d, 0x2b,
    -	0xad, 0x7f, 0x60, 0x4c, 0x7a, 0x36, 0x23, 0xc5, 0xcd, 0x1d, 0x8f, 0x5a, 0x8d, 0x5b, 0xea, 0x9a,
    -	0xbc, 0x38, 0x61, 0x69, 0x14, 0x59, 0x30, 0xc5, 0x38, 0xe1, 0x01, 0xd3, 0x73, 0xd2, 0xfc, 0x87,
    -	0x37, 0x33, 0x2f, 0x4d, 0x34, 0xfe, 0xa7, 0x2e, 0x98, 0x8a, 0xce, 0x58, 0x99, 0xae, 0x3f, 0x87,
    -	0x85, 0xa7, 0xae, 0x83, 0x29, 0x73, 0x03, 0xdf, 0xa2, 0x1b, 0x9c, 0xfb, 0x76, 0x33, 0xe0, 0x94,
    -	0xa1, 0x1a, 0xe4, 0x3d, 0xc2, 0x3b, 0x32, 0x71, 0xa5, 0xd8, 0xbf, 0x67, 0x84, 0x77, 0xb0, 0x44,
    -	0x04, 0xa3, 0x4f, 0xfd, 0xa6, 0x0c, 0x3e, 0xc1, 0xd8, 0xa3, 0x7e, 0x13, 0x4b, 0xa4, 0xfe, 0x35,
    -	0xcc, 0x26, 0x8c, 0xe3, 0xa0, 0x2b, 0xdf, 0x56, 0x40, 0x63, 0x6f, 0x2b, 0x34, 0x18, 0x8e, 0xe4,
    -	0xe8, 0x11, 0xcc, 0x3a, 0xb1, 0xce, 0x2e, 0x7e, 0xcc, 0xf4, 0xac, 0xa4, 0xce, 0x87, 0x83, 0x6a,
    -	0xd2, 0x9c, 0x80, 0xf0, 0x59, 0xae, 0x28, 0x08, 0x94, 0x12, 0x8d, 0x09, 0x25, 0x87, 0xf4, 0x28,
    -	0xf3, 0x88, 0x45, 0x55, 0x48, 0xb7, 0x95, 0xc3, 0xa5, 0xa7, 0x43, 0x00, 0xc7, 0x9c, 0xc9, 0xc1,
    -	0xa1, 0x37, 0xa1, 0xd0, 0xf6, 0xdd, 0xc0, 0x93, 0xaf, 0x53, 0x6a, 0xcc, 0x28, 0x4a, 0xe1, 0x13,
    -	0x21, 0xc4, 0x11, 0x86, 0xde, 0x86, 0xe9, 0x3e, 0xf5, 0x99, 0xed, 0x3a, 0x7a, 0x5e, 0xd2, 0x66,
    -	0x15, 0x6d, 0x7a, 0x2f, 0x12, 0xe3, 0x21, 0x8e, 0xee, 0x41, 0xd1, 0x57, 0x8e, 0xeb, 0x05, 0xc9,
    -	0x9d, 0x53, 0xdc, 0xe2, 0x28, 0x83, 0x23, 0x06, 0x7a, 0x0f, 0xca, 0x2c, 0x68, 0x8e, 0x14, 0xa6,
    -	0xa4, 0xc2, 0xbc, 0x52, 0x28, 0xef, 0xc4, 0x10, 0x4e, 0xf2, 0x44, 0x58, 0x22, 0x46, 0x7d, 0x7a,
    -	0x3c, 0x2c, 0x91, 0x02, 0x2c, 0x91, 0xfa, 0xef, 0x1a, 0xdc, 0xba, 0xde, 0x8b, 0xbd, 0x03, 0x25,
    -	0xe2, 0xd9, 0x32, 0xec, 0xe1, 0x5b, 0xcd, 0x88, 0xbc, 0x6e, 0x3c, 0xdb, 0x8e, 0x84, 0x38, 0xc6,
    -	0x05, 0x79, 0xe8, 0x8c, 0xa8, 0xeb, 0x11, 0x79, 0x78, 0x25, 0xc3, 0x31, 0x8e, 0xd6, 0x60, 0x66,
    -	0x78, 0x90, 0x8f, 0xa4, 0xe7, 0xa5, 0xc2, 0xed, 0x70, 0x50, 0x9d, 0xc1, 0x49, 0x00, 0x8f, 0xf3,
    -	0xea, 0xbf, 0x65, 0x61, 0x71, 0x87, 0x76, 0xf7, 0x5f, 0xcd, 0x54, 0xf8, 0x62, 0x6c, 0x2a, 0x3c,
    -	0xba, 0x42, 0xdb, 0xa6, 0xbb, 0xfa, 0x6a, 0x27, 0xc3, 0xcf, 0x59, 0xf8, 0xff, 0x25, 0x8e, 0xa1,
    -	0xef, 0x00, 0xf9, 0xe7, 0x1a, 0x4d, 0x65, 0xf4, 0xc1, 0x64, 0x87, 0xce, 0x37, 0x69, 0xe3, 0x4e,
    -	0x38, 0xa8, 0xa6, 0x34, 0x2f, 0x4e, 0xb9, 0x07, 0x7d, 0xaf, 0xc1, 0x82, 0x93, 0x36, 0xb8, 0x54,
    -	0xd6, 0xd7, 0x26, 0x7b, 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xe2, 0xf4,
    -	0x0b, 0xc5, 0xc8, 0xb9, 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xae, 0xd6, 0x3e, 0x1f, 0xab, 0xb5,
    -	0x8f, 0xae, 0x55, 0x6b, 0x09, 0x4f, 0x2f, 0x2c, 0xb5, 0xe6, 0x99, 0x52, 0x5b, 0xbf, 0x72, 0xa9,
    -	0x25, 0xad, 0x5f, 0x5e, 0x69, 0x4f, 0x60, 0xe9, 0x62, 0xaf, 0xae, 0x3d, 0xba, 0xeb, 0xbf, 0x64,
    -	0x61, 0xfe, 0xf5, 0x3a, 0x70, 0xb3, 0xa6, 0x3f, 0xc9, 0xc3, 0xe2, 0xeb, 0x86, 0xbf, 0xbc, 0xe1,
    -	0xc5, 0x47, 0x34, 0x60, 0xd4, 0x57, 0x1f, 0xfe, 0xd1, 0x5b, 0xed, 0x32, 0xea, 0x63, 0x89, 0xa0,
    -	0xda, 0x70, 0x37, 0x88, 0x3e, 0x58, 0x20, 0x32, 0xad, 0xbe, 0x85, 0x6a, 0x31, 0xb0, 0xa1, 0x40,
    -	0xc5, 0xc6, 0xab, 0x17, 0x6a, 0xb9, 0xe5, 0xf2, 0xea, 0xe6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3,
    -	0x96, 0xc3, 0xfd, 0xa3, 0x78, 0x07, 0x91, 0x32, 0x1c, 0xdd, 0x80, 0xde, 0x80, 0x5c, 0x60, 0xb7,
    -	0xd4, 0x8a, 0x50, 0x56, 0x94, 0xdc, 0xee, 0xf6, 0x26, 0x16, 0xf2, 0xa5, 0x7d, 0xb5, 0x7b, 0x4b,
    -	0x13, 0x68, 0x0e, 0x72, 0x07, 0xf4, 0x28, 0xea, 0x33, 0x2c, 0x7e, 0xa2, 0x06, 0x14, 0xfa, 0x62,
    -	0x2d, 0x57, 0x79, 0xbe, 0x37, 0xd9, 0xd3, 0x78, 0x95, 0xc7, 0x91, 0xea, 0x7a, 0xf6, 0xa1, 0x56,
    -	0xff, 0x43, 0x83, 0xbb, 0x17, 0x16, 0xa4, 0x58, 0x94, 0x48, 0xb7, 0xeb, 0x1e, 0xd2, 0x96, 0xbc,
    -	0xbb, 0x18, 0x2f, 0x4a, 0x1b, 0x91, 0x18, 0x0f, 0x71, 0xf4, 0x16, 0x4c, 0xb5, 0xa8, 0x63, 0xd3,
    -	0x96, 0x5c, 0xa9, 0x8a, 0x71, 0x2d, 0x6f, 0x4a, 0x29, 0x56, 0xa8, 0xe0, 0xf9, 0x94, 0x30, 0xd7,
    -	0x51, 0x4b, 0xdc, 0x88, 0x87, 0xa5, 0x14, 0x2b, 0x14, 0x6d, 0xc0, 0x2c, 0x15, 0x6e, 0xca, 0x20,
    -	0xb6, 0x7c, 0xdf, 0x1d, 0xbe, 0xec, 0xa2, 0x52, 0x98, 0xdd, 0x1a, 0x87, 0xf1, 0x59, 0x7e, 0xfd,
    -	0x9f, 0x2c, 0xe8, 0x17, 0x8d, 0x3d, 0x74, 0x10, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xd5,
    -	0xb8, 0x7a, 0xcb, 0x08, 0xb5, 0xc6, 0x82, 0xf2, 0x66, 0x26, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x88,
    -	0x0e, 0x61, 0xce, 0x19, 0x5f, 0xb9, 0xa3, 0x9d, 0xac, 0xbc, 0xba, 0x72, 0xad, 0x06, 0x91, 0x57,
    -	0xea, 0xea, 0xca, 0xb9, 0x33, 0x00, 0xc3, 0xe7, 0x2e, 0x41, 0xab, 0x00, 0xb6, 0x63, 0xb9, 0x3d,
    -	0xaf, 0x4b, 0x39, 0x95, 0x09, 0x2c, 0xc6, 0xd3, 0x72, 0x7b, 0x84, 0xe0, 0x04, 0x2b, 0x2d, 0xf3,
    -	0xf9, 0xeb, 0x65, 0xbe, 0xf1, 0xf1, 0xf1, 0x69, 0x25, 0xf3, 0xf2, 0xb4, 0x92, 0x39, 0x39, 0xad,
    -	0x64, 0x5e, 0x84, 0x15, 0xed, 0x38, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x12, 0x56, 0xb4, 0xbf,
    -	0xc2, 0x8a, 0xf6, 0xc3, 0xdf, 0x95, 0xcc, 0x67, 0xb5, 0x49, 0xff, 0xc0, 0xff, 0x0d, 0x00, 0x00,
    -	0xff, 0xff, 0xcd, 0x08, 0x09, 0x84, 0xa4, 0x0f, 0x00, 0x00,
    +	// 1192 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xe3, 0x44,
    +	0x14, 0x8f, 0xf3, 0xa7, 0x4d, 0x26, 0x1b, 0xda, 0x9d, 0xaa, 0x5b, 0x6f, 0x11, 0x49, 0x14, 0x24,
    +	0x54, 0xb4, 0x8b, 0xb3, 0xad, 0x0a, 0x5d, 0x0a, 0x7b, 0xa8, 0xd5, 0x2e, 0xaa, 0xd4, 0x5d, 0x56,
    +	0x53, 0xb5, 0x07, 0x56, 0x02, 0x26, 0xce, 0x34, 0x31, 0x75, 0x6c, 0xe3, 0x19, 0xa7, 0x14, 0x71,
    +	0xd8, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x70, 0xe2, 0x3b, 0x70, 0x41, 0x82, 0x53, 0x8f, 0x7b, 0x2c,
    +	0x12, 0x8a, 0xa8, 0xf9, 0x10, 0x5c, 0xd1, 0x8c, 0x27, 0xb1, 0x9d, 0xba, 0x4d, 0xdb, 0x03, 0x7b,
    +	0xd9, 0x5b, 0xe6, 0xfd, 0x7e, 0xef, 0xcf, 0xbc, 0x79, 0xf3, 0xfc, 0x26, 0xe0, 0xc1, 0xe1, 0x43,
    +	0xaa, 0x99, 0x4e, 0x13, 0xbb, 0x66, 0x13, 0xfb, 0xac, 0xeb, 0x78, 0xe6, 0xb7, 0x98, 0x99, 0x8e,
    +	0xdd, 0xec, 0x2f, 0xb7, 0x08, 0xc3, 0xcb, 0xcd, 0x0e, 0xb1, 0x89, 0x87, 0x19, 0x69, 0x6b, 0xae,
    +	0xe7, 0x30, 0x07, 0xd6, 0x43, 0x0d, 0x0d, 0xbb, 0xa6, 0x96, 0xd0, 0xd0, 0xa4, 0xc6, 0xe2, 0x7b,
    +	0x1d, 0x93, 0x75, 0xfd, 0x96, 0x66, 0x38, 0xbd, 0x66, 0xc7, 0xe9, 0x38, 0x4d, 0xa1, 0xd8, 0xf2,
    +	0x0f, 0xc4, 0x4a, 0x2c, 0xc4, 0xaf, 0xd0, 0xe0, 0xe2, 0xbd, 0x4b, 0x42, 0x18, 0xf7, 0xbe, 0xb8,
    +	0x1a, 0x91, 0x7b, 0xd8, 0xe8, 0x9a, 0x36, 0xf1, 0x8e, 0x9b, 0xee, 0x61, 0x87, 0x0b, 0x68, 0xb3,
    +	0x47, 0x18, 0x4e, 0xd3, 0x6a, 0x5e, 0xa4, 0xe5, 0xf9, 0x36, 0x33, 0x7b, 0xe4, 0x9c, 0xc2, 0x07,
    +	0x93, 0x14, 0xa8, 0xd1, 0x25, 0x3d, 0x3c, 0xae, 0xd7, 0x58, 0x03, 0x60, 0xeb, 0x1b, 0xe6, 0xe1,
    +	0x7d, 0x6c, 0xf9, 0x04, 0xd6, 0x40, 0xc1, 0x64, 0xa4, 0x47, 0x55, 0xa5, 0x9e, 0x5b, 0x2a, 0xe9,
    +	0xa5, 0x60, 0x50, 0x2b, 0x6c, 0x73, 0x01, 0x0a, 0xe5, 0xeb, 0xc5, 0x1f, 0x7f, 0xae, 0x65, 0x5e,
    +	0xfc, 0x55, 0xcf, 0x34, 0x7e, 0xcb, 0x02, 0x75, 0xc7, 0x31, 0xb0, 0xb5, 0xeb, 0xb7, 0xbe, 0x22,
    +	0x06, 0xdb, 0x30, 0x0c, 0x42, 0x29, 0x22, 0x7d, 0x93, 0x1c, 0xc1, 0x2f, 0x41, 0x91, 0xef, 0xac,
    +	0x8d, 0x19, 0x56, 0x95, 0xba, 0xb2, 0x54, 0x5e, 0x79, 0xa0, 0x45, 0xa7, 0x30, 0x0a, 0x50, 0x73,
    +	0x0f, 0x3b, 0x5c, 0x40, 0x35, 0xce, 0xd6, 0xfa, 0xcb, 0xda, 0xa7, 0xc2, 0xd6, 0x13, 0xc2, 0xb0,
    +	0x0e, 0x4f, 0x06, 0xb5, 0x4c, 0x30, 0xa8, 0x81, 0x48, 0x86, 0x46, 0x56, 0xe1, 0x73, 0x90, 0xa7,
    +	0x2e, 0x31, 0xd4, 0xac, 0xb0, 0xfe, 0xa1, 0x36, 0xe9, 0x8c, 0xb5, 0x94, 0x30, 0x77, 0x5d, 0x62,
    +	0xe8, 0xb7, 0xa4, 0x9b, 0x3c, 0x5f, 0x21, 0x61, 0x14, 0x1a, 0x60, 0x8a, 0x32, 0xcc, 0x7c, 0xaa,
    +	0xe6, 0x84, 0xf9, 0x8f, 0x6e, 0x66, 0x5e, 0x98, 0xd0, 0xdf, 0x90, 0x0e, 0xa6, 0xc2, 0x35, 0x92,
    +	0xa6, 0x1b, 0xcf, 0xc1, 0xfc, 0x53, 0xc7, 0x46, 0x84, 0x3a, 0xbe, 0x67, 0x90, 0x0d, 0xc6, 0x3c,
    +	0xb3, 0xe5, 0x33, 0x42, 0x61, 0x1d, 0xe4, 0x5d, 0xcc, 0xba, 0x22, 0x71, 0xa5, 0x28, 0xbe, 0x67,
    +	0x98, 0x75, 0x91, 0x40, 0x38, 0xa3, 0x4f, 0xbc, 0x96, 0xd8, 0x7c, 0x8c, 0xb1, 0x4f, 0xbc, 0x16,
    +	0x12, 0x48, 0xe3, 0x6b, 0x30, 0x13, 0x33, 0x8e, 0x7c, 0x4b, 0x9c, 0x2d, 0x87, 0x12, 0x67, 0xcb,
    +	0x35, 0x28, 0x0a, 0xe5, 0xf0, 0x11, 0x98, 0xb1, 0x23, 0x9d, 0x3d, 0xb4, 0x43, 0xd5, 0xac, 0xa0,
    +	0xce, 0x05, 0x83, 0x5a, 0xdc, 0x1c, 0x87, 0xd0, 0x38, 0xb7, 0xf1, 0x53, 0x1e, 0xc0, 0x94, 0xdd,
    +	0x34, 0x41, 0xc9, 0xc6, 0x3d, 0x42, 0x5d, 0x6c, 0x10, 0xb9, 0xa5, 0xdb, 0x32, 0xe0, 0xd2, 0xd3,
    +	0x21, 0x80, 0x22, 0xce, 0xe4, 0xcd, 0xc1, 0xb7, 0x41, 0xa1, 0xe3, 0x39, 0xbe, 0x2b, 0x4e, 0xa7,
    +	0xa4, 0x57, 0x24, 0xa5, 0xf0, 0x09, 0x17, 0xa2, 0x10, 0x83, 0xef, 0x82, 0xe9, 0x3e, 0xf1, 0xa8,
    +	0xe9, 0xd8, 0x6a, 0x5e, 0xd0, 0x66, 0x24, 0x6d, 0x7a, 0x3f, 0x14, 0xa3, 0x21, 0x0e, 0xef, 0x83,
    +	0xa2, 0x27, 0x03, 0x57, 0x0b, 0x82, 0x3b, 0x2b, 0xb9, 0xc5, 0x51, 0x06, 0x47, 0x0c, 0xf8, 0x3e,
    +	0x28, 0x53, 0xbf, 0x35, 0x52, 0x98, 0x12, 0x0a, 0x73, 0x52, 0xa1, 0xbc, 0x1b, 0x41, 0x28, 0xce,
    +	0xe3, 0xdb, 0xe2, 0x7b, 0x54, 0xa7, 0x93, 0xdb, 0xe2, 0x29, 0x40, 0x02, 0x81, 0x3d, 0x50, 0x39,
    +	0x30, 0x89, 0xd5, 0xde, 0x25, 0x16, 0x31, 0x98, 0xe3, 0xa9, 0x45, 0x51, 0x7c, 0xab, 0x97, 0x15,
    +	0x9f, 0xf6, 0x38, 0xae, 0x11, 0xa5, 0x5d, 0xbf, 0x1d, 0x0c, 0x6a, 0x95, 0x04, 0x88, 0x92, 0xd6,
    +	0xb9, 0x3b, 0x0b, 0xb7, 0x88, 0x35, 0x72, 0x57, 0xba, 0x82, 0xbb, 0x9d, 0xb8, 0xc6, 0xb8, 0xbb,
    +	0x04, 0x88, 0x92, 0xd6, 0x1b, 0x7f, 0x28, 0xe0, 0xd6, 0xf5, 0xea, 0xf1, 0x1e, 0x28, 0x61, 0xd7,
    +	0x14, 0x87, 0x3a, 0xac, 0xc4, 0x0a, 0xaf, 0x9a, 0x8d, 0x67, 0xdb, 0xa1, 0x10, 0x45, 0x38, 0x27,
    +	0x0f, 0x53, 0xcd, 0x6f, 0xed, 0x88, 0x3c, 0x74, 0x49, 0x51, 0x84, 0xc3, 0x35, 0x50, 0x19, 0x2e,
    +	0x44, 0x09, 0xaa, 0x79, 0xa1, 0x20, 0x36, 0x81, 0xe2, 0x00, 0x4a, 0xf2, 0x1a, 0xbf, 0x67, 0xc1,
    +	0xc2, 0x2e, 0xb1, 0x0e, 0x5e, 0x4d, 0xcf, 0xfb, 0x22, 0xd1, 0xf3, 0x1e, 0x5d, 0xa1, 0x29, 0xa5,
    +	0x87, 0xfa, 0x6a, 0xfb, 0xde, 0x2f, 0x59, 0xf0, 0xe6, 0x25, 0x81, 0xc1, 0xef, 0x00, 0xf4, 0xce,
    +	0xb5, 0x11, 0x99, 0xd1, 0xd5, 0xc9, 0x01, 0x9d, 0x6f, 0x41, 0xfa, 0x9d, 0x60, 0x50, 0x4b, 0x69,
    +	0x4d, 0x28, 0xc5, 0x0f, 0xfc, 0x5e, 0x01, 0xf3, 0x76, 0x5a, 0x5b, 0x96, 0x59, 0x5f, 0x9b, 0x1c,
    +	0x41, 0x6a, 0x57, 0xd7, 0xef, 0x06, 0x83, 0x5a, 0x7a, 0xc3, 0x47, 0xe9, 0x0e, 0xf9, 0x17, 0xf6,
    +	0x4e, 0x2c, 0x51, 0xfc, 0xd2, 0xfc, 0x7f, 0xb5, 0xf6, 0x79, 0xa2, 0xd6, 0x3e, 0xbe, 0x56, 0xad,
    +	0xc5, 0x22, 0xbd, 0xb0, 0xd4, 0x5a, 0x63, 0xa5, 0xb6, 0x7e, 0xe5, 0x52, 0x8b, 0x5b, 0xbf, 0xbc,
    +	0xd2, 0x9e, 0x80, 0xc5, 0x8b, 0xa3, 0xba, 0xf6, 0x87, 0xa9, 0xf1, 0x6b, 0x16, 0xcc, 0xbd, 0x1e,
    +	0x76, 0x6e, 0x76, 0xe9, 0x4f, 0xf3, 0x60, 0xe1, 0xf5, 0x85, 0xbf, 0xfc, 0xc2, 0xf3, 0x11, 0xc1,
    +	0xa7, 0xc4, 0x93, 0x63, 0xcd, 0xe8, 0xac, 0xf6, 0x28, 0xf1, 0x90, 0x40, 0x60, 0x7d, 0x38, 0xf9,
    +	0x84, 0x1f, 0x2c, 0xc0, 0x33, 0x2d, 0xbf, 0x85, 0x72, 0xec, 0x31, 0x41, 0x81, 0xf0, 0x79, 0x5e,
    +	0x2d, 0xd4, 0x73, 0x4b, 0xe5, 0x95, 0xcd, 0x1b, 0xd7, 0x8a, 0x26, 0x9e, 0x05, 0x5b, 0x36, 0xf3,
    +	0x8e, 0xa3, 0x09, 0x4b, 0xc8, 0x50, 0xe8, 0x01, 0xbe, 0x05, 0x72, 0xbe, 0xd9, 0x96, 0x03, 0x50,
    +	0x59, 0x52, 0x72, 0x7b, 0xdb, 0x9b, 0x88, 0xcb, 0x17, 0x0f, 0xe4, 0xcb, 0x42, 0x98, 0x80, 0xb3,
    +	0x20, 0x77, 0x48, 0x8e, 0xc3, 0x7b, 0x86, 0xf8, 0x4f, 0xa8, 0x83, 0x42, 0x9f, 0x3f, 0x3a, 0x64,
    +	0x9e, 0xef, 0x4f, 0x8e, 0x34, 0x7a, 0xa8, 0xa0, 0x50, 0x75, 0x3d, 0xfb, 0x50, 0x69, 0xfc, 0xa9,
    +	0x80, 0xbb, 0x17, 0x16, 0x24, 0x1f, 0x03, 0xb1, 0x65, 0x39, 0x47, 0xa4, 0x2d, 0x7c, 0x17, 0xa3,
    +	0x31, 0x70, 0x23, 0x14, 0xa3, 0x21, 0x0e, 0xdf, 0x01, 0x53, 0x6d, 0x62, 0x9b, 0xa4, 0x2d, 0x06,
    +	0xc6, 0x62, 0x54, 0xcb, 0x9b, 0x42, 0x8a, 0x24, 0xca, 0x79, 0x1e, 0xc1, 0xd4, 0xb1, 0xe5, 0x88,
    +	0x3a, 0xe2, 0x21, 0x21, 0x45, 0x12, 0x85, 0x1b, 0x60, 0x86, 0xf0, 0x30, 0xc5, 0x26, 0xb6, 0x3c,
    +	0xcf, 0x19, 0x9e, 0xec, 0x82, 0x54, 0x98, 0xd9, 0x4a, 0xc2, 0x68, 0x9c, 0xdf, 0xf8, 0x37, 0x0b,
    +	0xd4, 0x8b, 0xda, 0x1e, 0x3c, 0x8c, 0xa6, 0x18, 0x01, 0x8a, 0x41, 0xaa, 0xbc, 0xa2, 0x5d, 0xfd,
    +	0xca, 0x70, 0x35, 0x7d, 0x5e, 0x46, 0x53, 0x89, 0x4b, 0x63, 0x93, 0x8f, 0x58, 0xc2, 0x23, 0x30,
    +	0x6b, 0x27, 0x1f, 0x14, 0xe1, 0x4c, 0x56, 0x5e, 0x59, 0xbe, 0xd6, 0x05, 0x11, 0x2e, 0x55, 0xe9,
    +	0x72, 0x76, 0x0c, 0xa0, 0xe8, 0x9c, 0x13, 0xb8, 0x02, 0x80, 0x69, 0x1b, 0x4e, 0xcf, 0xb5, 0x08,
    +	0x23, 0x22, 0x81, 0xc5, 0xa8, 0x5b, 0x6e, 0x8f, 0x10, 0x14, 0x63, 0xa5, 0x65, 0x3e, 0x7f, 0xbd,
    +	0xcc, 0xeb, 0x8f, 0x4f, 0xce, 0xaa, 0x99, 0x97, 0x67, 0xd5, 0xcc, 0xe9, 0x59, 0x35, 0xf3, 0x22,
    +	0xa8, 0x2a, 0x27, 0x41, 0x55, 0x79, 0x19, 0x54, 0x95, 0xd3, 0xa0, 0xaa, 0xfc, 0x1d, 0x54, 0x95,
    +	0x1f, 0xfe, 0xa9, 0x66, 0x3e, 0xab, 0x4f, 0xfa, 0x33, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
    +	0x46, 0xf7, 0xe0, 0x3d, 0xaf, 0x10, 0x00, 0x00,
     }
     
     func (m ExtraValue) Marshal() (dAtA []byte, err error) {
    @@ -712,6 +717,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.LabelSelector != nil {
    +		{
    +			size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x4a
    +	}
    +	if m.FieldSelector != nil {
    +		{
    +			size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x42
    +	}
     	i -= len(m.Name)
     	copy(dAtA[i:], m.Name)
     	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    @@ -1346,6 +1375,14 @@ func (m *ResourceAttributes) Size() (n int) {
     	n += 1 + l + sovGenerated(uint64(l))
     	l = len(m.Name)
     	n += 1 + l + sovGenerated(uint64(l))
    +	if m.FieldSelector != nil {
    +		l = m.FieldSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.LabelSelector != nil {
    +		l = m.LabelSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -1582,6 +1619,8 @@ func (this *ResourceAttributes) String() string {
     		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
     		`Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`,
     		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`FieldSelector:` + strings.Replace(fmt.Sprintf("%v", this.FieldSelector), "FieldSelectorAttributes", "v11.FieldSelectorAttributes", 1) + `,`,
    +		`LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelectorAttributes", "v11.LabelSelectorAttributes", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -2437,6 +2476,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error {
     			}
     			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    +		case 8:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.FieldSelector == nil {
    +				m.FieldSelector = &v11.FieldSelectorAttributes{}
    +			}
    +			if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 9:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.LabelSelector == nil {
    +				m.LabelSelector = &v11.LabelSelectorAttributes{}
    +			}
    +			if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
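
The hand-written marshal and unmarshal paths above key the new fields off the tag bytes 0x42 and 0x4a. A protobuf tag byte is (field number << 3) | wire type, and wire type 2 means length-delimited, so field numbers 8 and 9 yield exactly those constants; a quick sanity check:

package main

import "fmt"

func main() {
	// protobuf tag byte = (field number << 3) | wire type; wire type 2 = length-delimited message.
	fmt.Printf("fieldSelector (field 8): 0x%x\n", 8<<3|2) // 0x42
	fmt.Printf("labelSelector (field 9): 0x%x\n", 9<<3|2) // 0x4a
}
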
    diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto
    index 43bea7aa12..8738768b89 100644
    --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto
    @@ -21,6 +21,7 @@ syntax = "proto2";
     
     package k8s.io.api.authorization.v1beta1;
     
    +import "k8s.io/api/authorization/v1/generated.proto";
     import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
     import "k8s.io/apimachinery/pkg/runtime/generated.proto";
     import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    @@ -44,7 +45,7 @@ message LocalSubjectAccessReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace
       // you made the request against.  If empty, it is defaulted.
    @@ -111,6 +112,14 @@ message ResourceAttributes {
       // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
       // +optional
       optional string name = 7;
    +
    +  // fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
    +  // +optional
    +  optional .k8s.io.api.authorization.v1.FieldSelectorAttributes fieldSelector = 8;
    +
    +  // labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
    +  // +optional
    +  optional .k8s.io.api.authorization.v1.LabelSelectorAttributes labelSelector = 9;
     }
     
     // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
    @@ -145,7 +154,7 @@ message SelfSubjectAccessReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated.  user and groups must be empty
       optional SelfSubjectAccessReviewSpec spec = 2;
    @@ -177,7 +186,7 @@ message SelfSubjectRulesReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated.
       optional SelfSubjectRulesReviewSpec spec = 2;
    @@ -198,7 +207,7 @@ message SubjectAccessReview {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec holds information about the request being evaluated
       optional SubjectAccessReviewSpec spec = 2;
    diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go
    index ef3a501b05..8b8e5a9867 100644
    --- a/vendor/k8s.io/api/authorization/v1beta1/types.go
    +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go
    @@ -19,6 +19,7 @@ package v1beta1
     import (
     	"fmt"
     
    +	authorizationv1 "k8s.io/api/authorization/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    @@ -124,6 +125,12 @@ type ResourceAttributes struct {
     	// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
     	// +optional
     	Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
    +	// fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
    +	// +optional
    +	FieldSelector *authorizationv1.FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"`
    +	// labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
    +	// +optional
    +	LabelSelector *authorizationv1.LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"`
     }
     
     // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
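
Note that v1beta1 does not duplicate the selector attribute types; it imports them from authorization/v1, so a single value can populate the field on either API version without conversion. A brief sketch with hypothetical values:

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sel := &authorizationv1.LabelSelectorAttributes{
		Requirements: []metav1.LabelSelectorRequirement{
			{Key: "app", Operator: metav1.LabelSelectorOpIn, Values: []string{"collector"}},
		},
	}
	v1Attrs := authorizationv1.ResourceAttributes{Resource: "pods", LabelSelector: sel}
	v1beta1Attrs := authorizationv1beta1.ResourceAttributes{Resource: "pods", LabelSelector: sel}
	fmt.Println(v1Attrs.LabelSelector == v1beta1Attrs.LabelSelector) // true: the same *v1 value is shared
}
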
    diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
    index e0846be7a4..bb1352a2d9 100644
    --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
    @@ -59,14 +59,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string {
     }
     
     var map_ResourceAttributes = map[string]string{
    -	"":            "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface",
    -	"namespace":   "Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
    -	"verb":        "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
    -	"group":       "Group is the API Group of the Resource.  \"*\" means all.",
    -	"version":     "Version is the API Version of the Resource.  \"*\" means all.",
    -	"resource":    "Resource is one of the existing resource types.  \"*\" means all.",
    -	"subresource": "Subresource is one of the existing resource types.  \"\" means none.",
    -	"name":        "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
    +	"":              "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface",
    +	"namespace":     "Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
    +	"verb":          "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
    +	"group":         "Group is the API Group of the Resource.  \"*\" means all.",
    +	"version":       "Version is the API Version of the Resource.  \"*\" means all.",
    +	"resource":      "Resource is one of the existing resource types.  \"*\" means all.",
    +	"subresource":   "Subresource is one of the existing resource types.  \"\" means none.",
    +	"name":          "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
    +	"fieldSelector": "fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.",
    +	"labelSelector": "labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.",
     }
     
     func (ResourceAttributes) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go
    index 13f09cf2d2..d76993dba4 100644
    --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go
    @@ -22,6 +22,7 @@ limitations under the License.
     package v1beta1
     
     import (
    +	v1 "k8s.io/api/authorization/v1"
     	runtime "k8s.io/apimachinery/pkg/runtime"
     )
     
    @@ -118,6 +119,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) {
     	*out = *in
    +	if in.FieldSelector != nil {
    +		in, out := &in.FieldSelector, &out.FieldSelector
    +		*out = new(v1.FieldSelectorAttributes)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	if in.LabelSelector != nil {
    +		in, out := &in.LabelSelector, &out.LabelSelector
    +		*out = new(v1.LabelSelectorAttributes)
    +		(*in).DeepCopyInto(*out)
    +	}
     	return
     }
     
    @@ -201,7 +212,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview
     	if in.ResourceAttributes != nil {
     		in, out := &in.ResourceAttributes, &out.ResourceAttributes
     		*out = new(ResourceAttributes)
    -		**out = **in
    +		(*in).DeepCopyInto(*out)
     	}
     	if in.NonResourceAttributes != nil {
     		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
    @@ -299,7 +310,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) {
     	if in.ResourceAttributes != nil {
     		in, out := &in.ResourceAttributes, &out.ResourceAttributes
     		*out = new(ResourceAttributes)
    -		**out = **in
    +		(*in).DeepCopyInto(*out)
     	}
     	if in.NonResourceAttributes != nil {
     		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
    diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go
    index 8c9c09b5cb..d64c9cbc1a 100644
    --- a/vendor/k8s.io/api/autoscaling/v1/doc.go
    +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go
    @@ -17,5 +17,6 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     package v1 // import "k8s.io/api/autoscaling/v1"
    diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto
    index 1dbafd1a53..0a961312f4 100644
    --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto
    +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto
    @@ -51,7 +51,7 @@ message ContainerResourceMetricSource {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
     
       // container is the name of the container in the pods of the scaling target.
       optional string container = 5;
    @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // It will always be set, regardless of the corresponding metric specification.
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
     
   // container is the name of the container in the pods of the scaling target
       optional string container = 4;
    @@ -108,17 +108,17 @@ message ExternalMetricSource {
       // metricSelector is used to identify a specific time series
       // within a given metric.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
     
       // targetValue is the target value of the metric (as a quantity).
       // Mutually exclusive with TargetAverageValue.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
     
       // targetAverageValue is the target per-pod value of global metric (as a quantity).
       // Mutually exclusive with TargetValue.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
     }
     
     // ExternalMetricStatus indicates the current value of a global metric
    @@ -131,21 +131,21 @@ message ExternalMetricStatus {
       // metricSelector is used to identify a specific time series
       // within a given metric.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
     
       // currentValue is the current value of the metric (as a quantity)
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
     
       // currentAverageValue is the current value of metric averaged over autoscaled pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
     }
     
     // configuration of a horizontal pod autoscaler.
     message HorizontalPodAutoscaler {
       // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
       // +optional
    @@ -168,7 +168,7 @@ message HorizontalPodAutoscalerCondition {
       // lastTransitionTime is the last time the condition transitioned from
       // one status to another
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // reason is the reason for the condition's last transition.
       // +optional
    @@ -184,7 +184,7 @@ message HorizontalPodAutoscalerCondition {
     message HorizontalPodAutoscalerList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of horizontal pod autoscaler objects.
       repeated HorizontalPodAutoscaler items = 2;
    @@ -222,7 +222,7 @@ message HorizontalPodAutoscalerStatus {
       // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods;
       // used by the autoscaler to control how often the number of pods is changed.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
     
       // currentReplicas is the current number of replicas of pods managed by this autoscaler.
       optional int32 currentReplicas = 3;
    @@ -336,18 +336,18 @@ message ObjectMetricSource {
       optional string metricName = 2;
     
       // targetValue is the target value of the metric (as a quantity).
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric.
       // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
     
       // averageValue is the target value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
     }
     
     // ObjectMetricStatus indicates the current value of a metric describing a
    @@ -360,18 +360,18 @@ message ObjectMetricStatus {
       optional string metricName = 2;
     
       // currentValue is the current value of the metric (as a quantity).
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric
       // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
     
       // averageValue is the current value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
     }
     
     // PodsMetricSource indicates how to scale on a metric describing each pod in
    @@ -384,13 +384,13 @@ message PodsMetricSource {
     
       // targetAverageValue is the target value of the average of the
       // metric across all relevant pods (as a quantity)
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric
       // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
     }
     
     // PodsMetricStatus indicates the current value of a metric describing each pod in
    @@ -401,13 +401,13 @@ message PodsMetricStatus {
     
       // currentAverageValue is the current value of the average of the
       // metric across all relevant pods (as a quantity)
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric
       // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
     }
     
     // ResourceMetricSource indicates how to scale on a resource metric known to
    @@ -431,7 +431,7 @@ message ResourceMetricSource {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
     }
     
     // ResourceMetricStatus indicates the current value of a resource metric known to
    @@ -455,14 +455,14 @@ message ResourceMetricStatus {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // It will always be set, regardless of the corresponding metric specification.
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
     }
     
     // Scale represents a scaling request for a resource.
     message Scale {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
       // +optional
    diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go
    index 4508290176..b31425b3b7 100644
    --- a/vendor/k8s.io/api/autoscaling/v1/types.go
    +++ b/vendor/k8s.io/api/autoscaling/v1/types.go
    @@ -83,6 +83,7 @@ type HorizontalPodAutoscalerStatus struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // configuration of a horizontal pod autoscaler.
     type HorizontalPodAutoscaler struct {
    @@ -101,6 +102,7 @@ type HorizontalPodAutoscaler struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // list of horizontal pod autoscaler objects.
     type HorizontalPodAutoscalerList struct {
    @@ -114,6 +116,7 @@ type HorizontalPodAutoscalerList struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // Scale represents a scaling request for a resource.
     type Scale struct {
    diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..36d86a5ec3
    --- /dev/null
    +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,40 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Scale) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
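The new zz_generated.prerelease-lifecycle.go file above gives each autoscaling/v1 type an APILifecycleIntroduced method returning the (major, minor) release in which it first appeared. A small self-contained sketch of calling the generated method; the comparison against a 1.23 cluster version is an illustrative assumption, not part of the generated code:

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
)

func main() {
	// Generated method added in this bump; it returns 1, 2 for this type.
	major, minor := (&autoscalingv1.HorizontalPodAutoscaler{}).APILifecycleIntroduced()

	// Illustrative check against a hypothetical 1.23 cluster.
	clusterMajor, clusterMinor := 1, 23
	available := major < clusterMajor || (major == clusterMajor && minor <= clusterMinor)
	fmt.Printf("introduced in %d.%d, available on %d.%d: %v\n",
		major, minor, clusterMajor, clusterMinor, available)
}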
    diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go
    index f96a059b6c..aafa2d4de2 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/doc.go
    +++ b/vendor/k8s.io/api/autoscaling/v2/doc.go
    @@ -17,5 +17,6 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     package v2 // import "k8s.io/api/autoscaling/v2"
    diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto
    index a9e36975fc..8f2ee58031 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto
    +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto
    @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler {
       // metadata is the standard object metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the specification for the behaviour of the autoscaler.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
    @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition {
       // lastTransitionTime is the last time the condition transitioned from
       // one status to another
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // reason is the reason for the condition's last transition.
       // +optional
    @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition {
     message HorizontalPodAutoscalerList {
       // metadata is the standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of horizontal pod autoscaler objects.
       repeated HorizontalPodAutoscaler items = 2;
    @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus {
       // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
       // used by the autoscaler to control how often the number of pods is changed.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
     
       // currentReplicas is current number of replicas of pods managed by this autoscaler,
       // as last seen by the autoscaler.
    @@ -293,7 +293,7 @@ message MetricIdentifier {
       // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     }
     
     // MetricSpec specifies how to scale based on a single metric
    @@ -393,12 +393,12 @@ message MetricTarget {
     
       // value is the target value of the metric (as a quantity).
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
     
       // averageValue is the target value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3;
     
       // averageUtilization is the target value of the average of the
       // resource metric across all relevant pods, represented as a percentage of
    @@ -412,12 +412,12 @@ message MetricTarget {
     message MetricValueStatus {
       // value is the current value of the metric (as a quantity).
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
     
       // averageValue is the current value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2;
     
       // currentAverageUtilization is the current value of the average of the
       // resource metric across all relevant pods, represented as a percentage of
    diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go
    index c12a83df1b..69a7b27012 100644
    --- a/vendor/k8s.io/api/autoscaling/v2/types.go
    +++ b/vendor/k8s.io/api/autoscaling/v2/types.go
    @@ -26,6 +26,7 @@ import (
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.23
     
     // HorizontalPodAutoscaler is the configuration for a horizontal pod
     // autoscaler, which automatically manages the replica count of any resource
    @@ -573,6 +574,7 @@ type MetricValueStatus struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.23
     
     // HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
     type HorizontalPodAutoscalerList struct {
    diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..99ae748651
    --- /dev/null
    +++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v2
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 23
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 23
    +}
    diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
    index e2119d5550..232a598158 100644
    --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
    +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
    @@ -51,7 +51,7 @@ message ContainerResourceMetricSource {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
     
       // container is the name of the container in the pods of the scaling target
       optional string container = 4;
    @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // It will always be set, regardless of the corresponding metric specification.
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
     
       // container is the name of the container in the pods of the scaling target
       optional string container = 4;
    @@ -108,17 +108,17 @@ message ExternalMetricSource {
       // metricSelector is used to identify a specific time series
       // within a given metric.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
     
       // targetValue is the target value of the metric (as a quantity).
       // Mutually exclusive with TargetAverageValue.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
     
       // targetAverageValue is the target per-pod value of global metric (as a quantity).
       // Mutually exclusive with TargetValue.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
     }
     
     // ExternalMetricStatus indicates the current value of a global metric
    @@ -131,14 +131,14 @@ message ExternalMetricStatus {
       // metricSelector is used to identify a specific time series
       // within a given metric.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
     
       // currentValue is the current value of the metric (as a quantity)
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
     
       // currentAverageValue is the current value of metric averaged over autoscaled pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
     }
     
     // HorizontalPodAutoscaler is the configuration for a horizontal pod
    @@ -148,7 +148,7 @@ message HorizontalPodAutoscaler {
       // metadata is the standard object metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the specification for the behaviour of the autoscaler.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
    @@ -172,7 +172,7 @@ message HorizontalPodAutoscalerCondition {
       // lastTransitionTime is the last time the condition transitioned from
       // one status to another
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // reason is the reason for the condition's last transition.
       // +optional
    @@ -188,7 +188,7 @@ message HorizontalPodAutoscalerCondition {
     message HorizontalPodAutoscalerList {
       // metadata is the standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of horizontal pod autoscaler objects.
       repeated HorizontalPodAutoscaler items = 2;
    @@ -233,7 +233,7 @@ message HorizontalPodAutoscalerStatus {
       // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
       // used by the autoscaler to control how often the number of pods is changed.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
     
       // currentReplicas is current number of replicas of pods managed by this autoscaler,
       // as last seen by the autoscaler.
    @@ -355,18 +355,18 @@ message ObjectMetricSource {
       optional string metricName = 2;
     
       // targetValue is the target value of the metric (as a quantity).
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric
       // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
     
       // averageValue is the target value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
     }
     
     // ObjectMetricStatus indicates the current value of a metric describing a
    @@ -379,18 +379,18 @@ message ObjectMetricStatus {
       optional string metricName = 2;
     
       // currentValue is the current value of the metric (as a quantity).
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric
       // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
     
       // averageValue is the current value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
     }
     
     // PodsMetricSource indicates how to scale on a metric describing each pod in
    @@ -403,13 +403,13 @@ message PodsMetricSource {
     
       // targetAverageValue is the target value of the average of the
       // metric across all relevant pods (as a quantity)
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric
       // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
     }
     
     // PodsMetricStatus indicates the current value of a metric describing each pod in
    @@ -420,13 +420,13 @@ message PodsMetricStatus {
     
       // currentAverageValue is the current value of the average of the
       // metric across all relevant pods (as a quantity)
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
     
       // selector is the string-encoded form of a standard kubernetes label selector for the given metric
       // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
     }
     
     // ResourceMetricSource indicates how to scale on a resource metric known to
    @@ -450,7 +450,7 @@ message ResourceMetricSource {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
     }
     
     // ResourceMetricStatus indicates the current value of a resource metric known to
    @@ -474,6 +474,6 @@ message ResourceMetricStatus {
       // resource metric across all relevant pods, as a raw value (instead of as
       // a percentage of the request), similar to the "pods" metric source type.
       // It will always be set, regardless of the corresponding metric specification.
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
     }
     
    diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
    index 41f7a16ea1..c88fc1fe26 100644
    --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
    +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
    @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler {
       // metadata is the standard object metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the specification for the behaviour of the autoscaler.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
    @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition {
       // lastTransitionTime is the last time the condition transitioned from
       // one status to another
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // reason is the reason for the condition's last transition.
       // +optional
    @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition {
     message HorizontalPodAutoscalerList {
       // metadata is the standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of horizontal pod autoscaler objects.
       repeated HorizontalPodAutoscaler items = 2;
    @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus {
       // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
       // used by the autoscaler to control how often the number of pods is changed.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
     
       // currentReplicas is current number of replicas of pods managed by this autoscaler,
       // as last seen by the autoscaler.
    @@ -289,7 +289,7 @@ message MetricIdentifier {
       // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
       // When unset, just the metricName will be used to gather metrics.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     }
     
     // MetricSpec specifies how to scale based on a single metric
    @@ -389,12 +389,12 @@ message MetricTarget {
     
       // value is the target value of the metric (as a quantity).
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
     
       // averageValue is the target value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3;
     
       // averageUtilization is the target value of the average of the
       // resource metric across all relevant pods, represented as a percentage of
    @@ -408,12 +408,12 @@ message MetricTarget {
     message MetricValueStatus {
       // value is the current value of the metric (as a quantity).
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
     
       // averageValue is the current value of the average of the
       // metric across all relevant pods (as a quantity)
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2;
     
       // averageUtilization is the current value of the average of the
       // resource metric across all relevant pods, represented as a percentage of
    diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go
    index c4a8db6e78..cb5cbb6002 100644
    --- a/vendor/k8s.io/api/batch/v1/doc.go
    +++ b/vendor/k8s.io/api/batch/v1/doc.go
    @@ -17,5 +17,5 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     package v1 // import "k8s.io/api/batch/v1"
    diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
    index 833b118d00..f5a9385f5e 100644
    --- a/vendor/k8s.io/api/batch/v1/generated.proto
    +++ b/vendor/k8s.io/api/batch/v1/generated.proto
    @@ -34,7 +34,7 @@ message CronJob {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of a cron job, including the schedule.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -52,7 +52,7 @@ message CronJobList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CronJobs.
       repeated CronJob items = 2;
    @@ -113,15 +113,15 @@ message CronJobStatus {
       // A list of pointers to currently running jobs.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.ObjectReference active = 1;
    +  repeated .k8s.io.api.core.v1.ObjectReference active = 1;
     
       // Information when was the last time the job was successfully scheduled.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4;
     
       // Information when was the last time the job successfully completed.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5;
     }
     
     // Job represents the configuration of a single job.
    @@ -129,7 +129,7 @@ message Job {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of a job.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -152,11 +152,11 @@ message JobCondition {
     
       // Last time the condition was checked.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
     
       // Last time the condition transit from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
     
       // (brief) reason for the condition's last transition.
       // +optional
    @@ -172,7 +172,7 @@ message JobList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of Jobs.
       repeated Job items = 2;
    @@ -213,8 +213,6 @@ message JobSpec {
       // checked against the backoffLimit. This field cannot be used in combination
       // with restartPolicy=OnFailure.
       //
    -  // This field is beta-level. It can be used when the `JobPodFailurePolicy`
    -  // feature gate is enabled (enabled by default).
       // +optional
       optional PodFailurePolicy podFailurePolicy = 11;
     
    @@ -224,8 +222,8 @@ message JobSpec {
       // When the field is specified, it must be immutable and works only for the Indexed Jobs.
       // Once the Job meets the SuccessPolicy, the lingering pods are terminated.
       //
    -  // This field  is alpha-level. To use this field, you must enable the
    -  // `JobSuccessPolicy` feature gate (disabled by default).
    +  // This field is beta-level. To use this field, you must enable the
    +  // `JobSuccessPolicy` feature gate (enabled by default).
       // +optional
       optional SuccessPolicy successPolicy = 16;
     
    @@ -262,7 +260,7 @@ message JobSpec {
       // Normally, the system sets this field for you.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
     
       // manualSelector controls generation of pod labels and pod selectors.
       // Leave `manualSelector` unset unless you are certain what you are doing.
    @@ -280,7 +278,7 @@ message JobSpec {
       // Describes the pod that will be created when executing a job.
       // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure".
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 6;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 6;
     
       // ttlSecondsAfterFinished limits the lifetime of a Job that has finished
       // execution (either Complete or Failed). If this field is set,
    @@ -349,7 +347,8 @@ message JobSpec {
       // The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
       // all characters before the first "/" must be a valid subdomain as defined
       // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
    -  // characters as defined by RFC 3986. The value cannot exceed 64 characters.
    +  // characters as defined by RFC 3986. The value cannot exceed 63 characters.
    +  // This field is immutable.
       //
       // This field is alpha-level. The job controller accepts setting the field
       // when the feature gate JobManagedBy is enabled (disabled by default).
    @@ -387,7 +386,7 @@ message JobStatus {
       // The field cannot be modified while the job is unsuspended or finished.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
     
       // Represents time when the job was completed. It is not guaranteed to
       // be set in happens-before order across separate operations.
    @@ -396,7 +395,7 @@ message JobStatus {
       // The value cannot be updated or removed. The value indicates the same or
       // later point in time as the startTime field.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3;
     
       // The number of pending and running pods which are not terminating (without
       // a deletionTimestamp).
    @@ -466,8 +465,8 @@ message JobStatus {
       // +optional
       optional UncountedTerminatedPods uncountedTerminatedPods = 8;
     
    -  // The number of pods which have a Ready condition.
    -  // +optional
    +  // The number of active pods which have a Ready condition and are not
    +  // terminating (without a deletionTimestamp).
       optional int32 ready = 9;
     }
     
    @@ -476,7 +475,7 @@ message JobTemplateSpec {
       // Standard object's metadata of the jobs created from this template.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the job.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
    index 49b0ec6441..b42ec231e4 100644
    --- a/vendor/k8s.io/api/batch/v1/types.go
    +++ b/vendor/k8s.io/api/batch/v1/types.go
    @@ -64,6 +64,7 @@ const (
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // Job represents the configuration of a single job.
     type Job struct {
    @@ -85,6 +86,7 @@ type Job struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // JobList is a collection of jobs.
     type JobList struct {
    @@ -174,7 +176,7 @@ type PodFailurePolicyOnExitCodesRequirement struct {
     	// When specified, it should match one the container or initContainer
     	// names in the pod template.
     	// +optional
    -	ContainerName *string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"`
    +	ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
     
     	// Represents the relationship between the container exit code(s) and the
     	// specified values. Containers completed with success (exit code 0) are
    @@ -234,14 +236,14 @@ type PodFailurePolicyRule struct {
     
     	// Represents the requirement on the container exit codes.
     	// +optional
    -	OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes" protobuf:"bytes,2,opt,name=onExitCodes"`
    +	OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"`
     
     	// Represents the requirement on the pod conditions. The requirement is represented
     	// as a list of pod condition patterns. The requirement is satisfied if at
     	// least one pattern matches an actual pod condition. At most 20 elements are allowed.
     	// +listType=atomic
     	// +optional
    -	OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"`
    +	OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"`
     }
     
     // PodFailurePolicy describes how failed pods influence the backoffLimit.
    @@ -336,8 +338,6 @@ type JobSpec struct {
     	// checked against the backoffLimit. This field cannot be used in combination
     	// with restartPolicy=OnFailure.
     	//
    -	// This field is beta-level. It can be used when the `JobPodFailurePolicy`
    -	// feature gate is enabled (enabled by default).
     	// +optional
     	PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"`
     
    @@ -347,8 +347,8 @@ type JobSpec struct {
     	// When the field is specified, it must be immutable and works only for the Indexed Jobs.
     	// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
     	//
    -	// This field  is alpha-level. To use this field, you must enable the
    -	// `JobSuccessPolicy` feature gate (disabled by default).
    +	// This field is beta-level. To use this field, you must enable the
    +	// `JobSuccessPolicy` feature gate (enabled by default).
     	// +optional
     	SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
     
    @@ -477,7 +477,8 @@ type JobSpec struct {
     	// The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
     	// all characters before the first "/" must be a valid subdomain as defined
     	// by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
    -	// characters as defined by RFC 3986. The value cannot exceed 64 characters.
    +	// characters as defined by RFC 3986. The value cannot exceed 63 characters.
    +	// This field is immutable.
     	//
     	// This field is alpha-level. The job controller accepts setting the field
     	// when the feature gate JobManagedBy is enabled (disabled by default).
    @@ -594,8 +595,8 @@ type JobStatus struct {
     	// +optional
     	UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"`
     
    -	// The number of pods which have a Ready condition.
    -	// +optional
    +	// The number of active pods which have a Ready condition and are not
    +	// terminating (without a deletionTimestamp).
     	Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"`
     }
     
    @@ -633,7 +634,6 @@ const (
     	// JobReasonPodFailurePolicy reason indicates a job failure condition is added due to
     	// a failed pod matching a pod failure policy rule
     	// https://kep.k8s.io/3329
    -	// This is currently a beta field.
     	JobReasonPodFailurePolicy string = "PodFailurePolicy"
     	// JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of
     	// times higher than backOffLimit times.
    @@ -649,8 +649,13 @@ const (
     	// JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to
     	// a Job met successPolicy.
     	// https://kep.k8s.io/3998
    -	// This is currently an alpha field.
    +	// This is currently a beta field.
     	JobReasonSuccessPolicy string = "SuccessPolicy"
    +	// JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to
    +	// a number of succeeded Job pods met completions.
    +	// - https://kep.k8s.io/3998
    +	// This is currently a beta field.
    +	JobReasonCompletionsReached string = "CompletionsReached"
     )
     
     // JobCondition describes current state of a job.
    @@ -688,6 +693,7 @@ type JobTemplateSpec struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.21
     
     // CronJob represents the configuration of a single cron job.
     type CronJob struct {
    @@ -709,6 +715,7 @@ type CronJob struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.21
     
     // CronJobList is a collection of cron jobs.
     type CronJobList struct {
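The batch/v1 changes above promote successPolicy from alpha to beta (the JobSuccessPolicy gate is now enabled by default) and add the CompletionsReached condition reason. A hedged sketch of an Indexed Job using the now-beta field, built against the batch/v1 and k8s.io/utils/ptr packages vendored by this change; the job name, image, and rule values are illustrative assumptions:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "indexed-demo"},
		Spec: batchv1.JobSpec{
			CompletionMode: ptr.To(batchv1.IndexedCompletion),
			Completions:    ptr.To[int32](3),
			Parallelism:    ptr.To[int32](3),
			// successPolicy is beta at this API version: the Job is declared
			// succeeded once index 0 completes, and lingering pods are terminated.
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{{SucceededIndexes: ptr.To("0")}},
			},
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:  "main",
						Image: "busybox",
					}},
				},
			},
		},
	}
	fmt.Println(job.Name, *job.Spec.SuccessPolicy.Rules[0].SucceededIndexes)
}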
    diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
    index 84073b8d86..d504887884 100644
    --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
    @@ -115,8 +115,8 @@ var map_JobSpec = map[string]string{
     	"parallelism":             "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
     	"completions":             "Specifies the desired number of successfully finished pods the job should be run with.  Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value.  Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
     	"activeDeadlineSeconds":   "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
    -	"podFailurePolicy":        "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).",
    -	"successPolicy":           "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field  is alpha-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (disabled by default).",
    +	"podFailurePolicy":        "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.",
    +	"successPolicy":           "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).",
     	"backoffLimit":            "Specifies the number of retries before marking this job failed. Defaults to 6",
     	"backoffLimitPerIndex":    "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
     	"maxFailedIndexes":        "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
    @@ -127,7 +127,7 @@ var map_JobSpec = map[string]string{
     	"completionMode":          "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
     	"suspend":                 "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
     	"podReplacementPolicy":    "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n  when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n  Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.",
    -	"managedBy":               "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 64 characters.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).",
    +	"managedBy":               "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).",
     }
     
     func (JobSpec) SwaggerDoc() map[string]string {
    @@ -146,7 +146,7 @@ var map_JobStatus = map[string]string{
     	"completedIndexes":        "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
     	"failedIndexes":           "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
     	"uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n    counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.",
    -	"ready":                   "The number of pods which have a Ready condition.",
    +	"ready":                   "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).",
     }
     
     func (JobStatus) SwaggerDoc() map[string]string {
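The compressed index notation described above for completedIndexes and failedIndexes ("1,3-5,7") is straightforward to reproduce. The helper below is a minimal illustrative sketch assuming a sorted, duplicate-free input slice; it is not the formatter Kubernetes itself uses.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatIndexes renders a sorted list of completion indexes in the compressed
// text form: runs of three or more consecutive numbers become "first-last",
// everything else is listed individually, all joined by commas.
func formatIndexes(indexes []int) string {
	var parts []string
	for i := 0; i < len(indexes); {
		j := i
		for j+1 < len(indexes) && indexes[j+1] == indexes[j]+1 {
			j++
		}
		if j-i >= 2 { // run of 3 or more consecutive values
			parts = append(parts, fmt.Sprintf("%d-%d", indexes[i], indexes[j]))
		} else {
			for k := i; k <= j; k++ {
				parts = append(parts, strconv.Itoa(indexes[k]))
			}
		}
		i = j + 1
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(formatIndexes([]int{1, 3, 4, 5, 7})) // prints "1,3-5,7"
}
```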
    diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..b76cb09249
    --- /dev/null
    +++ b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,46 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CronJob) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 21
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CronJobList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 21
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Job) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *JobList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
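The generated accessors above only report the release in which a type was introduced; what a caller does with that is up to them. A hedged sketch, assuming the vendored k8s.io/api module is importable and using a made-up introducedBefore helper (not part of any Kubernetes API):

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

// introducedBefore is a hypothetical helper: it reports whether an API type
// that exposes APILifecycleIntroduced already existed at a given cluster version.
func introducedBefore(major, minor, clusterMajor, clusterMinor int) bool {
	if major != clusterMajor {
		return major < clusterMajor
	}
	return minor <= clusterMinor
}

func main() {
	var job batchv1.Job
	major, minor := job.APILifecycleIntroduced() // batch/v1 Job: 1, 2
	fmt.Println(introducedBefore(major, minor, 1, 21))
}
```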
    diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto
    index ac774f19ad..6dd322128d 100644
    --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto
    @@ -35,7 +35,7 @@ message CronJob {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of a cron job, including the schedule.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -53,7 +53,7 @@ message CronJobList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CronJobs.
       repeated CronJob items = 2;
    @@ -116,15 +116,15 @@ message CronJobStatus {
       // A list of pointers to currently running jobs.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.ObjectReference active = 1;
    +  repeated .k8s.io.api.core.v1.ObjectReference active = 1;
     
       // Information when was the last time the job was successfully scheduled.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4;
     
       // Information when was the last time the job successfully completed.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5;
     }
     
     // JobTemplateSpec describes the data a Job should have when created from a template
    @@ -132,11 +132,11 @@ message JobTemplateSpec {
       // Standard object's metadata of the jobs created from this template.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the job.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
       // +optional
    -  optional k8s.io.api.batch.v1.JobSpec spec = 2;
    +  optional .k8s.io.api.batch.v1.JobSpec spec = 2;
     }
     
    diff --git a/vendor/k8s.io/api/certificates/v1/doc.go b/vendor/k8s.io/api/certificates/v1/doc.go
    index fe3ea3af87..78434478e8 100644
    --- a/vendor/k8s.io/api/certificates/v1/doc.go
    +++ b/vendor/k8s.io/api/certificates/v1/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=certificates.k8s.io
     
     package v1 // import "k8s.io/api/certificates/v1"
    diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto
    index 968cc2564c..dac7c7f5f2 100644
    --- a/vendor/k8s.io/api/certificates/v1/generated.proto
    +++ b/vendor/k8s.io/api/certificates/v1/generated.proto
    @@ -41,7 +41,7 @@ option go_package = "k8s.io/api/certificates/v1";
     // or to obtain certificates from custom non-Kubernetes signers.
     message CertificateSigningRequest {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec contains the certificate request, and is immutable after creation.
       // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
    @@ -87,19 +87,19 @@ message CertificateSigningRequestCondition {
     
       // lastUpdateTime is the time of the last update to this condition
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
     
       // lastTransitionTime is the time the condition last transitioned from one status to another.
       // If unset, when a new condition type is added or an existing condition's status is changed,
       // the server defaults this to the current time.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5;
     }
     
     // CertificateSigningRequestList is a collection of CertificateSigningRequest objects
     message CertificateSigningRequestList {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a collection of CertificateSigningRequest objects
       repeated CertificateSigningRequest items = 2;
    diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go
    index 92b2018e76..ba8009840d 100644
    --- a/vendor/k8s.io/api/certificates/v1/types.go
    +++ b/vendor/k8s.io/api/certificates/v1/types.go
    @@ -27,6 +27,7 @@ import (
     // +genclient:nonNamespaced
     // +genclient:method=UpdateApproval,verb=update,subresource=approval,input=k8s.io/api/certificates/v1.CertificateSigningRequest,result=k8s.io/api/certificates/v1.CertificateSigningRequest
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // CertificateSigningRequest objects provide a mechanism to obtain x509 certificates
     // by submitting a certificate signing request, and having it asynchronously approved and issued.
    @@ -262,6 +263,7 @@ type CertificateSigningRequestCondition struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // CertificateSigningRequestList is a collection of CertificateSigningRequest objects
     type CertificateSigningRequestList struct {
    diff --git a/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..3a2b274030
    --- /dev/null
    +++ b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CertificateSigningRequest) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CertificateSigningRequestList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
    index b0ebc4bd45..7155f778cf 100644
    --- a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
    @@ -46,7 +46,7 @@ option go_package = "k8s.io/api/certificates/v1alpha1";
     message ClusterTrustBundle {
       // metadata contains the object metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec contains the signer (if any) and trust anchors.
       optional ClusterTrustBundleSpec spec = 2;
    @@ -57,7 +57,7 @@ message ClusterTrustBundleList {
       // metadata contains the list metadata.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a collection of ClusterTrustBundle objects
       repeated ClusterTrustBundle items = 2;
    diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
    index f70f01ef7a..f3ec4c06e4 100644
    --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
    @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/certificates/v1beta1";
     // Describes a certificate signing request
     message CertificateSigningRequest {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec contains the certificate request, and is immutable after creation.
       // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
    @@ -65,18 +65,18 @@ message CertificateSigningRequestCondition {
     
       // timestamp for the last update to this condition
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
     
       // lastTransitionTime is the time the condition last transitioned from one status to another.
       // If unset, when a new condition type is added or an existing condition's status is changed,
       // the server defaults this to the current time.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5;
     }
     
     message CertificateSigningRequestList {
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       repeated CertificateSigningRequest items = 2;
     }
    diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go
    index fc2f4f2c6e..9b2fbbda3a 100644
    --- a/vendor/k8s.io/api/coordination/v1/doc.go
    +++ b/vendor/k8s.io/api/coordination/v1/doc.go
    @@ -17,6 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     // +groupName=coordination.k8s.io
     
    diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go
    index 8b7ab98caa..cf6702aef3 100644
    --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go
    +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go
    @@ -139,40 +139,44 @@ func init() {
     }
     
     var fileDescriptor_239d5a4df3139dce = []byte{
    -	// 524 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6f, 0xd3, 0x30,
    -	0x18, 0xc6, 0x9b, 0xb5, 0x95, 0x56, 0x97, 0x8d, 0x2a, 0xea, 0x21, 0xea, 0x21, 0x19, 0x95, 0x90,
    -	0x26, 0x24, 0x1c, 0x3a, 0x21, 0x84, 0x38, 0x8d, 0x08, 0x01, 0x93, 0x3a, 0x21, 0x65, 0x3b, 0xa1,
    -	0x1d, 0x70, 0x93, 0x97, 0xd4, 0x74, 0x89, 0x83, 0xed, 0x16, 0xed, 0xc6, 0x47, 0xe0, 0xca, 0xc7,
    -	0x80, 0x4f, 0xd1, 0xe3, 0x8e, 0x3b, 0x45, 0xd4, 0x7c, 0x11, 0x64, 0xb7, 0x5b, 0x4b, 0xff, 0x68,
    -	0xd3, 0x6e, 0xf1, 0xeb, 0xe7, 0xf9, 0xbd, 0x8f, 0x9f, 0x43, 0xd0, 0x93, 0xc1, 0x4b, 0x81, 0x29,
    -	0xf3, 0x49, 0x4e, 0xfd, 0x88, 0x31, 0x1e, 0xd3, 0x8c, 0x48, 0xca, 0x32, 0x7f, 0xd4, 0xf1, 0x13,
    -	0xc8, 0x80, 0x13, 0x09, 0x31, 0xce, 0x39, 0x93, 0xcc, 0x6e, 0x4d, 0xb5, 0x98, 0xe4, 0x14, 0x2f,
    -	0x6a, 0xf1, 0xa8, 0xd3, 0x7a, 0x9a, 0x50, 0xd9, 0x1f, 0xf6, 0x70, 0xc4, 0x52, 0x3f, 0x61, 0x09,
    -	0xf3, 0x8d, 0xa5, 0x37, 0xfc, 0x6c, 0x4e, 0xe6, 0x60, 0xbe, 0xa6, 0xa8, 0xd6, 0xf3, 0xf9, 0xda,
    -	0x94, 0x44, 0x7d, 0x9a, 0x01, 0xbf, 0xf0, 0xf3, 0x41, 0xa2, 0x07, 0xc2, 0x4f, 0x41, 0x92, 0x35,
    -	0x01, 0x5a, 0xfe, 0x26, 0x17, 0x1f, 0x66, 0x92, 0xa6, 0xb0, 0x62, 0x78, 0x71, 0x9b, 0x41, 0x44,
    -	0x7d, 0x48, 0xc9, 0xb2, 0xaf, 0xfd, 0xdb, 0x42, 0xd5, 0x2e, 0x10, 0x01, 0xf6, 0x27, 0xb4, 0xad,
    -	0xd3, 0xc4, 0x44, 0x12, 0xc7, 0xda, 0xb3, 0xf6, 0xeb, 0x07, 0xcf, 0xf0, 0xbc, 0x86, 0x1b, 0x28,
    -	0xce, 0x07, 0x89, 0x1e, 0x08, 0xac, 0xd5, 0x78, 0xd4, 0xc1, 0x1f, 0x7a, 0x5f, 0x20, 0x92, 0xc7,
    -	0x20, 0x49, 0x60, 0x8f, 0x0b, 0xaf, 0xa4, 0x0a, 0x0f, 0xcd, 0x67, 0xe1, 0x0d, 0xd5, 0x7e, 0x87,
    -	0x2a, 0x22, 0x87, 0xc8, 0xd9, 0x32, 0xf4, 0xc7, 0x78, 0x73, 0xc9, 0xd8, 0x44, 0x3a, 0xc9, 0x21,
    -	0x0a, 0x1e, 0xcc, 0x90, 0x15, 0x7d, 0x0a, 0x0d, 0xa0, 0xfd, 0xcb, 0x42, 0x35, 0xa3, 0xe8, 0x52,
    -	0x21, 0xed, 0xb3, 0x95, 0xe0, 0xf8, 0x6e, 0xc1, 0xb5, 0xdb, 0xc4, 0x6e, 0xcc, 0x76, 0x6c, 0x5f,
    -	0x4f, 0x16, 0x42, 0xbf, 0x45, 0x55, 0x2a, 0x21, 0x15, 0xce, 0xd6, 0x5e, 0x79, 0xbf, 0x7e, 0xf0,
    -	0xe8, 0xd6, 0xd4, 0xc1, 0xce, 0x8c, 0x56, 0x3d, 0xd2, 0xbe, 0x70, 0x6a, 0x6f, 0xff, 0x2c, 0xcf,
    -	0x32, 0xeb, 0x77, 0xd8, 0xaf, 0xd0, 0x6e, 0x9f, 0x9d, 0xc7, 0xc0, 0x8f, 0x62, 0xc8, 0x24, 0x95,
    -	0x17, 0x26, 0x79, 0x2d, 0xb0, 0x55, 0xe1, 0xed, 0xbe, 0xff, 0xef, 0x26, 0x5c, 0x52, 0xda, 0x5d,
    -	0xd4, 0x3c, 0xd7, 0xa0, 0x37, 0x43, 0x6e, 0x36, 0x9f, 0x40, 0xc4, 0xb2, 0x58, 0x98, 0x5a, 0xab,
    -	0x81, 0xa3, 0x0a, 0xaf, 0xd9, 0x5d, 0x73, 0x1f, 0xae, 0x75, 0xd9, 0x3d, 0x54, 0x27, 0xd1, 0xd7,
    -	0x21, 0xe5, 0x70, 0x4a, 0x53, 0x70, 0xca, 0xa6, 0x40, 0xff, 0x6e, 0x05, 0x1e, 0xd3, 0x88, 0x33,
    -	0x6d, 0x0b, 0x1e, 0xaa, 0xc2, 0xab, 0xbf, 0x9e, 0x73, 0xc2, 0x45, 0xa8, 0x7d, 0x86, 0x6a, 0x1c,
    -	0x32, 0xf8, 0x66, 0x36, 0x54, 0xee, 0xb7, 0x61, 0x47, 0x15, 0x5e, 0x2d, 0xbc, 0xa6, 0x84, 0x73,
    -	0xa0, 0x7d, 0x88, 0x1a, 0xe6, 0x65, 0xa7, 0x9c, 0x64, 0x82, 0xea, 0xb7, 0x09, 0xa7, 0x6a, 0xba,
    -	0x68, 0xaa, 0xc2, 0x6b, 0x74, 0x97, 0xee, 0xc2, 0x15, 0x75, 0x70, 0x38, 0x9e, 0xb8, 0xa5, 0xcb,
    -	0x89, 0x5b, 0xba, 0x9a, 0xb8, 0xa5, 0xef, 0xca, 0xb5, 0xc6, 0xca, 0xb5, 0x2e, 0x95, 0x6b, 0x5d,
    -	0x29, 0xd7, 0xfa, 0xa3, 0x5c, 0xeb, 0xc7, 0x5f, 0xb7, 0xf4, 0xb1, 0xb5, 0xf9, 0x07, 0xf2, 0x2f,
    -	0x00, 0x00, 0xff, 0xff, 0xb0, 0xb0, 0x3a, 0x46, 0x5d, 0x04, 0x00, 0x00,
    +	// 588 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x40,
    +	0x14, 0xc6, 0xb7, 0xb0, 0xab, 0xec, 0xac, 0xfc, 0xc9, 0xc8, 0x45, 0xb3, 0x17, 0x2d, 0x92, 0x98,
    +	0x10, 0x13, 0xa7, 0x42, 0x8c, 0x31, 0x26, 0x26, 0x58, 0x89, 0x4a, 0xb2, 0x44, 0x53, 0xb8, 0x32,
    +	0x5c, 0x38, 0xdb, 0x1e, 0xba, 0x23, 0xb4, 0x53, 0x67, 0x66, 0x31, 0xdc, 0xf9, 0x08, 0x3e, 0x81,
    +	0xef, 0xa0, 0x4f, 0xc1, 0x25, 0x97, 0x5c, 0x35, 0x32, 0xbe, 0x85, 0x57, 0x66, 0x66, 0x0b, 0x0b,
    +	0xcb, 0x6e, 0x20, 0xde, 0x75, 0xce, 0x39, 0xdf, 0xef, 0x7c, 0x73, 0x4e, 0x5b, 0xf4, 0x68, 0xff,
    +	0xb9, 0x24, 0x8c, 0x07, 0xb4, 0x60, 0x41, 0xcc, 0xb9, 0x48, 0x58, 0x4e, 0x15, 0xe3, 0x79, 0x70,
    +	0xb8, 0x1a, 0xa4, 0x90, 0x83, 0xa0, 0x0a, 0x12, 0x52, 0x08, 0xae, 0x38, 0x6e, 0x0f, 0x6a, 0x09,
    +	0x2d, 0x18, 0xb9, 0x5c, 0x4b, 0x0e, 0x57, 0xdb, 0x8f, 0x53, 0xa6, 0x7a, 0xfd, 0x2e, 0x89, 0x79,
    +	0x16, 0xa4, 0x3c, 0xe5, 0x81, 0x95, 0x74, 0xfb, 0x7b, 0xf6, 0x64, 0x0f, 0xf6, 0x69, 0x80, 0x6a,
    +	0x3f, 0x1d, 0xb6, 0xcd, 0x68, 0xdc, 0x63, 0x39, 0x88, 0xa3, 0xa0, 0xd8, 0x4f, 0x4d, 0x40, 0x06,
    +	0x19, 0x28, 0x3a, 0xc6, 0x40, 0x3b, 0x98, 0xa4, 0x12, 0xfd, 0x5c, 0xb1, 0x0c, 0xae, 0x09, 0x9e,
    +	0xdd, 0x24, 0x90, 0x71, 0x0f, 0x32, 0x3a, 0xaa, 0x5b, 0xfe, 0xe5, 0xa0, 0x46, 0x07, 0xa8, 0x04,
    +	0xfc, 0x09, 0xcd, 0x18, 0x37, 0x09, 0x55, 0xd4, 0x75, 0x96, 0x9c, 0x95, 0xd6, 0xda, 0x13, 0x32,
    +	0x1c, 0xc3, 0x05, 0x94, 0x14, 0xfb, 0xa9, 0x09, 0x48, 0x62, 0xaa, 0xc9, 0xe1, 0x2a, 0x79, 0xdf,
    +	0xfd, 0x0c, 0xb1, 0xda, 0x02, 0x45, 0x43, 0x7c, 0x5c, 0xfa, 0x35, 0x5d, 0xfa, 0x68, 0x18, 0x8b,
    +	0x2e, 0xa8, 0xf8, 0x2d, 0xaa, 0xcb, 0x02, 0x62, 0x77, 0xca, 0xd2, 0x1f, 0x92, 0xc9, 0x43, 0x26,
    +	0xd6, 0xd2, 0x76, 0x01, 0x71, 0x78, 0xaf, 0x42, 0xd6, 0xcd, 0x29, 0xb2, 0x80, 0xe5, 0x9f, 0x0e,
    +	0x6a, 0xda, 0x8a, 0x0e, 0x93, 0x0a, 0xef, 0x5e, 0x33, 0x4e, 0x6e, 0x67, 0xdc, 0xa8, 0xad, 0xed,
    +	0x85, 0xaa, 0xc7, 0xcc, 0x79, 0xe4, 0x92, 0xe9, 0x37, 0xa8, 0xc1, 0x14, 0x64, 0xd2, 0x9d, 0x5a,
    +	0x9a, 0x5e, 0x69, 0xad, 0x3d, 0xb8, 0xd1, 0x75, 0x38, 0x5b, 0xd1, 0x1a, 0x9b, 0x46, 0x17, 0x0d,
    +	0xe4, 0xcb, 0x3f, 0xea, 0x95, 0x67, 0x73, 0x0f, 0xfc, 0x02, 0xcd, 0xf5, 0xf8, 0x41, 0x02, 0x62,
    +	0x33, 0x81, 0x5c, 0x31, 0x75, 0x64, 0x9d, 0x37, 0x43, 0xac, 0x4b, 0x7f, 0xee, 0xdd, 0x95, 0x4c,
    +	0x34, 0x52, 0x89, 0x3b, 0x68, 0xf1, 0xc0, 0x80, 0x36, 0xfa, 0xc2, 0x76, 0xde, 0x86, 0x98, 0xe7,
    +	0x89, 0xb4, 0x63, 0x6d, 0x84, 0xae, 0x2e, 0xfd, 0xc5, 0xce, 0x98, 0x7c, 0x34, 0x56, 0x85, 0xbb,
    +	0xa8, 0x45, 0xe3, 0x2f, 0x7d, 0x26, 0x60, 0x87, 0x65, 0xe0, 0x4e, 0xdb, 0x01, 0x06, 0xb7, 0x1b,
    +	0xe0, 0x16, 0x8b, 0x05, 0x37, 0xb2, 0x70, 0x5e, 0x97, 0x7e, 0xeb, 0xd5, 0x90, 0x13, 0x5d, 0x86,
    +	0xe2, 0x5d, 0xd4, 0x14, 0x90, 0xc3, 0x57, 0xdb, 0xa1, 0xfe, 0x7f, 0x1d, 0x66, 0x75, 0xe9, 0x37,
    +	0xa3, 0x73, 0x4a, 0x34, 0x04, 0xe2, 0x75, 0xb4, 0x60, 0x6f, 0xb6, 0x23, 0x68, 0x2e, 0x99, 0xb9,
    +	0x9b, 0x74, 0x1b, 0x76, 0x16, 0x8b, 0xba, 0xf4, 0x17, 0x3a, 0x23, 0xb9, 0xe8, 0x5a, 0x35, 0xde,
    +	0x40, 0x33, 0x52, 0x99, 0xaf, 0x22, 0x3d, 0x72, 0xef, 0xd8, 0x3d, 0xac, 0x98, 0xb7, 0x61, 0xbb,
    +	0x8a, 0xfd, 0x2d, 0x7d, 0xf7, 0xf5, 0xf9, 0xaa, 0x21, 0x19, 0x6c, 0xb1, 0xca, 0x45, 0x17, 0x4a,
    +	0xfc, 0x12, 0xcd, 0x17, 0x02, 0xf6, 0x40, 0x08, 0x48, 0x06, 0x2b, 0x74, 0xef, 0x5a, 0xd8, 0x7d,
    +	0x5d, 0xfa, 0xf3, 0x1f, 0xae, 0xa6, 0xa2, 0xd1, 0xda, 0x70, 0xfd, 0xf8, 0xcc, 0xab, 0x9d, 0x9c,
    +	0x79, 0xb5, 0xd3, 0x33, 0xaf, 0xf6, 0x4d, 0x7b, 0xce, 0xb1, 0xf6, 0x9c, 0x13, 0xed, 0x39, 0xa7,
    +	0xda, 0x73, 0x7e, 0x6b, 0xcf, 0xf9, 0xfe, 0xc7, 0xab, 0x7d, 0x6c, 0x4f, 0xfe, 0x8b, 0xfd, 0x0b,
    +	0x00, 0x00, 0xff, 0xff, 0xf8, 0xf4, 0xd4, 0x78, 0xe2, 0x04, 0x00, 0x00,
     }
     
     func (m *Lease) Marshal() (dAtA []byte, err error) {
    @@ -285,6 +289,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.PreferredHolder != nil {
    +		i -= len(*m.PreferredHolder)
    +		copy(dAtA[i:], *m.PreferredHolder)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder)))
    +		i--
    +		dAtA[i] = 0x3a
    +	}
    +	if m.Strategy != nil {
    +		i -= len(*m.Strategy)
    +		copy(dAtA[i:], *m.Strategy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy)))
    +		i--
    +		dAtA[i] = 0x32
    +	}
     	if m.LeaseTransitions != nil {
     		i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions))
     		i--
    @@ -394,6 +412,14 @@ func (m *LeaseSpec) Size() (n int) {
     	if m.LeaseTransitions != nil {
     		n += 1 + sovGenerated(uint64(*m.LeaseTransitions))
     	}
    +	if m.Strategy != nil {
    +		l = len(*m.Strategy)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.PreferredHolder != nil {
    +		l = len(*m.PreferredHolder)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -440,6 +466,8 @@ func (this *LeaseSpec) String() string {
     		`AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`,
     		`RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`,
     		`LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`,
    +		`Strategy:` + valueToStringGenerated(this.Strategy) + `,`,
    +		`PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -859,6 +887,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error {
     				}
     			}
     			m.LeaseTransitions = &v
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])
    +			m.Strategy = &s
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.PreferredHolder = &s
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
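The two new Unmarshal cases above follow the standard protobuf wire format: a varint tag whose low three bits give the wire type (2 = length-delimited), then a varint length, then the raw bytes. A compact, standalone sketch of that decoding path — not the generated code itself — using the same tag byte 0x32 that the marshaller writes for the strategy field:

```go
package main

import (
	"errors"
	"fmt"
)

// readUvarint decodes a base-128 varint from buf, returning the value and the
// number of bytes consumed, mirroring the shift-by-7 loop in the generated code.
func readUvarint(buf []byte) (uint64, int, error) {
	var v uint64
	for i := 0; i < len(buf); i++ {
		b := buf[i]
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1, nil
		}
		if i >= 9 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
	}
	return 0, 0, errors.New("unexpected end of input")
}

func main() {
	// Wire bytes for LeaseSpec field 6 ("strategy"): tag 0x32 = (6<<3)|2,
	// then the length, then the string bytes.
	payload := []byte("OldestEmulationVersion")
	msg := append([]byte{0x32, byte(len(payload))}, payload...)

	tag, n, _ := readUvarint(msg)
	length, m, _ := readUvarint(msg[n:])
	value := string(msg[n+m : n+m+int(length)])
	fmt.Println(tag>>3, tag&0x7, value) // 6 2 OldestEmulationVersion
}
```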
    diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto
    index 36fce60f2d..4d4f7e08f4 100644
    --- a/vendor/k8s.io/api/coordination/v1/generated.proto
    +++ b/vendor/k8s.io/api/coordination/v1/generated.proto
    @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/coordination/v1";
     message Lease {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec contains the specification of the Lease.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -45,7 +45,7 @@ message LeaseList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated Lease items = 2;
    @@ -54,27 +54,43 @@ message LeaseList {
     // LeaseSpec is a specification of a Lease.
     message LeaseSpec {
       // holderIdentity contains the identity of the holder of a current lease.
    +  // If Coordinated Leader Election is used, the holder identity must be
    +  // equal to the elected LeaseCandidate.metadata.name field.
       // +optional
       optional string holderIdentity = 1;
     
       // leaseDurationSeconds is a duration that candidates for a lease need
    -  // to wait to force acquire it. This is measure against time of last
    +  // to wait to force acquire it. This is measured against the time of last
       // observed renewTime.
       // +optional
       optional int32 leaseDurationSeconds = 2;
     
       // acquireTime is a time when the current lease was acquired.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3;
     
       // renewTime is a time when the current holder of a lease has last
       // updated the lease.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4;
     
       // leaseTransitions is the number of transitions of a lease between
       // holders.
       // +optional
       optional int32 leaseTransitions = 5;
    +
    +  // Strategy indicates the strategy for picking the leader for coordinated leader election.
    +  // If the field is not specified, there is no active coordination for this lease.
    +  // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    +  // +featureGate=CoordinatedLeaderElection
    +  // +optional
    +  optional string strategy = 6;
    +
    +  // PreferredHolder signals to a lease holder that the lease has a
    +  // more optimal holder and should be given up.
    +  // This field can only be set if Strategy is also set.
    +  // +featureGate=CoordinatedLeaderElection
    +  // +optional
    +  optional string preferredHolder = 7;
     }
     
    diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go
    index b0e1d06829..5307cea88f 100644
    --- a/vendor/k8s.io/api/coordination/v1/types.go
    +++ b/vendor/k8s.io/api/coordination/v1/types.go
    @@ -20,8 +20,21 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    +type CoordinatedLeaseStrategy string
    +
    +// CoordinatedLeaseStrategy defines the strategy for picking the leader for coordinated leader election.
    +const (
    +	// OldestEmulationVersion picks the oldest LeaseCandidate, where "oldest" is defined as follows
    +	// 1) Select the candidate(s) with the lowest emulation version
    +	// 2) If multiple candidates have the same emulation version, select the candidate(s) with the lowest binary version. (Note that binary version must be greater or equal to emulation version)
    +	// 3) If multiple candidates have the same binary version, select the candidate with the oldest creationTimestamp.
    +	// If a candidate does not specify the emulationVersion and binaryVersion fields, it will not be considered a candidate for the lease.
    +	OldestEmulationVersion CoordinatedLeaseStrategy = "OldestEmulationVersion"
    +)
    +
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.14
     
     // Lease defines a lease concept.
     type Lease struct {
    @@ -39,10 +52,12 @@ type Lease struct {
     // LeaseSpec is a specification of a Lease.
     type LeaseSpec struct {
     	// holderIdentity contains the identity of the holder of a current lease.
    +	// If Coordinated Leader Election is used, the holder identity must be
    +	// equal to the elected LeaseCandidate.metadata.name field.
     	// +optional
     	HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"`
     	// leaseDurationSeconds is a duration that candidates for a lease need
    -	// to wait to force acquire it. This is measure against time of last
    +	// to wait to force acquire it. This is measured against the time of last
     	// observed renewTime.
     	// +optional
     	LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"`
    @@ -57,9 +72,22 @@ type LeaseSpec struct {
     	// holders.
     	// +optional
     	LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"`
    +	// Strategy indicates the strategy for picking the leader for coordinated leader election.
    +	// If the field is not specified, there is no active coordination for this lease.
    +	// (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    +	// +featureGate=CoordinatedLeaderElection
    +	// +optional
    +	Strategy *CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"`
    +	// PreferredHolder signals to a lease holder that the lease has a
    +	// more optimal holder and should be given up.
    +	// This field can only be set if Strategy is also set.
    +	// +featureGate=CoordinatedLeaderElection
    +	// +optional
    +	PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"`
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // LeaseList is a list of Lease objects.
     type LeaseList struct {
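The comment on OldestEmulationVersion above describes a three-step ordering. The sketch below illustrates that comparison only; the candidate struct and its fields are hypothetical pre-parsed stand-ins (versions held as major/minor pairs), whereas the real controller works on LeaseCandidate objects and parses version strings.

```go
package main

import (
	"fmt"
	"time"
)

// candidate is a hypothetical, pre-parsed stand-in for a LeaseCandidate.
type candidate struct {
	name             string
	emulationVersion [2]int // major, minor
	binaryVersion    [2]int
	created          time.Time
}

func lessVersion(a, b [2]int) bool {
	if a[0] != b[0] {
		return a[0] < b[0]
	}
	return a[1] < b[1]
}

// older applies the ordering described for OldestEmulationVersion: lowest
// emulation version, then lowest binary version, then oldest creationTimestamp.
func older(a, b candidate) bool {
	if a.emulationVersion != b.emulationVersion {
		return lessVersion(a.emulationVersion, b.emulationVersion)
	}
	if a.binaryVersion != b.binaryVersion {
		return lessVersion(a.binaryVersion, b.binaryVersion)
	}
	return a.created.Before(b.created)
}

func main() {
	now := time.Now()
	a := candidate{"a", [2]int{1, 30}, [2]int{1, 31}, now}
	b := candidate{"b", [2]int{1, 30}, [2]int{1, 30}, now.Add(time.Minute)}
	fmt.Println(older(a, b)) // false: same emulation version, b has the lower binary version
}
```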
    diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go
    index f3720eca02..6c1a7ea8b9 100644
    --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go
    @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string {
     
     var map_LeaseSpec = map[string]string{
     	"":                     "LeaseSpec is a specification of a Lease.",
    -	"holderIdentity":       "holderIdentity contains the identity of the holder of a current lease.",
    -	"leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.",
    +	"holderIdentity":       "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.",
    +	"leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed renewTime.",
     	"acquireTime":          "acquireTime is a time when the current lease was acquired.",
     	"renewTime":            "renewTime is a time when the current holder of a lease has last updated the lease.",
     	"leaseTransitions":     "leaseTransitions is the number of transitions of a lease between holders.",
    +	"strategy":             "Strategy indicates the strategy for picking the leader for coordinated leader election. If the field is not specified, there is no active coordination for this lease. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.",
    +	"preferredHolder":      "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up. This field can only be set if Strategy is also set.",
     }
     
     func (LeaseSpec) SwaggerDoc() map[string]string {
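To see how the new LeaseSpec fields fit together, here is an illustrative construction of a Lease that opts into coordinated leader election by setting strategy (the names "my-lock" and "controller-a" are made up, and the CoordinatedLeaderElection feature gate must be enabled per the field documentation). preferredHolder is normally set by the election controller rather than by hand, so it is only mentioned in a comment.

```go
package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	holder := "controller-a"
	duration := int32(15)
	strategy := coordinationv1.OldestEmulationVersion // new spec.strategy value

	lease := coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: "my-lock", Namespace: "kube-system"},
		Spec: coordinationv1.LeaseSpec{
			HolderIdentity:       &holder,
			LeaseDurationSeconds: &duration,
			Strategy:             &strategy,
			// PreferredHolder would be filled in by the controller when a
			// more optimal holder exists; it requires Strategy to be set.
		},
	}
	fmt.Printf("%s holds %s with strategy %s\n",
		*lease.Spec.HolderIdentity, lease.Name, *lease.Spec.Strategy)
}
```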
    diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go
    index 99f6b0be7a..4d549cc99f 100644
    --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go
    @@ -111,6 +111,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) {
     		*out = new(int32)
     		**out = **in
     	}
    +	if in.Strategy != nil {
    +		in, out := &in.Strategy, &out.Strategy
    +		*out = new(CoordinatedLeaseStrategy)
    +		**out = **in
    +	}
    +	if in.PreferredHolder != nil {
    +		in, out := &in.PreferredHolder, &out.PreferredHolder
    +		*out = new(string)
    +		**out = **in
    +	}
     	return
     }
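The generated DeepCopyInto above allocates fresh values for the new pointer fields (new(CoordinatedLeaseStrategy), new(string)) so the copy never aliases the original. A minimal sketch of why that matters, using a hypothetical spec type rather than the real LeaseSpec:

```go
package main

import "fmt"

type spec struct {
	preferredHolder *string
}

func main() {
	h := "node-a"
	orig := spec{preferredHolder: &h}

	shallow := orig // copies the pointer, not the pointed-to value
	deep := spec{}
	if orig.preferredHolder != nil {
		v := *orig.preferredHolder // copy the value, as DeepCopyInto does
		deep.preferredHolder = &v
	}

	*shallow.preferredHolder = "node-b"                       // also mutates orig
	fmt.Println(*orig.preferredHolder, *deep.preferredHolder) // node-b node-a
}
```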
     
    diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..a22632cba9
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Lease) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 14
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LeaseList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/doc.go b/vendor/k8s.io/api/coordination/v1alpha1/doc.go
    new file mode 100644
    index 0000000000..33a0b0ea97
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/doc.go
    @@ -0,0 +1,24 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// +k8s:deepcopy-gen=package
    +// +k8s:protobuf-gen=package
    +// +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
    +
    +// +groupName=coordination.k8s.io
    +
    +package v1alpha1 // import "k8s.io/api/coordination/v1alpha1"
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go
    new file mode 100644
    index 0000000000..9e072e62d0
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go
    @@ -0,0 +1,1036 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by protoc-gen-gogo. DO NOT EDIT.
    +// source: k8s.io/api/coordination/v1alpha1/generated.proto
    +
    +package v1alpha1
    +
    +import (
    +	fmt "fmt"
    +
    +	io "io"
    +
    +	proto "github.com/gogo/protobuf/proto"
    +
    +	k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +
    +	math "math"
    +	math_bits "math/bits"
    +	reflect "reflect"
    +	strings "strings"
    +)
    +
    +// Reference imports to suppress errors if they are not otherwise used.
    +var _ = proto.Marshal
    +var _ = fmt.Errorf
    +var _ = math.Inf
    +
    +// This is a compile-time assertion to ensure that this generated file
    +// is compatible with the proto package it is being compiled against.
    +// A compilation error at this line likely means your copy of the
    +// proto package needs to be updated.
    +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
    +
    +func (m *LeaseCandidate) Reset()      { *m = LeaseCandidate{} }
    +func (*LeaseCandidate) ProtoMessage() {}
    +func (*LeaseCandidate) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_cb9e87df9da593c2, []int{0}
    +}
    +func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LeaseCandidate) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LeaseCandidate.Merge(m, src)
    +}
    +func (m *LeaseCandidate) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LeaseCandidate) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LeaseCandidate.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo
    +
    +func (m *LeaseCandidateList) Reset()      { *m = LeaseCandidateList{} }
    +func (*LeaseCandidateList) ProtoMessage() {}
    +func (*LeaseCandidateList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_cb9e87df9da593c2, []int{1}
    +}
    +func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LeaseCandidateList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LeaseCandidateList.Merge(m, src)
    +}
    +func (m *LeaseCandidateList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LeaseCandidateList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo
    +
    +func (m *LeaseCandidateSpec) Reset()      { *m = LeaseCandidateSpec{} }
    +func (*LeaseCandidateSpec) ProtoMessage() {}
    +func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_cb9e87df9da593c2, []int{2}
    +}
    +func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LeaseCandidateSpec.Merge(m, src)
    +}
    +func (m *LeaseCandidateSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LeaseCandidateSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo
    +
    +func init() {
    +	proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidate")
    +	proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateList")
    +	proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateSpec")
    +}
    +
    +func init() {
    +	proto.RegisterFile("k8s.io/api/coordination/v1alpha1/generated.proto", fileDescriptor_cb9e87df9da593c2)
    +}
    +
    +var fileDescriptor_cb9e87df9da593c2 = []byte{
    +	// 570 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcb, 0x6e, 0xd3, 0x4c,
    +	0x14, 0xc7, 0xe3, 0x36, 0xf9, 0x94, 0xcc, 0xd7, 0xa0, 0x30, 0x15, 0x92, 0x95, 0x85, 0x13, 0x65,
    +	0x55, 0x21, 0x31, 0x6e, 0xa0, 0x42, 0x48, 0xec, 0x5c, 0x40, 0x42, 0x4a, 0x4b, 0xe5, 0x42, 0x25,
    +	0x50, 0x17, 0x4c, 0xec, 0x53, 0x67, 0x48, 0x7c, 0xd1, 0x78, 0x52, 0x94, 0x5d, 0x17, 0x3c, 0x00,
    +	0x8f, 0x15, 0x58, 0x75, 0xd9, 0x55, 0x44, 0xcc, 0x5b, 0xb0, 0x42, 0x33, 0xb1, 0x73, 0x6d, 0x94,
    +	0x88, 0x5d, 0xce, 0xe5, 0xf7, 0x3f, 0xe7, 0x7f, 0xac, 0x0c, 0x3a, 0xec, 0xbe, 0x88, 0x09, 0x0b,
    +	0x4d, 0x1a, 0x31, 0xd3, 0x09, 0x43, 0xee, 0xb2, 0x80, 0x0a, 0x16, 0x06, 0xe6, 0x75, 0x93, 0xf6,
    +	0xa2, 0x0e, 0x6d, 0x9a, 0x1e, 0x04, 0xc0, 0xa9, 0x00, 0x97, 0x44, 0x3c, 0x14, 0x21, 0xae, 0x4f,
    +	0x08, 0x42, 0x23, 0x46, 0xe6, 0x09, 0x92, 0x11, 0xd5, 0x27, 0x1e, 0x13, 0x9d, 0x7e, 0x9b, 0x38,
    +	0xa1, 0x6f, 0x7a, 0xa1, 0x17, 0x9a, 0x0a, 0x6c, 0xf7, 0xaf, 0x54, 0xa4, 0x02, 0xf5, 0x6b, 0x22,
    +	0x58, 0x7d, 0xbc, 0x7e, 0x85, 0xe5, 0xe1, 0xd5, 0xa3, 0x59, 0xaf, 0x4f, 0x9d, 0x0e, 0x0b, 0x80,
    +	0x0f, 0xcc, 0xa8, 0xeb, 0xc9, 0x44, 0x6c, 0xfa, 0x20, 0xe8, 0x7d, 0x94, 0xb9, 0x8e, 0xe2, 0xfd,
    +	0x40, 0x30, 0x1f, 0x56, 0x80, 0xe7, 0x9b, 0x80, 0xd8, 0xe9, 0x80, 0x4f, 0x97, 0xb9, 0xc6, 0x4f,
    +	0x0d, 0x3d, 0x68, 0x01, 0x8d, 0xe1, 0x98, 0x06, 0x2e, 0x73, 0xa9, 0x00, 0xfc, 0x19, 0x15, 0xe5,
    +	0x5a, 0x2e, 0x15, 0x54, 0xd7, 0xea, 0xda, 0xc1, 0xff, 0x4f, 0x0f, 0xc9, 0xec, 0x82, 0x53, 0x75,
    +	0x12, 0x75, 0x3d, 0x99, 0x88, 0x89, 0xec, 0x26, 0xd7, 0x4d, 0xf2, 0xae, 0xfd, 0x05, 0x1c, 0x71,
    +	0x02, 0x82, 0x5a, 0x78, 0x38, 0xaa, 0xe5, 0x92, 0x51, 0x0d, 0xcd, 0x72, 0xf6, 0x54, 0x15, 0x5f,
    +	0xa0, 0x7c, 0x1c, 0x81, 0xa3, 0xef, 0x28, 0xf5, 0x23, 0xb2, 0xe9, 0xfb, 0x90, 0xc5, 0x0d, 0xcf,
    +	0x23, 0x70, 0xac, 0xbd, 0x74, 0x42, 0x5e, 0x46, 0xb6, 0xd2, 0x6b, 0xfc, 0xd0, 0x10, 0x5e, 0x6c,
    +	0x6d, 0xb1, 0x58, 0xe0, 0xcb, 0x15, 0x43, 0x64, 0x3b, 0x43, 0x92, 0x56, 0x76, 0x2a, 0xe9, 0xb0,
    +	0x62, 0x96, 0x99, 0x33, 0xf3, 0x01, 0x15, 0x98, 0x00, 0x3f, 0xd6, 0x77, 0xea, 0xbb, 0x4b, 0xb7,
    +	0xda, 0xca, 0x8d, 0x55, 0x4e, 0xc5, 0x0b, 0x6f, 0xa5, 0x8c, 0x3d, 0x51, 0x6b, 0x7c, 0xcb, 0x2f,
    +	0x7b, 0x91, 0x46, 0xb1, 0x89, 0x4a, 0x3d, 0x99, 0x3d, 0xa5, 0x3e, 0x28, 0x33, 0x25, 0xeb, 0x61,
    +	0xca, 0x97, 0x5a, 0x59, 0xc1, 0x9e, 0xf5, 0xe0, 0x8f, 0xa8, 0x18, 0xb1, 0xc0, 0x7b, 0xcf, 0x7c,
    +	0x48, 0xef, 0x6d, 0x6e, 0x67, 0xfe, 0x84, 0x39, 0x3c, 0x94, 0x98, 0xb5, 0x27, 0x9d, 0x9f, 0xa5,
    +	0x22, 0xf6, 0x54, 0x0e, 0x5f, 0xa2, 0x12, 0x87, 0x00, 0xbe, 0x2a, 0xed, 0xdd, 0x7f, 0xd3, 0x2e,
    +	0xcb, 0xc5, 0xed, 0x4c, 0xc5, 0x9e, 0x09, 0xe2, 0x97, 0xa8, 0xdc, 0x66, 0x01, 0xe5, 0x83, 0x0b,
    +	0xe0, 0x31, 0x0b, 0x03, 0x3d, 0xaf, 0xdc, 0x3e, 0x4a, 0xdd, 0x96, 0xad, 0xf9, 0xa2, 0xbd, 0xd8,
    +	0x8b, 0x5f, 0xa1, 0x0a, 0xf8, 0xfd, 0x9e, 0x3a, 0x7c, 0xc6, 0x17, 0x14, 0xaf, 0xa7, 0x7c, 0xe5,
    +	0xf5, 0x52, 0xdd, 0x5e, 0x21, 0xf0, 0x8d, 0x86, 0xf6, 0x23, 0x0e, 0x57, 0xc0, 0x39, 0xb8, 0xe7,
    +	0x42, 0xfe, 0x6f, 0x3c, 0x06, 0xb1, 0xfe, 0x5f, 0x7d, 0xf7, 0xa0, 0x64, 0x9d, 0x26, 0xa3, 0xda,
    +	0xfe, 0xd9, 0x6a, 0xf9, 0xcf, 0xa8, 0xf6, 0x6c, 0xfd, 0x03, 0x41, 0x8e, 0xb3, 0x18, 0x5c, 0xf5,
    +	0xc1, 0x52, 0x70, 0x60, 0xdf, 0x37, 0xca, 0x7a, 0x33, 0x1c, 0x1b, 0xb9, 0xdb, 0xb1, 0x91, 0xbb,
    +	0x1b, 0x1b, 0xb9, 0x9b, 0xc4, 0xd0, 0x86, 0x89, 0xa1, 0xdd, 0x26, 0x86, 0x76, 0x97, 0x18, 0xda,
    +	0xaf, 0xc4, 0xd0, 0xbe, 0xff, 0x36, 0x72, 0x9f, 0xea, 0x9b, 0xde, 0xc4, 0xbf, 0x01, 0x00, 0x00,
    +	0xff, 0xff, 0x05, 0x28, 0x49, 0xd9, 0x36, 0x05, 0x00, 0x00,
    +}
    +
    +func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.PreferredStrategies) > 0 {
    +		for iNdEx := len(m.PreferredStrategies) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.PreferredStrategies[iNdEx])
    +			copy(dAtA[i:], m.PreferredStrategies[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreferredStrategies[iNdEx])))
    +			i--
    +			dAtA[i] = 0x32
    +		}
    +	}
    +	i -= len(m.EmulationVersion)
    +	copy(dAtA[i:], m.EmulationVersion)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion)))
    +	i--
    +	dAtA[i] = 0x2a
    +	i -= len(m.BinaryVersion)
    +	copy(dAtA[i:], m.BinaryVersion)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion)))
    +	i--
    +	dAtA[i] = 0x22
    +	if m.RenewTime != nil {
    +		{
    +			size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	if m.PingTime != nil {
    +		{
    +			size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	i -= len(m.LeaseName)
    +	copy(dAtA[i:], m.LeaseName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    +	offset -= sovGenerated(v)
    +	base := offset
    +	for v >= 1<<7 {
    +		dAtA[offset] = uint8(v&0x7f | 0x80)
    +		v >>= 7
    +		offset++
    +	}
    +	dAtA[offset] = uint8(v)
    +	return base
    +}
    +func (m *LeaseCandidate) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *LeaseCandidateList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *LeaseCandidateSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.LeaseName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.PingTime != nil {
    +		l = m.PingTime.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.RenewTime != nil {
    +		l = m.RenewTime.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	l = len(m.BinaryVersion)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.EmulationVersion)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.PreferredStrategies) > 0 {
    +		for _, s := range m.PreferredStrategies {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *LeaseCandidate) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&LeaseCandidate{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *LeaseCandidateList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]LeaseCandidate{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&LeaseCandidateList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *LeaseCandidateSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&LeaseCandidateSpec{`,
    +		`LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`,
    +		`PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`,
    +		`RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`,
    +		`BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`,
    +		`EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`,
    +		`PreferredStrategies:` + fmt.Sprintf("%v", this.PreferredStrategies) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func valueToStringGenerated(v interface{}) string {
    +	rv := reflect.ValueOf(v)
    +	if rv.IsNil() {
    +		return "nil"
    +	}
    +	pv := reflect.Indirect(rv).Interface()
    +	return fmt.Sprintf("*%v", pv)
    +}
    +func (m *LeaseCandidate) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, LeaseCandidate{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.LeaseName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.PingTime == nil {
    +				m.PingTime = &v1.MicroTime{}
    +			}
    +			if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.RenewTime == nil {
    +				m.RenewTime = &v1.MicroTime{}
    +			}
    +			if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.BinaryVersion = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.EmulationVersion = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PreferredStrategies", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PreferredStrategies = append(m.PreferredStrategies, k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func skipGenerated(dAtA []byte) (n int, err error) {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	depth := 0
    +	for iNdEx < l {
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return 0, ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return 0, io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= (uint64(b) & 0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		wireType := int(wire & 0x7)
    +		switch wireType {
    +		case 0:
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				iNdEx++
    +				if dAtA[iNdEx-1] < 0x80 {
    +					break
    +				}
    +			}
    +		case 1:
    +			iNdEx += 8
    +		case 2:
    +			var length int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				length |= (int(b) & 0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if length < 0 {
    +				return 0, ErrInvalidLengthGenerated
    +			}
    +			iNdEx += length
    +		case 3:
    +			depth++
    +		case 4:
    +			if depth == 0 {
    +				return 0, ErrUnexpectedEndOfGroupGenerated
    +			}
    +			depth--
    +		case 5:
    +			iNdEx += 4
    +		default:
    +			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
    +		}
    +		if iNdEx < 0 {
    +			return 0, ErrInvalidLengthGenerated
    +		}
    +		if depth == 0 {
    +			return iNdEx, nil
    +		}
    +	}
    +	return 0, io.ErrUnexpectedEOF
    +}
    +
    +var (
    +	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
    +	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
    +	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
    +)
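For context on the generated marshalers above: encodeVarintGenerated and sovGenerated implement standard protobuf base-128 varints, sizing a value as ceil(bitlen/7) bytes and writing backwards into a pre-sized buffer. A minimal standalone sketch of the same encoding, written forward for readability (the names varintLen and putVarint are illustrative and not part of the vendored package):

package main

import (
	"fmt"
	"math/bits"
)

// varintLen mirrors sovGenerated above: each varint byte carries 7 payload
// bits, so the length is ceil(bitlen/7), with 0 still occupying one byte.
func varintLen(v uint64) int {
	return (bits.Len64(v|1) + 6) / 7
}

// putVarint mirrors encodeVarintGenerated, except it appends forward instead
// of writing backwards into a buffer sized by Size().
func putVarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7f|0x80)) // low 7 bits, continuation bit set
		v >>= 7
	}
	return append(dst, byte(v)) // final byte, continuation bit clear
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 300} {
		fmt.Printf("%d -> % x (len %d)\n", v, putVarint(nil, v), varintLen(v))
	}
}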
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto
    new file mode 100644
    index 0000000000..57895ad569
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto
    @@ -0,0 +1,105 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +
    +// This file was autogenerated by go-to-protobuf. Do not edit it manually!
    +
    +syntax = "proto2";
    +
    +package k8s.io.api.coordination.v1alpha1;
    +
    +import "k8s.io/api/coordination/v1/generated.proto";
    +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    +
    +// Package-wide variables from generator "generated".
    +option go_package = "k8s.io/api/coordination/v1alpha1";
    +
    +// LeaseCandidate defines a candidate for a Lease object.
    +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
    +message LeaseCandidate {
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec contains the specification of the Lease.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional LeaseCandidateSpec spec = 2;
    +}
    +
    +// LeaseCandidateList is a list of Lease objects.
    +message LeaseCandidateList {
    +  // Standard list metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is a list of schema objects.
    +  repeated LeaseCandidate items = 2;
    +}
    +
    +// LeaseCandidateSpec is a specification of a Lease.
    +message LeaseCandidateSpec {
    +  // LeaseName is the name of the lease for which this candidate is contending.
    +  // This field is immutable.
    +  // +required
    +  optional string leaseName = 1;
    +
    +  // PingTime is the last time that the server has requested the LeaseCandidate
    +  // to renew. It is only done during leader election to check if any
    +  // LeaseCandidates have become ineligible. When PingTime is updated, the
    +  // LeaseCandidate will respond by updating RenewTime.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2;
    +
    +  // RenewTime is the time that the LeaseCandidate was last updated.
    +  // Any time a Lease needs to do leader election, the PingTime field
    +  // is updated to signal to the LeaseCandidate that they should update
    +  // the RenewTime.
    +  // Old LeaseCandidate objects are also garbage collected if it has been hours
    +  // since the last renew. The PingTime field is updated regularly to prevent
    +  // garbage collection for still active LeaseCandidates.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3;
    +
    +  // BinaryVersion is the binary version. It must be in a semver format without leading `v`.
    +  // This field is required when strategy is "OldestEmulationVersion"
    +  // +optional
    +  optional string binaryVersion = 4;
    +
    +  // EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
    +  // EmulationVersion must be less than or equal to BinaryVersion.
    +  // This field is required when strategy is "OldestEmulationVersion"
    +  // +optional
    +  optional string emulationVersion = 5;
    +
    +  // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election.
    +  // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated
    +  // leader election to make a decision about the final election strategy. This follows as
    +  // - If all clients have strategy X as the first element in this list, strategy X will be used.
    +  // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y
    +  //   will be used.
    +  // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader
    +  //   election will not operate the Lease until resolved.
    +  // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    +  // +featureGate=CoordinatedLeaderElection
    +  // +listType=atomic
    +  // +required
    +  repeated string preferredStrategies = 6;
    +}
    +
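The preferredStrategies comment above specifies how differing candidate lists are reconciled. The sketch below is one literal reading of those three rules; it is a hypothetical helper, not the actual coordinated-leader-election controller, and it assumes every candidate supplies at least one strategy:

package main

import (
	"errors"
	"fmt"
)

// pickStrategy applies the documented rules: a unanimous first choice wins;
// a first choice is superseded when another candidate lists some other
// strategy ahead of it; anything still ambiguous is treated as a user error.
func pickStrategy(candidates [][]string) (string, error) {
	if len(candidates) == 0 {
		return "", errors.New("no candidates")
	}
	superseded := func(first string) bool {
		for _, list := range candidates {
			for i, s := range list {
				if s == first && i > 0 {
					return true // another candidate prefers something over `first`
				}
			}
		}
		return false
	}
	survivors := map[string]bool{}
	for _, list := range candidates {
		if !superseded(list[0]) {
			survivors[list[0]] = true
		}
	}
	if len(survivors) != 1 {
		return "", fmt.Errorf("conflicting strategies %v: leader election will not operate the lease", survivors)
	}
	for s := range survivors {
		return s, nil
	}
	return "", nil // unreachable
}

func main() {
	fmt.Println(pickStrategy([][]string{{"X"}, {"Y", "X"}}))      // Y, nil
	fmt.Println(pickStrategy([][]string{{"X", "Y"}, {"Y", "X"}})) // conflict error
}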
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/register.go b/vendor/k8s.io/api/coordination/v1alpha1/register.go
    new file mode 100644
    index 0000000000..6e57905a19
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/register.go
    @@ -0,0 +1,53 @@
    +/*
    +Copyright 2018 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha1
    +
    +import (
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	"k8s.io/apimachinery/pkg/runtime/schema"
    +)
    +
    +// GroupName is the group name use in this package
    +const GroupName = "coordination.k8s.io"
    +
    +// SchemeGroupVersion is group version used to register these objects
    +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
    +
    +// Resource takes an unqualified resource and returns a Group qualified GroupResource
    +func Resource(resource string) schema.GroupResource {
    +	return SchemeGroupVersion.WithResource(resource).GroupResource()
    +}
    +
    +var (
    +	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
    +	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
    +	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
    +	localSchemeBuilder = &SchemeBuilder
    +	AddToScheme        = localSchemeBuilder.AddToScheme
    +)
    +
    +// Adds the list of known types to api.Scheme.
    +func addKnownTypes(scheme *runtime.Scheme) error {
    +	scheme.AddKnownTypes(SchemeGroupVersion,
    +		&LeaseCandidate{},
    +		&LeaseCandidateList{},
    +	)
    +
    +	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    +	return nil
    +}
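register.go follows the usual k8s.io/api pattern: a SchemeBuilder whose AddToScheme installs the new kinds under coordination.k8s.io/v1alpha1. A minimal sketch of consuming it from client code (the import alias is assumed):

package main

import (
	"fmt"

	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers LeaseCandidate and LeaseCandidateList, plus the
	// shared meta/v1 list types for this group-version.
	if err := coordinationv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvks, _, err := scheme.ObjectKinds(&coordinationv1alpha1.LeaseCandidate{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // coordination.k8s.io/v1alpha1, Kind=LeaseCandidate
}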
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types.go b/vendor/k8s.io/api/coordination/v1alpha1/types.go
    new file mode 100644
    index 0000000000..14066600cf
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/types.go
    @@ -0,0 +1,100 @@
    +/*
    +Copyright 2018 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha1
    +
    +import (
    +	v1 "k8s.io/api/coordination/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +)
    +
    +// +genclient
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// LeaseCandidate defines a candidate for a Lease object.
    +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
    +type LeaseCandidate struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// spec contains the specification of the Lease.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +}
    +
    +// LeaseCandidateSpec is a specification of a Lease.
    +type LeaseCandidateSpec struct {
    +	// LeaseName is the name of the lease for which this candidate is contending.
    +	// This field is immutable.
    +	// +required
    +	LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"`
    +	// PingTime is the last time that the server has requested the LeaseCandidate
    +	// to renew. It is only done during leader election to check if any
    +	// LeaseCandidates have become ineligible. When PingTime is updated, the
    +	// LeaseCandidate will respond by updating RenewTime.
    +	// +optional
    +	PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"`
    +	// RenewTime is the time that the LeaseCandidate was last updated.
    +	// Any time a Lease needs to do leader election, the PingTime field
    +	// is updated to signal to the LeaseCandidate that they should update
    +	// the RenewTime.
    +	// Old LeaseCandidate objects are also garbage collected if it has been hours
    +	// since the last renew. The PingTime field is updated regularly to prevent
    +	// garbage collection for still active LeaseCandidates.
    +	// +optional
    +	RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"`
    +	// BinaryVersion is the binary version. It must be in a semver format without leading `v`.
    +	// This field is required when strategy is "OldestEmulationVersion"
    +	// +optional
    +	BinaryVersion string `json:"binaryVersion,omitempty" protobuf:"bytes,4,opt,name=binaryVersion"`
    +	// EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
    +	// EmulationVersion must be less than or equal to BinaryVersion.
    +	// This field is required when strategy is "OldestEmulationVersion"
    +	// +optional
    +	EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"`
    +	// PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election.
    +	// The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated
    +	// leader election to make a decision about the final election strategy. This follows as
    +	// - If all clients have strategy X as the first element in this list, strategy X will be used.
    +	// - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y
    +	//   will be used.
    +	// - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader
    +	//   election will not operate the Lease until resolved.
    +	// (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    +	// +featureGate=CoordinatedLeaderElection
    +	// +listType=atomic
    +	// +required
    +	PreferredStrategies []v1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty" protobuf:"bytes,6,opt,name=preferredStrategies"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// LeaseCandidateList is a list of Lease objects.
    +type LeaseCandidateList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// items is a list of schema objects.
    +	Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
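Putting the new type together, a minimal sketch with illustrative values only; the strategy name "OldestEmulationVersion" is taken from the field comments above:

package main

import (
	"fmt"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	renew := metav1.NewMicroTime(time.Now())
	candidate := coordinationv1alpha1.LeaseCandidate{
		ObjectMeta: metav1.ObjectMeta{Name: "node-a", Namespace: "kube-system"},
		Spec: coordinationv1alpha1.LeaseCandidateSpec{
			LeaseName:        "kube-controller-manager",
			RenewTime:        &renew,
			BinaryVersion:    "1.31.0", // semver, no leading "v"
			EmulationVersion: "1.31.0", // must be <= BinaryVersion
			PreferredStrategies: []coordinationv1.CoordinatedLeaseStrategy{
				"OldestEmulationVersion",
			},
		},
	}
	fmt.Println(candidate.Spec.LeaseName, candidate.Spec.PreferredStrategies)
}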
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go
    new file mode 100644
    index 0000000000..0e52809c8c
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go
    @@ -0,0 +1,64 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha1
    +
    +// This file contains a collection of methods that can be used from go-restful to
    +// generate Swagger API documentation for its models. Please read this PR for more
    +// information on the implementation: https://github.com/emicklei/go-restful/pull/215
    +//
    +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
    +// they are on one line! For multiple line or blocks that you want to ignore use ---.
    +// Any context after a --- is ignored.
    +//
    +// Those methods can be generated by using hack/update-codegen.sh
    +
    +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
    +var map_LeaseCandidate = map[string]string{
    +	"":         "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.",
    +	"metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"spec":     "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +}
    +
    +func (LeaseCandidate) SwaggerDoc() map[string]string {
    +	return map_LeaseCandidate
    +}
    +
    +var map_LeaseCandidateList = map[string]string{
    +	"":         "LeaseCandidateList is a list of Lease objects.",
    +	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"items":    "items is a list of schema objects.",
    +}
    +
    +func (LeaseCandidateList) SwaggerDoc() map[string]string {
    +	return map_LeaseCandidateList
    +}
    +
    +var map_LeaseCandidateSpec = map[string]string{
    +	"":                    "LeaseCandidateSpec is a specification of a Lease.",
    +	"leaseName":           "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.",
    +	"pingTime":            "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.",
    +	"renewTime":           "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.",
    +	"binaryVersion":       "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"",
    +	"emulationVersion":    "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"",
    +	"preferredStrategies": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n  will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n  election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.",
    +}
    +
    +func (LeaseCandidateSpec) SwaggerDoc() map[string]string {
    +	return map_LeaseCandidateSpec
    +}
    +
    +// AUTO-GENERATED FUNCTIONS END HERE
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go
    new file mode 100644
    index 0000000000..9cf15d21dc
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go
    @@ -0,0 +1,116 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by deepcopy-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1 "k8s.io/api/coordination/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +)
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate.
    +func (in *LeaseCandidate) DeepCopy() *LeaseCandidate {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LeaseCandidate)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *LeaseCandidate) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]LeaseCandidate, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList.
    +func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LeaseCandidateList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *LeaseCandidateList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) {
    +	*out = *in
    +	if in.PingTime != nil {
    +		in, out := &in.PingTime, &out.PingTime
    +		*out = (*in).DeepCopy()
    +	}
    +	if in.RenewTime != nil {
    +		in, out := &in.RenewTime, &out.RenewTime
    +		*out = (*in).DeepCopy()
    +	}
    +	if in.PreferredStrategies != nil {
    +		in, out := &in.PreferredStrategies, &out.PreferredStrategies
    +		*out = make([]v1.CoordinatedLeaseStrategy, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec.
    +func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LeaseCandidateSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
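The generated deepcopy allocates fresh MicroTime pointers and a fresh PreferredStrategies slice, so mutating a copy never aliases the original. A small sketch of that guarantee (illustrative only):

package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
)

func main() {
	orig := &coordinationv1alpha1.LeaseCandidateSpec{
		LeaseName:           "example",
		PreferredStrategies: []coordinationv1.CoordinatedLeaseStrategy{"X"},
	}
	cp := orig.DeepCopy()
	cp.PreferredStrategies[0] = "Y" // writes to a new backing array
	fmt.Println(orig.PreferredStrategies, cp.PreferredStrategies) // [X] [Y]
}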
    diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..f42bef65c9
    --- /dev/null
    +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,58 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
    index 655de56590..bea9b8146a 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
    @@ -25,6 +25,8 @@ import (
     	io "io"
     
     	proto "github.com/gogo/protobuf/proto"
    +
    +	k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     
     	math "math"
    @@ -139,40 +141,45 @@ func init() {
     }
     
     var fileDescriptor_8d4e223b8bb23da3 = []byte{
    -	// 527 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x41, 0x6f, 0xd3, 0x30,
    -	0x14, 0xc7, 0x9b, 0xb5, 0x95, 0x56, 0x97, 0x8d, 0x2a, 0xea, 0x21, 0xea, 0x21, 0x99, 0x7a, 0x40,
    -	0x13, 0x12, 0x36, 0x9d, 0x10, 0x42, 0x9c, 0x20, 0x02, 0x89, 0x89, 0x4c, 0x48, 0xd9, 0x4e, 0x68,
    -	0x07, 0xdc, 0xe4, 0x91, 0x9a, 0x2e, 0x71, 0x88, 0xdd, 0xa2, 0xdd, 0xf8, 0x08, 0x5c, 0xf9, 0x22,
    -	0xf0, 0x15, 0x7a, 0xdc, 0x71, 0xa7, 0x88, 0x9a, 0x2f, 0x82, 0xec, 0x76, 0x6b, 0x69, 0x87, 0x5a,
    -	0x71, 0x8b, 0x9f, 0xdf, 0xef, 0xf7, 0xfe, 0x7e, 0x87, 0x20, 0x32, 0x7c, 0x26, 0x30, 0xe3, 0x84,
    -	0xe6, 0x8c, 0x44, 0x9c, 0x17, 0x31, 0xcb, 0xa8, 0x64, 0x3c, 0x23, 0xe3, 0x5e, 0x1f, 0x24, 0xed,
    -	0x91, 0x04, 0x32, 0x28, 0xa8, 0x84, 0x18, 0xe7, 0x05, 0x97, 0xdc, 0xf6, 0x66, 0x00, 0xa6, 0x39,
    -	0xc3, 0xcb, 0x00, 0x9e, 0x03, 0x9d, 0x47, 0x09, 0x93, 0x83, 0x51, 0x1f, 0x47, 0x3c, 0x25, 0x09,
    -	0x4f, 0x38, 0x31, 0x5c, 0x7f, 0xf4, 0xd1, 0x9c, 0xcc, 0xc1, 0x7c, 0xcd, 0x7c, 0x9d, 0x27, 0x8b,
    -	0x00, 0x29, 0x8d, 0x06, 0x2c, 0x83, 0xe2, 0x92, 0xe4, 0xc3, 0x44, 0x17, 0x04, 0x49, 0x41, 0x52,
    -	0x32, 0x5e, 0x4b, 0xd1, 0x21, 0xff, 0xa2, 0x8a, 0x51, 0x26, 0x59, 0x0a, 0x6b, 0xc0, 0xd3, 0x4d,
    -	0x80, 0x88, 0x06, 0x90, 0xd2, 0x55, 0xae, 0xfb, 0xd3, 0x42, 0xf5, 0x00, 0xa8, 0x00, 0xfb, 0x03,
    -	0xda, 0xd5, 0x69, 0x62, 0x2a, 0xa9, 0x63, 0x1d, 0x58, 0x87, 0xcd, 0xa3, 0xc7, 0x78, 0xb1, 0x8b,
    -	0x5b, 0x29, 0xce, 0x87, 0x89, 0x2e, 0x08, 0xac, 0xbb, 0xf1, 0xb8, 0x87, 0xdf, 0xf5, 0x3f, 0x41,
    -	0x24, 0x4f, 0x40, 0x52, 0xdf, 0x9e, 0x94, 0x5e, 0x45, 0x95, 0x1e, 0x5a, 0xd4, 0xc2, 0x5b, 0xab,
    -	0x1d, 0xa0, 0x9a, 0xc8, 0x21, 0x72, 0x76, 0x8c, 0xfd, 0x21, 0xde, 0xb0, 0x69, 0x6c, 0x72, 0x9d,
    -	0xe6, 0x10, 0xf9, 0xf7, 0xe6, 0xde, 0x9a, 0x3e, 0x85, 0xc6, 0xd2, 0xfd, 0x61, 0xa1, 0x86, 0xe9,
    -	0x08, 0x98, 0x90, 0xf6, 0xf9, 0x5a, 0x7a, 0xbc, 0x5d, 0x7a, 0x4d, 0x9b, 0xec, 0xad, 0xf9, 0x8c,
    -	0xdd, 0x9b, 0xca, 0x52, 0xf2, 0xb7, 0xa8, 0xce, 0x24, 0xa4, 0xc2, 0xd9, 0x39, 0xa8, 0x1e, 0x36,
    -	0x8f, 0x1e, 0x6c, 0x17, 0xdd, 0xdf, 0x9b, 0x2b, 0xeb, 0xc7, 0x1a, 0x0e, 0x67, 0x8e, 0xee, 0xf7,
    -	0xea, 0x3c, 0xb8, 0x7e, 0x8c, 0xfd, 0x1c, 0xed, 0x0f, 0xf8, 0x45, 0x0c, 0xc5, 0x71, 0x0c, 0x99,
    -	0x64, 0xf2, 0xd2, 0xc4, 0x6f, 0xf8, 0xb6, 0x2a, 0xbd, 0xfd, 0x37, 0x7f, 0xdd, 0x84, 0x2b, 0x9d,
    -	0x76, 0x80, 0xda, 0x17, 0x5a, 0xf4, 0x6a, 0x54, 0x98, 0xf1, 0xa7, 0x10, 0xf1, 0x2c, 0x16, 0x66,
    -	0xc1, 0x75, 0xdf, 0x51, 0xa5, 0xd7, 0x0e, 0xee, 0xb8, 0x0f, 0xef, 0xa4, 0xec, 0x3e, 0x6a, 0xd2,
    -	0xe8, 0xf3, 0x88, 0x15, 0x70, 0xc6, 0x52, 0x70, 0xaa, 0x66, 0x8b, 0x64, 0xbb, 0x2d, 0x9e, 0xb0,
    -	0xa8, 0xe0, 0x1a, 0xf3, 0xef, 0xab, 0xd2, 0x6b, 0xbe, 0x5c, 0x78, 0xc2, 0x65, 0xa9, 0x7d, 0x8e,
    -	0x1a, 0x05, 0x64, 0xf0, 0xc5, 0x4c, 0xa8, 0xfd, 0xdf, 0x84, 0x3d, 0x55, 0x7a, 0x8d, 0xf0, 0xc6,
    -	0x12, 0x2e, 0x84, 0xf6, 0x0b, 0xd4, 0x32, 0x2f, 0x3b, 0x2b, 0x68, 0x26, 0x98, 0x7e, 0x9b, 0x70,
    -	0xea, 0x66, 0x17, 0x6d, 0x55, 0x7a, 0xad, 0x60, 0xe5, 0x2e, 0x5c, 0xeb, 0xf6, 0x5f, 0x4f, 0xa6,
    -	0x6e, 0xe5, 0x6a, 0xea, 0x56, 0xae, 0xa7, 0x6e, 0xe5, 0xab, 0x72, 0xad, 0x89, 0x72, 0xad, 0x2b,
    -	0xe5, 0x5a, 0xd7, 0xca, 0xb5, 0x7e, 0x29, 0xd7, 0xfa, 0xf6, 0xdb, 0xad, 0xbc, 0xf7, 0x36, 0xfc,
    -	0x54, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x81, 0x42, 0xfe, 0x76, 0x04, 0x00, 0x00,
    +	// 600 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e,
    +	0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1,
    +	0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae,
    +	0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f,
    +	0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61,
    +	0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d,
    +	0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e,
    +	0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8,
    +	0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf,
    +	0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe,
    +	0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f,
    +	0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26,
    +	0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51,
    +	0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51,
    +	0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26,
    +	0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1,
    +	0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89,
    +	0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73,
    +	0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57,
    +	0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe,
    +	0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f,
    +	0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83,
    +	0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76,
    +	0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8,
    +	0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f,
    +	0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66,
    +	0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea,
    +	0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b,
    +	0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd,
    +	0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc,
    +	0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee,
    +	0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e,
    +	0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05,
    +	0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee,
    +	0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea,
    +	0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed,
    +	0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
    +	0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00,
     }
     
     func (m *Lease) Marshal() (dAtA []byte, err error) {
    @@ -285,6 +292,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.PreferredHolder != nil {
    +		i -= len(*m.PreferredHolder)
    +		copy(dAtA[i:], *m.PreferredHolder)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder)))
    +		i--
    +		dAtA[i] = 0x3a
    +	}
    +	if m.Strategy != nil {
    +		i -= len(*m.Strategy)
    +		copy(dAtA[i:], *m.Strategy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy)))
    +		i--
    +		dAtA[i] = 0x32
    +	}
     	if m.LeaseTransitions != nil {
     		i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions))
     		i--
    @@ -394,6 +415,14 @@ func (m *LeaseSpec) Size() (n int) {
     	if m.LeaseTransitions != nil {
     		n += 1 + sovGenerated(uint64(*m.LeaseTransitions))
     	}
    +	if m.Strategy != nil {
    +		l = len(*m.Strategy)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.PreferredHolder != nil {
    +		l = len(*m.PreferredHolder)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -440,6 +469,8 @@ func (this *LeaseSpec) String() string {
     		`AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`,
     		`RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`,
     		`LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`,
    +		`Strategy:` + valueToStringGenerated(this.Strategy) + `,`,
    +		`PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -859,6 +890,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error {
     				}
     			}
     			m.LeaseTransitions = &v
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])
    +			m.Strategy = &s
    +			iNdEx = postIndex
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.PreferredHolder = &s
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
    index 92c8918b80..088811a74b 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
    @@ -21,6 +21,7 @@ syntax = "proto2";
     
     package k8s.io.api.coordination.v1beta1;
     
    +import "k8s.io/api/coordination/v1/generated.proto";
     import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
     import "k8s.io/apimachinery/pkg/runtime/generated.proto";
     import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    @@ -32,7 +33,7 @@ option go_package = "k8s.io/api/coordination/v1beta1";
     message Lease {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec contains the specification of the Lease.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -45,7 +46,7 @@ message LeaseList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated Lease items = 2;
    @@ -54,6 +55,8 @@ message LeaseList {
     // LeaseSpec is a specification of a Lease.
     message LeaseSpec {
       // holderIdentity contains the identity of the holder of a current lease.
    +  // If Coordinated Leader Election is used, the holder identity must be
    +  // equal to the elected LeaseCandidate.metadata.name field.
       // +optional
       optional string holderIdentity = 1;
     
    @@ -65,16 +68,28 @@ message LeaseSpec {
     
       // acquireTime is a time when the current lease was acquired.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3;
     
       // renewTime is a time when the current holder of a lease has last
       // updated the lease.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4;
     
       // leaseTransitions is the number of transitions of a lease between
       // holders.
       // +optional
       optional int32 leaseTransitions = 5;
    +
    +  // Strategy indicates the strategy for picking the leader for coordinated leader election
    +  // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    +  // +featureGate=CoordinatedLeaderElection
    +  // +optional
    +  optional string strategy = 6;
    +
    +  // PreferredHolder signals to a lease holder that the lease has a
    +  // more optimal holder and should be given up.
    +  // +featureGate=CoordinatedLeaderElection
    +  // +optional
    +  optional string preferredHolder = 7;
     }
     
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go
    index 3a3d5f32e2..d63fc30a9e 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/types.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go
    @@ -17,6 +17,7 @@ limitations under the License.
     package v1beta1
     
     import (
    +	v1 "k8s.io/api/coordination/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    @@ -42,6 +43,8 @@ type Lease struct {
     // LeaseSpec is a specification of a Lease.
     type LeaseSpec struct {
     	// holderIdentity contains the identity of the holder of a current lease.
    +	// If Coordinated Leader Election is used, the holder identity must be
    +	// equal to the elected LeaseCandidate.metadata.name field.
     	// +optional
     	HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"`
     	// leaseDurationSeconds is a duration that candidates for a lease need
    @@ -60,6 +63,16 @@ type LeaseSpec struct {
     	// holders.
     	// +optional
     	LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"`
    +	// Strategy indicates the strategy for picking the leader for coordinated leader election
    +	// (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
    +	// +featureGate=CoordinatedLeaderElection
    +	// +optional
    +	Strategy *v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"`
    +	// PreferredHolder signals to a lease holder that the lease has a
    +	// more optimal holder and should be given up.
    +	// +featureGate=CoordinatedLeaderElection
    +	// +optional
    +	PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"`
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
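
Aside (not part of the vendored patch): a minimal sketch of how downstream code could populate the two new v1beta1 LeaseSpec fields introduced in the hunk above. The package paths and field types (`Strategy *v1.CoordinatedLeaseStrategy`, `PreferredHolder *string`) are taken from the diff itself; the concrete strategy value and the holder names are illustrative assumptions, and both fields require the CoordinatedLeaderElection feature gate per the field comments.

// lease_sketch.go — illustrative only, assuming the vendored module versions above.
package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
)

func main() {
	holder := "controller-a"    // hypothetical current holder
	preferred := "controller-b" // hypothetical better-placed candidate

	// CoordinatedLeaseStrategy is a string-backed type in coordination/v1
	// (the generated unmarshal code above converts raw bytes directly into it),
	// so an explicit conversion is enough for this sketch.
	strategy := coordinationv1.CoordinatedLeaseStrategy("OldestEmulationVersion")

	spec := coordinationv1beta1.LeaseSpec{
		HolderIdentity:  &holder,
		Strategy:        &strategy,  // protobuf field 6 added by this patch
		PreferredHolder: &preferred, // protobuf field 7, signals a hand-off target
	}

	fmt.Printf("strategy=%s preferredHolder=%s\n", *spec.Strategy, *spec.PreferredHolder)
}
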
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
    index 78ca4e393f..50fe8ea189 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
    @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string {
     
     var map_LeaseSpec = map[string]string{
     	"":                     "LeaseSpec is a specification of a Lease.",
    -	"holderIdentity":       "holderIdentity contains the identity of the holder of a current lease.",
    +	"holderIdentity":       "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.",
     	"leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.",
     	"acquireTime":          "acquireTime is a time when the current lease was acquired.",
     	"renewTime":            "renewTime is a time when the current holder of a lease has last updated the lease.",
     	"leaseTransitions":     "leaseTransitions is the number of transitions of a lease between holders.",
    +	"strategy":             "Strategy indicates the strategy for picking the leader for coordinated leader election (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.",
    +	"preferredHolder":      "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up.",
     }
     
     func (LeaseSpec) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
    index 3adfd87203..dcef1e346a 100644
    --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
    @@ -22,6 +22,7 @@ limitations under the License.
     package v1beta1
     
     import (
    +	v1 "k8s.io/api/coordination/v1"
     	runtime "k8s.io/apimachinery/pkg/runtime"
     )
     
    @@ -111,6 +112,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) {
     		*out = new(int32)
     		**out = **in
     	}
    +	if in.Strategy != nil {
    +		in, out := &in.Strategy, &out.Strategy
    +		*out = new(v1.CoordinatedLeaseStrategy)
    +		**out = **in
    +	}
    +	if in.PreferredHolder != nil {
    +		in, out := &in.PreferredHolder, &out.PreferredHolder
    +		*out = new(string)
    +		**out = **in
    +	}
     	return
     }
     
    diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go
    index 1bdf0b25b1..bc0041b331 100644
    --- a/vendor/k8s.io/api/core/v1/doc.go
    +++ b/vendor/k8s.io/api/core/v1/doc.go
    @@ -17,6 +17,8 @@ limitations under the License.
     // +k8s:openapi-gen=true
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
    +// +k8s:prerelease-lifecycle-gen=true
    +// +groupName=
     
     // Package v1 is the v1 version of the core API.
     package v1 // import "k8s.io/api/core/v1"
    diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
    index d52d8da189..5654ee4829 100644
    --- a/vendor/k8s.io/api/core/v1/generated.pb.go
    +++ b/vendor/k8s.io/api/core/v1/generated.pb.go
    @@ -497,38 +497,10 @@ func (m *CinderVolumeSource) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo
     
    -func (m *ClaimSource) Reset()      { *m = ClaimSource{} }
    -func (*ClaimSource) ProtoMessage() {}
    -func (*ClaimSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{16}
    -}
    -func (m *ClaimSource) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ClaimSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ClaimSource) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ClaimSource.Merge(m, src)
    -}
    -func (m *ClaimSource) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ClaimSource) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ClaimSource.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ClaimSource proto.InternalMessageInfo
    -
     func (m *ClientIPConfig) Reset()      { *m = ClientIPConfig{} }
     func (*ClientIPConfig) ProtoMessage() {}
     func (*ClientIPConfig) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{17}
    +	return fileDescriptor_6c07b07c062484ab, []int{16}
     }
     func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -556,7 +528,7 @@ var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo
     func (m *ClusterTrustBundleProjection) Reset()      { *m = ClusterTrustBundleProjection{} }
     func (*ClusterTrustBundleProjection) ProtoMessage() {}
     func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{18}
    +	return fileDescriptor_6c07b07c062484ab, []int{17}
     }
     func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -584,7 +556,7 @@ var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo
     func (m *ComponentCondition) Reset()      { *m = ComponentCondition{} }
     func (*ComponentCondition) ProtoMessage() {}
     func (*ComponentCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{19}
    +	return fileDescriptor_6c07b07c062484ab, []int{18}
     }
     func (m *ComponentCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -612,7 +584,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo
     func (m *ComponentStatus) Reset()      { *m = ComponentStatus{} }
     func (*ComponentStatus) ProtoMessage() {}
     func (*ComponentStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{20}
    +	return fileDescriptor_6c07b07c062484ab, []int{19}
     }
     func (m *ComponentStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -640,7 +612,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo
     func (m *ComponentStatusList) Reset()      { *m = ComponentStatusList{} }
     func (*ComponentStatusList) ProtoMessage() {}
     func (*ComponentStatusList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{21}
    +	return fileDescriptor_6c07b07c062484ab, []int{20}
     }
     func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -668,7 +640,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo
     func (m *ConfigMap) Reset()      { *m = ConfigMap{} }
     func (*ConfigMap) ProtoMessage() {}
     func (*ConfigMap) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{22}
    +	return fileDescriptor_6c07b07c062484ab, []int{21}
     }
     func (m *ConfigMap) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -696,7 +668,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo
     func (m *ConfigMapEnvSource) Reset()      { *m = ConfigMapEnvSource{} }
     func (*ConfigMapEnvSource) ProtoMessage() {}
     func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{23}
    +	return fileDescriptor_6c07b07c062484ab, []int{22}
     }
     func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -724,7 +696,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo
     func (m *ConfigMapKeySelector) Reset()      { *m = ConfigMapKeySelector{} }
     func (*ConfigMapKeySelector) ProtoMessage() {}
     func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{24}
    +	return fileDescriptor_6c07b07c062484ab, []int{23}
     }
     func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -752,7 +724,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo
     func (m *ConfigMapList) Reset()      { *m = ConfigMapList{} }
     func (*ConfigMapList) ProtoMessage() {}
     func (*ConfigMapList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{25}
    +	return fileDescriptor_6c07b07c062484ab, []int{24}
     }
     func (m *ConfigMapList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -780,7 +752,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo
     func (m *ConfigMapNodeConfigSource) Reset()      { *m = ConfigMapNodeConfigSource{} }
     func (*ConfigMapNodeConfigSource) ProtoMessage() {}
     func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{26}
    +	return fileDescriptor_6c07b07c062484ab, []int{25}
     }
     func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -808,7 +780,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo
     func (m *ConfigMapProjection) Reset()      { *m = ConfigMapProjection{} }
     func (*ConfigMapProjection) ProtoMessage() {}
     func (*ConfigMapProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{27}
    +	return fileDescriptor_6c07b07c062484ab, []int{26}
     }
     func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -836,7 +808,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo
     func (m *ConfigMapVolumeSource) Reset()      { *m = ConfigMapVolumeSource{} }
     func (*ConfigMapVolumeSource) ProtoMessage() {}
     func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{28}
    +	return fileDescriptor_6c07b07c062484ab, []int{27}
     }
     func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -864,7 +836,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo
     func (m *Container) Reset()      { *m = Container{} }
     func (*Container) ProtoMessage() {}
     func (*Container) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{29}
    +	return fileDescriptor_6c07b07c062484ab, []int{28}
     }
     func (m *Container) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -892,7 +864,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo
     func (m *ContainerImage) Reset()      { *m = ContainerImage{} }
     func (*ContainerImage) ProtoMessage() {}
     func (*ContainerImage) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{30}
    +	return fileDescriptor_6c07b07c062484ab, []int{29}
     }
     func (m *ContainerImage) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -920,7 +892,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo
     func (m *ContainerPort) Reset()      { *m = ContainerPort{} }
     func (*ContainerPort) ProtoMessage() {}
     func (*ContainerPort) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{31}
    +	return fileDescriptor_6c07b07c062484ab, []int{30}
     }
     func (m *ContainerPort) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -948,7 +920,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo
     func (m *ContainerResizePolicy) Reset()      { *m = ContainerResizePolicy{} }
     func (*ContainerResizePolicy) ProtoMessage() {}
     func (*ContainerResizePolicy) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{32}
    +	return fileDescriptor_6c07b07c062484ab, []int{31}
     }
     func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -976,7 +948,7 @@ var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo
     func (m *ContainerState) Reset()      { *m = ContainerState{} }
     func (*ContainerState) ProtoMessage() {}
     func (*ContainerState) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{33}
    +	return fileDescriptor_6c07b07c062484ab, []int{32}
     }
     func (m *ContainerState) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1004,7 +976,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo
     func (m *ContainerStateRunning) Reset()      { *m = ContainerStateRunning{} }
     func (*ContainerStateRunning) ProtoMessage() {}
     func (*ContainerStateRunning) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{34}
    +	return fileDescriptor_6c07b07c062484ab, []int{33}
     }
     func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1032,7 +1004,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo
     func (m *ContainerStateTerminated) Reset()      { *m = ContainerStateTerminated{} }
     func (*ContainerStateTerminated) ProtoMessage() {}
     func (*ContainerStateTerminated) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{35}
    +	return fileDescriptor_6c07b07c062484ab, []int{34}
     }
     func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1060,7 +1032,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo
     func (m *ContainerStateWaiting) Reset()      { *m = ContainerStateWaiting{} }
     func (*ContainerStateWaiting) ProtoMessage() {}
     func (*ContainerStateWaiting) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{36}
    +	return fileDescriptor_6c07b07c062484ab, []int{35}
     }
     func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1088,7 +1060,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo
     func (m *ContainerStatus) Reset()      { *m = ContainerStatus{} }
     func (*ContainerStatus) ProtoMessage() {}
     func (*ContainerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{37}
    +	return fileDescriptor_6c07b07c062484ab, []int{36}
     }
     func (m *ContainerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1113,6 +1085,34 @@ func (m *ContainerStatus) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo
     
    +func (m *ContainerUser) Reset()      { *m = ContainerUser{} }
    +func (*ContainerUser) ProtoMessage() {}
    +func (*ContainerUser) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{37}
    +}
    +func (m *ContainerUser) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ContainerUser) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ContainerUser.Merge(m, src)
    +}
    +func (m *ContainerUser) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ContainerUser) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ContainerUser.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ContainerUser proto.InternalMessageInfo
    +
     func (m *DaemonEndpoint) Reset()      { *m = DaemonEndpoint{} }
     func (*DaemonEndpoint) ProtoMessage() {}
     func (*DaemonEndpoint) Descriptor() ([]byte, []int) {
    @@ -2149,10 +2149,38 @@ func (m *ISCSIVolumeSource) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
     
    +func (m *ImageVolumeSource) Reset()      { *m = ImageVolumeSource{} }
    +func (*ImageVolumeSource) ProtoMessage() {}
    +func (*ImageVolumeSource) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{75}
    +}
    +func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ImageVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ImageVolumeSource) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ImageVolumeSource.Merge(m, src)
    +}
    +func (m *ImageVolumeSource) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ImageVolumeSource) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ImageVolumeSource.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo
    +
     func (m *KeyToPath) Reset()      { *m = KeyToPath{} }
     func (*KeyToPath) ProtoMessage() {}
     func (*KeyToPath) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{75}
    +	return fileDescriptor_6c07b07c062484ab, []int{76}
     }
     func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2180,7 +2208,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
     func (m *Lifecycle) Reset()      { *m = Lifecycle{} }
     func (*Lifecycle) ProtoMessage() {}
     func (*Lifecycle) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{76}
    +	return fileDescriptor_6c07b07c062484ab, []int{77}
     }
     func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2208,7 +2236,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
     func (m *LifecycleHandler) Reset()      { *m = LifecycleHandler{} }
     func (*LifecycleHandler) ProtoMessage() {}
     func (*LifecycleHandler) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{77}
    +	return fileDescriptor_6c07b07c062484ab, []int{78}
     }
     func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
     func (m *LimitRange) Reset()      { *m = LimitRange{} }
     func (*LimitRange) ProtoMessage() {}
     func (*LimitRange) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{78}
    +	return fileDescriptor_6c07b07c062484ab, []int{79}
     }
     func (m *LimitRange) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2264,7 +2292,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo
     func (m *LimitRangeItem) Reset()      { *m = LimitRangeItem{} }
     func (*LimitRangeItem) ProtoMessage() {}
     func (*LimitRangeItem) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{79}
    +	return fileDescriptor_6c07b07c062484ab, []int{80}
     }
     func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
     func (m *LimitRangeList) Reset()      { *m = LimitRangeList{} }
     func (*LimitRangeList) ProtoMessage() {}
     func (*LimitRangeList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{80}
    +	return fileDescriptor_6c07b07c062484ab, []int{81}
     }
     func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2320,7 +2348,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
     func (m *LimitRangeSpec) Reset()      { *m = LimitRangeSpec{} }
     func (*LimitRangeSpec) ProtoMessage() {}
     func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{81}
    +	return fileDescriptor_6c07b07c062484ab, []int{82}
     }
     func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2345,10 +2373,38 @@ func (m *LimitRangeSpec) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
     
    +func (m *LinuxContainerUser) Reset()      { *m = LinuxContainerUser{} }
    +func (*LinuxContainerUser) ProtoMessage() {}
    +func (*LinuxContainerUser) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{83}
    +}
    +func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *LinuxContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *LinuxContainerUser) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_LinuxContainerUser.Merge(m, src)
    +}
    +func (m *LinuxContainerUser) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *LinuxContainerUser) XXX_DiscardUnknown() {
    +	xxx_messageInfo_LinuxContainerUser.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo
    +
     func (m *List) Reset()      { *m = List{} }
     func (*List) ProtoMessage() {}
     func (*List) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{82}
    +	return fileDescriptor_6c07b07c062484ab, []int{84}
     }
     func (m *List) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2376,7 +2432,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
     func (m *LoadBalancerIngress) Reset()      { *m = LoadBalancerIngress{} }
     func (*LoadBalancerIngress) ProtoMessage() {}
     func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{83}
    +	return fileDescriptor_6c07b07c062484ab, []int{85}
     }
     func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2404,7 +2460,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
     func (m *LoadBalancerStatus) Reset()      { *m = LoadBalancerStatus{} }
     func (*LoadBalancerStatus) ProtoMessage() {}
     func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{84}
    +	return fileDescriptor_6c07b07c062484ab, []int{86}
     }
     func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2432,7 +2488,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
     func (m *LocalObjectReference) Reset()      { *m = LocalObjectReference{} }
     func (*LocalObjectReference) ProtoMessage() {}
     func (*LocalObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{85}
    +	return fileDescriptor_6c07b07c062484ab, []int{87}
     }
     func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2460,7 +2516,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
     func (m *LocalVolumeSource) Reset()      { *m = LocalVolumeSource{} }
     func (*LocalVolumeSource) ProtoMessage() {}
     func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{86}
    +	return fileDescriptor_6c07b07c062484ab, []int{88}
     }
     func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2488,7 +2544,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
     func (m *ModifyVolumeStatus) Reset()      { *m = ModifyVolumeStatus{} }
     func (*ModifyVolumeStatus) ProtoMessage() {}
     func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{87}
    +	return fileDescriptor_6c07b07c062484ab, []int{89}
     }
     func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2516,7 +2572,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
     func (m *NFSVolumeSource) Reset()      { *m = NFSVolumeSource{} }
     func (*NFSVolumeSource) ProtoMessage() {}
     func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{88}
    +	return fileDescriptor_6c07b07c062484ab, []int{90}
     }
     func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2544,7 +2600,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
     func (m *Namespace) Reset()      { *m = Namespace{} }
     func (*Namespace) ProtoMessage() {}
     func (*Namespace) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{89}
    +	return fileDescriptor_6c07b07c062484ab, []int{91}
     }
     func (m *Namespace) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2572,7 +2628,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
     func (m *NamespaceCondition) Reset()      { *m = NamespaceCondition{} }
     func (*NamespaceCondition) ProtoMessage() {}
     func (*NamespaceCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{90}
    +	return fileDescriptor_6c07b07c062484ab, []int{92}
     }
     func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2600,7 +2656,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
     func (m *NamespaceList) Reset()      { *m = NamespaceList{} }
     func (*NamespaceList) ProtoMessage() {}
     func (*NamespaceList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{91}
    +	return fileDescriptor_6c07b07c062484ab, []int{93}
     }
     func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2628,7 +2684,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
     func (m *NamespaceSpec) Reset()      { *m = NamespaceSpec{} }
     func (*NamespaceSpec) ProtoMessage() {}
     func (*NamespaceSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{92}
    +	return fileDescriptor_6c07b07c062484ab, []int{94}
     }
     func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2656,7 +2712,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
     func (m *NamespaceStatus) Reset()      { *m = NamespaceStatus{} }
     func (*NamespaceStatus) ProtoMessage() {}
     func (*NamespaceStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{93}
    +	return fileDescriptor_6c07b07c062484ab, []int{95}
     }
     func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2684,7 +2740,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
     func (m *Node) Reset()      { *m = Node{} }
     func (*Node) ProtoMessage() {}
     func (*Node) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{94}
    +	return fileDescriptor_6c07b07c062484ab, []int{96}
     }
     func (m *Node) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2712,7 +2768,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo
     func (m *NodeAddress) Reset()      { *m = NodeAddress{} }
     func (*NodeAddress) ProtoMessage() {}
     func (*NodeAddress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{95}
    +	return fileDescriptor_6c07b07c062484ab, []int{97}
     }
     func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2740,7 +2796,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
     func (m *NodeAffinity) Reset()      { *m = NodeAffinity{} }
     func (*NodeAffinity) ProtoMessage() {}
     func (*NodeAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{96}
    +	return fileDescriptor_6c07b07c062484ab, []int{98}
     }
     func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2768,7 +2824,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
     func (m *NodeCondition) Reset()      { *m = NodeCondition{} }
     func (*NodeCondition) ProtoMessage() {}
     func (*NodeCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{97}
    +	return fileDescriptor_6c07b07c062484ab, []int{99}
     }
     func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2796,7 +2852,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
     func (m *NodeConfigSource) Reset()      { *m = NodeConfigSource{} }
     func (*NodeConfigSource) ProtoMessage() {}
     func (*NodeConfigSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{98}
    +	return fileDescriptor_6c07b07c062484ab, []int{100}
     }
     func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2824,7 +2880,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
     func (m *NodeConfigStatus) Reset()      { *m = NodeConfigStatus{} }
     func (*NodeConfigStatus) ProtoMessage() {}
     func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{99}
    +	return fileDescriptor_6c07b07c062484ab, []int{101}
     }
     func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2852,7 +2908,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
     func (m *NodeDaemonEndpoints) Reset()      { *m = NodeDaemonEndpoints{} }
     func (*NodeDaemonEndpoints) ProtoMessage() {}
     func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{100}
    +	return fileDescriptor_6c07b07c062484ab, []int{102}
     }
     func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2877,10 +2933,38 @@ func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
     
    +func (m *NodeFeatures) Reset()      { *m = NodeFeatures{} }
    +func (*NodeFeatures) ProtoMessage() {}
    +func (*NodeFeatures) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{103}
    +}
    +func (m *NodeFeatures) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *NodeFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *NodeFeatures) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_NodeFeatures.Merge(m, src)
    +}
    +func (m *NodeFeatures) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *NodeFeatures) XXX_DiscardUnknown() {
    +	xxx_messageInfo_NodeFeatures.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo
    +
     func (m *NodeList) Reset()      { *m = NodeList{} }
     func (*NodeList) ProtoMessage() {}
     func (*NodeList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{101}
    +	return fileDescriptor_6c07b07c062484ab, []int{104}
     }
     func (m *NodeList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2908,7 +2992,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo
     func (m *NodeProxyOptions) Reset()      { *m = NodeProxyOptions{} }
     func (*NodeProxyOptions) ProtoMessage() {}
     func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{102}
    +	return fileDescriptor_6c07b07c062484ab, []int{105}
     }
     func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2936,7 +3020,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
     func (m *NodeRuntimeHandler) Reset()      { *m = NodeRuntimeHandler{} }
     func (*NodeRuntimeHandler) ProtoMessage() {}
     func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{103}
    +	return fileDescriptor_6c07b07c062484ab, []int{106}
     }
     func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2964,7 +3048,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo
     func (m *NodeRuntimeHandlerFeatures) Reset()      { *m = NodeRuntimeHandlerFeatures{} }
     func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
     func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{104}
    +	return fileDescriptor_6c07b07c062484ab, []int{107}
     }
     func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -2992,7 +3076,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo
     func (m *NodeSelector) Reset()      { *m = NodeSelector{} }
     func (*NodeSelector) ProtoMessage() {}
     func (*NodeSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{105}
    +	return fileDescriptor_6c07b07c062484ab, []int{108}
     }
     func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3020,7 +3104,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
     func (m *NodeSelectorRequirement) Reset()      { *m = NodeSelectorRequirement{} }
     func (*NodeSelectorRequirement) ProtoMessage() {}
     func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{106}
    +	return fileDescriptor_6c07b07c062484ab, []int{109}
     }
     func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3048,7 +3132,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
     func (m *NodeSelectorTerm) Reset()      { *m = NodeSelectorTerm{} }
     func (*NodeSelectorTerm) ProtoMessage() {}
     func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{107}
    +	return fileDescriptor_6c07b07c062484ab, []int{110}
     }
     func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3076,7 +3160,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
     func (m *NodeSpec) Reset()      { *m = NodeSpec{} }
     func (*NodeSpec) ProtoMessage() {}
     func (*NodeSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{108}
    +	return fileDescriptor_6c07b07c062484ab, []int{111}
     }
     func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3104,7 +3188,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
     func (m *NodeStatus) Reset()      { *m = NodeStatus{} }
     func (*NodeStatus) ProtoMessage() {}
     func (*NodeStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{109}
    +	return fileDescriptor_6c07b07c062484ab, []int{112}
     }
     func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3132,7 +3216,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
     func (m *NodeSystemInfo) Reset()      { *m = NodeSystemInfo{} }
     func (*NodeSystemInfo) ProtoMessage() {}
     func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{110}
    +	return fileDescriptor_6c07b07c062484ab, []int{113}
     }
     func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3160,7 +3244,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
     func (m *ObjectFieldSelector) Reset()      { *m = ObjectFieldSelector{} }
     func (*ObjectFieldSelector) ProtoMessage() {}
     func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{111}
    +	return fileDescriptor_6c07b07c062484ab, []int{114}
     }
     func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3188,7 +3272,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
     func (m *ObjectReference) Reset()      { *m = ObjectReference{} }
     func (*ObjectReference) ProtoMessage() {}
     func (*ObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{112}
    +	return fileDescriptor_6c07b07c062484ab, []int{115}
     }
     func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3216,7 +3300,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
     func (m *PersistentVolume) Reset()      { *m = PersistentVolume{} }
     func (*PersistentVolume) ProtoMessage() {}
     func (*PersistentVolume) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{113}
    +	return fileDescriptor_6c07b07c062484ab, []int{116}
     }
     func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3244,7 +3328,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
     func (m *PersistentVolumeClaim) Reset()      { *m = PersistentVolumeClaim{} }
     func (*PersistentVolumeClaim) ProtoMessage() {}
     func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{114}
    +	return fileDescriptor_6c07b07c062484ab, []int{117}
     }
     func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3272,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
     func (m *PersistentVolumeClaimCondition) Reset()      { *m = PersistentVolumeClaimCondition{} }
     func (*PersistentVolumeClaimCondition) ProtoMessage() {}
     func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{115}
    +	return fileDescriptor_6c07b07c062484ab, []int{118}
     }
     func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3300,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
     func (m *PersistentVolumeClaimList) Reset()      { *m = PersistentVolumeClaimList{} }
     func (*PersistentVolumeClaimList) ProtoMessage() {}
     func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{116}
    +	return fileDescriptor_6c07b07c062484ab, []int{119}
     }
     func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3328,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
     func (m *PersistentVolumeClaimSpec) Reset()      { *m = PersistentVolumeClaimSpec{} }
     func (*PersistentVolumeClaimSpec) ProtoMessage() {}
     func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{117}
    +	return fileDescriptor_6c07b07c062484ab, []int{120}
     }
     func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3356,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
     func (m *PersistentVolumeClaimStatus) Reset()      { *m = PersistentVolumeClaimStatus{} }
     func (*PersistentVolumeClaimStatus) ProtoMessage() {}
     func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{118}
    +	return fileDescriptor_6c07b07c062484ab, []int{121}
     }
     func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3384,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
     func (m *PersistentVolumeClaimTemplate) Reset()      { *m = PersistentVolumeClaimTemplate{} }
     func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
     func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{119}
    +	return fileDescriptor_6c07b07c062484ab, []int{122}
     }
     func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3412,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
     func (m *PersistentVolumeClaimVolumeSource) Reset()      { *m = PersistentVolumeClaimVolumeSource{} }
     func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
     func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{120}
    +	return fileDescriptor_6c07b07c062484ab, []int{123}
     }
     func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3440,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
     func (m *PersistentVolumeList) Reset()      { *m = PersistentVolumeList{} }
     func (*PersistentVolumeList) ProtoMessage() {}
     func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{121}
    +	return fileDescriptor_6c07b07c062484ab, []int{124}
     }
     func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3468,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
     func (m *PersistentVolumeSource) Reset()      { *m = PersistentVolumeSource{} }
     func (*PersistentVolumeSource) ProtoMessage() {}
     func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{122}
    +	return fileDescriptor_6c07b07c062484ab, []int{125}
     }
     func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3496,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
     func (m *PersistentVolumeSpec) Reset()      { *m = PersistentVolumeSpec{} }
     func (*PersistentVolumeSpec) ProtoMessage() {}
     func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{123}
    +	return fileDescriptor_6c07b07c062484ab, []int{126}
     }
     func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3524,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
     func (m *PersistentVolumeStatus) Reset()      { *m = PersistentVolumeStatus{} }
     func (*PersistentVolumeStatus) ProtoMessage() {}
     func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{124}
    +	return fileDescriptor_6c07b07c062484ab, []int{127}
     }
     func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3552,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
     func (m *PhotonPersistentDiskVolumeSource) Reset()      { *m = PhotonPersistentDiskVolumeSource{} }
     func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
     func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{125}
    +	return fileDescriptor_6c07b07c062484ab, []int{128}
     }
     func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3580,7 +3664,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
     func (m *Pod) Reset()      { *m = Pod{} }
     func (*Pod) ProtoMessage() {}
     func (*Pod) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{126}
    +	return fileDescriptor_6c07b07c062484ab, []int{129}
     }
     func (m *Pod) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3608,7 +3692,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
     func (m *PodAffinity) Reset()      { *m = PodAffinity{} }
     func (*PodAffinity) ProtoMessage() {}
     func (*PodAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{127}
    +	return fileDescriptor_6c07b07c062484ab, []int{130}
     }
     func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3636,7 +3720,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
     func (m *PodAffinityTerm) Reset()      { *m = PodAffinityTerm{} }
     func (*PodAffinityTerm) ProtoMessage() {}
     func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{128}
    +	return fileDescriptor_6c07b07c062484ab, []int{131}
     }
     func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3664,7 +3748,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
     func (m *PodAntiAffinity) Reset()      { *m = PodAntiAffinity{} }
     func (*PodAntiAffinity) ProtoMessage() {}
     func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{129}
    +	return fileDescriptor_6c07b07c062484ab, []int{132}
     }
     func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3692,7 +3776,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
     func (m *PodAttachOptions) Reset()      { *m = PodAttachOptions{} }
     func (*PodAttachOptions) ProtoMessage() {}
     func (*PodAttachOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{130}
    +	return fileDescriptor_6c07b07c062484ab, []int{133}
     }
     func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3720,7 +3804,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
     func (m *PodCondition) Reset()      { *m = PodCondition{} }
     func (*PodCondition) ProtoMessage() {}
     func (*PodCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{131}
    +	return fileDescriptor_6c07b07c062484ab, []int{134}
     }
     func (m *PodCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3748,7 +3832,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
     func (m *PodDNSConfig) Reset()      { *m = PodDNSConfig{} }
     func (*PodDNSConfig) ProtoMessage() {}
     func (*PodDNSConfig) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{132}
    +	return fileDescriptor_6c07b07c062484ab, []int{135}
     }
     func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3776,7 +3860,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
     func (m *PodDNSConfigOption) Reset()      { *m = PodDNSConfigOption{} }
     func (*PodDNSConfigOption) ProtoMessage() {}
     func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{133}
    +	return fileDescriptor_6c07b07c062484ab, []int{136}
     }
     func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3804,7 +3888,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
     func (m *PodExecOptions) Reset()      { *m = PodExecOptions{} }
     func (*PodExecOptions) ProtoMessage() {}
     func (*PodExecOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{134}
    +	return fileDescriptor_6c07b07c062484ab, []int{137}
     }
     func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3832,7 +3916,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
     func (m *PodIP) Reset()      { *m = PodIP{} }
     func (*PodIP) ProtoMessage() {}
     func (*PodIP) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{135}
    +	return fileDescriptor_6c07b07c062484ab, []int{138}
     }
     func (m *PodIP) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3860,7 +3944,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
     func (m *PodList) Reset()      { *m = PodList{} }
     func (*PodList) ProtoMessage() {}
     func (*PodList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{136}
    +	return fileDescriptor_6c07b07c062484ab, []int{139}
     }
     func (m *PodList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3888,7 +3972,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
     func (m *PodLogOptions) Reset()      { *m = PodLogOptions{} }
     func (*PodLogOptions) ProtoMessage() {}
     func (*PodLogOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{137}
    +	return fileDescriptor_6c07b07c062484ab, []int{140}
     }
     func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3916,7 +4000,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
     func (m *PodOS) Reset()      { *m = PodOS{} }
     func (*PodOS) ProtoMessage() {}
     func (*PodOS) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{138}
    +	return fileDescriptor_6c07b07c062484ab, []int{141}
     }
     func (m *PodOS) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3944,7 +4028,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
     func (m *PodPortForwardOptions) Reset()      { *m = PodPortForwardOptions{} }
     func (*PodPortForwardOptions) ProtoMessage() {}
     func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{139}
    +	return fileDescriptor_6c07b07c062484ab, []int{142}
     }
     func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -3972,7 +4056,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
     func (m *PodProxyOptions) Reset()      { *m = PodProxyOptions{} }
     func (*PodProxyOptions) ProtoMessage() {}
     func (*PodProxyOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{140}
    +	return fileDescriptor_6c07b07c062484ab, []int{143}
     }
     func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4000,7 +4084,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
     func (m *PodReadinessGate) Reset()      { *m = PodReadinessGate{} }
     func (*PodReadinessGate) ProtoMessage() {}
     func (*PodReadinessGate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{141}
    +	return fileDescriptor_6c07b07c062484ab, []int{144}
     }
     func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4028,7 +4112,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
     func (m *PodResourceClaim) Reset()      { *m = PodResourceClaim{} }
     func (*PodResourceClaim) ProtoMessage() {}
     func (*PodResourceClaim) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{142}
    +	return fileDescriptor_6c07b07c062484ab, []int{145}
     }
     func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4056,7 +4140,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
     func (m *PodResourceClaimStatus) Reset()      { *m = PodResourceClaimStatus{} }
     func (*PodResourceClaimStatus) ProtoMessage() {}
     func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{143}
    +	return fileDescriptor_6c07b07c062484ab, []int{146}
     }
     func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4084,7 +4168,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
     func (m *PodSchedulingGate) Reset()      { *m = PodSchedulingGate{} }
     func (*PodSchedulingGate) ProtoMessage() {}
     func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{144}
    +	return fileDescriptor_6c07b07c062484ab, []int{147}
     }
     func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4112,7 +4196,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
     func (m *PodSecurityContext) Reset()      { *m = PodSecurityContext{} }
     func (*PodSecurityContext) ProtoMessage() {}
     func (*PodSecurityContext) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{145}
    +	return fileDescriptor_6c07b07c062484ab, []int{148}
     }
     func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4140,7 +4224,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
     func (m *PodSignature) Reset()      { *m = PodSignature{} }
     func (*PodSignature) ProtoMessage() {}
     func (*PodSignature) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{146}
    +	return fileDescriptor_6c07b07c062484ab, []int{149}
     }
     func (m *PodSignature) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4168,7 +4252,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
     func (m *PodSpec) Reset()      { *m = PodSpec{} }
     func (*PodSpec) ProtoMessage() {}
     func (*PodSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{147}
    +	return fileDescriptor_6c07b07c062484ab, []int{150}
     }
     func (m *PodSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4196,7 +4280,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
     func (m *PodStatus) Reset()      { *m = PodStatus{} }
     func (*PodStatus) ProtoMessage() {}
     func (*PodStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{148}
    +	return fileDescriptor_6c07b07c062484ab, []int{151}
     }
     func (m *PodStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4224,7 +4308,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
     func (m *PodStatusResult) Reset()      { *m = PodStatusResult{} }
     func (*PodStatusResult) ProtoMessage() {}
     func (*PodStatusResult) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{149}
    +	return fileDescriptor_6c07b07c062484ab, []int{152}
     }
     func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4252,7 +4336,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
     func (m *PodTemplate) Reset()      { *m = PodTemplate{} }
     func (*PodTemplate) ProtoMessage() {}
     func (*PodTemplate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{150}
    +	return fileDescriptor_6c07b07c062484ab, []int{153}
     }
     func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4280,7 +4364,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
     func (m *PodTemplateList) Reset()      { *m = PodTemplateList{} }
     func (*PodTemplateList) ProtoMessage() {}
     func (*PodTemplateList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{151}
    +	return fileDescriptor_6c07b07c062484ab, []int{154}
     }
     func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4308,7 +4392,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
     func (m *PodTemplateSpec) Reset()      { *m = PodTemplateSpec{} }
     func (*PodTemplateSpec) ProtoMessage() {}
     func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{152}
    +	return fileDescriptor_6c07b07c062484ab, []int{155}
     }
     func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4336,7 +4420,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
     func (m *PortStatus) Reset()      { *m = PortStatus{} }
     func (*PortStatus) ProtoMessage() {}
     func (*PortStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{153}
    +	return fileDescriptor_6c07b07c062484ab, []int{156}
     }
     func (m *PortStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4364,7 +4448,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
     func (m *PortworxVolumeSource) Reset()      { *m = PortworxVolumeSource{} }
     func (*PortworxVolumeSource) ProtoMessage() {}
     func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{154}
    +	return fileDescriptor_6c07b07c062484ab, []int{157}
     }
     func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4392,7 +4476,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
     func (m *Preconditions) Reset()      { *m = Preconditions{} }
     func (*Preconditions) ProtoMessage() {}
     func (*Preconditions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{155}
    +	return fileDescriptor_6c07b07c062484ab, []int{158}
     }
     func (m *Preconditions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4420,7 +4504,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
     func (m *PreferAvoidPodsEntry) Reset()      { *m = PreferAvoidPodsEntry{} }
     func (*PreferAvoidPodsEntry) ProtoMessage() {}
     func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{156}
    +	return fileDescriptor_6c07b07c062484ab, []int{159}
     }
     func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4448,7 +4532,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
     func (m *PreferredSchedulingTerm) Reset()      { *m = PreferredSchedulingTerm{} }
     func (*PreferredSchedulingTerm) ProtoMessage() {}
     func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{157}
    +	return fileDescriptor_6c07b07c062484ab, []int{160}
     }
     func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4476,7 +4560,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
     func (m *Probe) Reset()      { *m = Probe{} }
     func (*Probe) ProtoMessage() {}
     func (*Probe) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{158}
    +	return fileDescriptor_6c07b07c062484ab, []int{161}
     }
     func (m *Probe) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4504,7 +4588,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
     func (m *ProbeHandler) Reset()      { *m = ProbeHandler{} }
     func (*ProbeHandler) ProtoMessage() {}
     func (*ProbeHandler) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{159}
    +	return fileDescriptor_6c07b07c062484ab, []int{162}
     }
     func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4532,7 +4616,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
     func (m *ProjectedVolumeSource) Reset()      { *m = ProjectedVolumeSource{} }
     func (*ProjectedVolumeSource) ProtoMessage() {}
     func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{160}
    +	return fileDescriptor_6c07b07c062484ab, []int{163}
     }
     func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4560,7 +4644,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
     func (m *QuobyteVolumeSource) Reset()      { *m = QuobyteVolumeSource{} }
     func (*QuobyteVolumeSource) ProtoMessage() {}
     func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{161}
    +	return fileDescriptor_6c07b07c062484ab, []int{164}
     }
     func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4588,7 +4672,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
     func (m *RBDPersistentVolumeSource) Reset()      { *m = RBDPersistentVolumeSource{} }
     func (*RBDPersistentVolumeSource) ProtoMessage() {}
     func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{162}
    +	return fileDescriptor_6c07b07c062484ab, []int{165}
     }
     func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4616,7 +4700,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
     func (m *RBDVolumeSource) Reset()      { *m = RBDVolumeSource{} }
     func (*RBDVolumeSource) ProtoMessage() {}
     func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{163}
    +	return fileDescriptor_6c07b07c062484ab, []int{166}
     }
     func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4644,7 +4728,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
     func (m *RangeAllocation) Reset()      { *m = RangeAllocation{} }
     func (*RangeAllocation) ProtoMessage() {}
     func (*RangeAllocation) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{164}
    +	return fileDescriptor_6c07b07c062484ab, []int{167}
     }
     func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4672,7 +4756,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
     func (m *ReplicationController) Reset()      { *m = ReplicationController{} }
     func (*ReplicationController) ProtoMessage() {}
     func (*ReplicationController) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{165}
    +	return fileDescriptor_6c07b07c062484ab, []int{168}
     }
     func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4700,7 +4784,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
     func (m *ReplicationControllerCondition) Reset()      { *m = ReplicationControllerCondition{} }
     func (*ReplicationControllerCondition) ProtoMessage() {}
     func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{166}
    +	return fileDescriptor_6c07b07c062484ab, []int{169}
     }
     func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4728,7 +4812,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
     func (m *ReplicationControllerList) Reset()      { *m = ReplicationControllerList{} }
     func (*ReplicationControllerList) ProtoMessage() {}
     func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{167}
    +	return fileDescriptor_6c07b07c062484ab, []int{170}
     }
     func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4756,7 +4840,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
     func (m *ReplicationControllerSpec) Reset()      { *m = ReplicationControllerSpec{} }
     func (*ReplicationControllerSpec) ProtoMessage() {}
     func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{168}
    +	return fileDescriptor_6c07b07c062484ab, []int{171}
     }
     func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4784,7 +4868,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
     func (m *ReplicationControllerStatus) Reset()      { *m = ReplicationControllerStatus{} }
     func (*ReplicationControllerStatus) ProtoMessage() {}
     func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{169}
    +	return fileDescriptor_6c07b07c062484ab, []int{172}
     }
     func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4812,7 +4896,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
     func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
     func (*ResourceClaim) ProtoMessage() {}
     func (*ResourceClaim) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{170}
    +	return fileDescriptor_6c07b07c062484ab, []int{173}
     }
     func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4840,7 +4924,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
     func (m *ResourceFieldSelector) Reset()      { *m = ResourceFieldSelector{} }
     func (*ResourceFieldSelector) ProtoMessage() {}
     func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{171}
    +	return fileDescriptor_6c07b07c062484ab, []int{174}
     }
     func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4865,10 +4949,38 @@ func (m *ResourceFieldSelector) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
     
    +func (m *ResourceHealth) Reset()      { *m = ResourceHealth{} }
    +func (*ResourceHealth) ProtoMessage() {}
    +func (*ResourceHealth) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{175}
    +}
    +func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceHealth) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceHealth.Merge(m, src)
    +}
    +func (m *ResourceHealth) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceHealth) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceHealth.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
    +
     func (m *ResourceQuota) Reset()      { *m = ResourceQuota{} }
     func (*ResourceQuota) ProtoMessage() {}
     func (*ResourceQuota) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{172}
    +	return fileDescriptor_6c07b07c062484ab, []int{176}
     }
     func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4896,7 +5008,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
     func (m *ResourceQuotaList) Reset()      { *m = ResourceQuotaList{} }
     func (*ResourceQuotaList) ProtoMessage() {}
     func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{173}
    +	return fileDescriptor_6c07b07c062484ab, []int{177}
     }
     func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4924,7 +5036,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
     func (m *ResourceQuotaSpec) Reset()      { *m = ResourceQuotaSpec{} }
     func (*ResourceQuotaSpec) ProtoMessage() {}
     func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{174}
    +	return fileDescriptor_6c07b07c062484ab, []int{178}
     }
     func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4952,7 +5064,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
     func (m *ResourceQuotaStatus) Reset()      { *m = ResourceQuotaStatus{} }
     func (*ResourceQuotaStatus) ProtoMessage() {}
     func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{175}
    +	return fileDescriptor_6c07b07c062484ab, []int{179}
     }
     func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -4980,7 +5092,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
     func (m *ResourceRequirements) Reset()      { *m = ResourceRequirements{} }
     func (*ResourceRequirements) ProtoMessage() {}
     func (*ResourceRequirements) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{176}
    +	return fileDescriptor_6c07b07c062484ab, []int{180}
     }
     func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5005,10 +5117,38 @@ func (m *ResourceRequirements) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
     
    +func (m *ResourceStatus) Reset()      { *m = ResourceStatus{} }
    +func (*ResourceStatus) ProtoMessage() {}
    +func (*ResourceStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_6c07b07c062484ab, []int{181}
    +}
    +func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceStatus.Merge(m, src)
    +}
    +func (m *ResourceStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
    +
     func (m *SELinuxOptions) Reset()      { *m = SELinuxOptions{} }
     func (*SELinuxOptions) ProtoMessage() {}
     func (*SELinuxOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{177}
    +	return fileDescriptor_6c07b07c062484ab, []int{182}
     }
     func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5036,7 +5176,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
     func (m *ScaleIOPersistentVolumeSource) Reset()      { *m = ScaleIOPersistentVolumeSource{} }
     func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
     func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{178}
    +	return fileDescriptor_6c07b07c062484ab, []int{183}
     }
     func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5064,7 +5204,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
     func (m *ScaleIOVolumeSource) Reset()      { *m = ScaleIOVolumeSource{} }
     func (*ScaleIOVolumeSource) ProtoMessage() {}
     func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{179}
    +	return fileDescriptor_6c07b07c062484ab, []int{184}
     }
     func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5092,7 +5232,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
     func (m *ScopeSelector) Reset()      { *m = ScopeSelector{} }
     func (*ScopeSelector) ProtoMessage() {}
     func (*ScopeSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{180}
    +	return fileDescriptor_6c07b07c062484ab, []int{185}
     }
     func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5120,7 +5260,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
     func (m *ScopedResourceSelectorRequirement) Reset()      { *m = ScopedResourceSelectorRequirement{} }
     func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
     func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{181}
    +	return fileDescriptor_6c07b07c062484ab, []int{186}
     }
     func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5148,7 +5288,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
     func (m *SeccompProfile) Reset()      { *m = SeccompProfile{} }
     func (*SeccompProfile) ProtoMessage() {}
     func (*SeccompProfile) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{182}
    +	return fileDescriptor_6c07b07c062484ab, []int{187}
     }
     func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5176,7 +5316,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
     func (m *Secret) Reset()      { *m = Secret{} }
     func (*Secret) ProtoMessage() {}
     func (*Secret) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{183}
    +	return fileDescriptor_6c07b07c062484ab, []int{188}
     }
     func (m *Secret) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5204,7 +5344,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
     func (m *SecretEnvSource) Reset()      { *m = SecretEnvSource{} }
     func (*SecretEnvSource) ProtoMessage() {}
     func (*SecretEnvSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{184}
    +	return fileDescriptor_6c07b07c062484ab, []int{189}
     }
     func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5232,7 +5372,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
     func (m *SecretKeySelector) Reset()      { *m = SecretKeySelector{} }
     func (*SecretKeySelector) ProtoMessage() {}
     func (*SecretKeySelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{185}
    +	return fileDescriptor_6c07b07c062484ab, []int{190}
     }
     func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5260,7 +5400,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
     func (m *SecretList) Reset()      { *m = SecretList{} }
     func (*SecretList) ProtoMessage() {}
     func (*SecretList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{186}
    +	return fileDescriptor_6c07b07c062484ab, []int{191}
     }
     func (m *SecretList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5288,7 +5428,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
     func (m *SecretProjection) Reset()      { *m = SecretProjection{} }
     func (*SecretProjection) ProtoMessage() {}
     func (*SecretProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{187}
    +	return fileDescriptor_6c07b07c062484ab, []int{192}
     }
     func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5316,7 +5456,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
     func (m *SecretReference) Reset()      { *m = SecretReference{} }
     func (*SecretReference) ProtoMessage() {}
     func (*SecretReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{188}
    +	return fileDescriptor_6c07b07c062484ab, []int{193}
     }
     func (m *SecretReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5344,7 +5484,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
     func (m *SecretVolumeSource) Reset()      { *m = SecretVolumeSource{} }
     func (*SecretVolumeSource) ProtoMessage() {}
     func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{189}
    +	return fileDescriptor_6c07b07c062484ab, []int{194}
     }
     func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5372,7 +5512,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
     func (m *SecurityContext) Reset()      { *m = SecurityContext{} }
     func (*SecurityContext) ProtoMessage() {}
     func (*SecurityContext) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{190}
    +	return fileDescriptor_6c07b07c062484ab, []int{195}
     }
     func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5400,7 +5540,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
     func (m *SerializedReference) Reset()      { *m = SerializedReference{} }
     func (*SerializedReference) ProtoMessage() {}
     func (*SerializedReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{191}
    +	return fileDescriptor_6c07b07c062484ab, []int{196}
     }
     func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5428,7 +5568,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
     func (m *Service) Reset()      { *m = Service{} }
     func (*Service) ProtoMessage() {}
     func (*Service) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{192}
    +	return fileDescriptor_6c07b07c062484ab, []int{197}
     }
     func (m *Service) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5456,7 +5596,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
     func (m *ServiceAccount) Reset()      { *m = ServiceAccount{} }
     func (*ServiceAccount) ProtoMessage() {}
     func (*ServiceAccount) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{193}
    +	return fileDescriptor_6c07b07c062484ab, []int{198}
     }
     func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5484,7 +5624,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
     func (m *ServiceAccountList) Reset()      { *m = ServiceAccountList{} }
     func (*ServiceAccountList) ProtoMessage() {}
     func (*ServiceAccountList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{194}
    +	return fileDescriptor_6c07b07c062484ab, []int{199}
     }
     func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5512,7 +5652,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
     func (m *ServiceAccountTokenProjection) Reset()      { *m = ServiceAccountTokenProjection{} }
     func (*ServiceAccountTokenProjection) ProtoMessage() {}
     func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{195}
    +	return fileDescriptor_6c07b07c062484ab, []int{200}
     }
     func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5540,7 +5680,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
     func (m *ServiceList) Reset()      { *m = ServiceList{} }
     func (*ServiceList) ProtoMessage() {}
     func (*ServiceList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{196}
    +	return fileDescriptor_6c07b07c062484ab, []int{201}
     }
     func (m *ServiceList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5568,7 +5708,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
     func (m *ServicePort) Reset()      { *m = ServicePort{} }
     func (*ServicePort) ProtoMessage() {}
     func (*ServicePort) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{197}
    +	return fileDescriptor_6c07b07c062484ab, []int{202}
     }
     func (m *ServicePort) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5596,7 +5736,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
     func (m *ServiceProxyOptions) Reset()      { *m = ServiceProxyOptions{} }
     func (*ServiceProxyOptions) ProtoMessage() {}
     func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{198}
    +	return fileDescriptor_6c07b07c062484ab, []int{203}
     }
     func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5624,7 +5764,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
     func (m *ServiceSpec) Reset()      { *m = ServiceSpec{} }
     func (*ServiceSpec) ProtoMessage() {}
     func (*ServiceSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{199}
    +	return fileDescriptor_6c07b07c062484ab, []int{204}
     }
     func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5652,7 +5792,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
     func (m *ServiceStatus) Reset()      { *m = ServiceStatus{} }
     func (*ServiceStatus) ProtoMessage() {}
     func (*ServiceStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{200}
    +	return fileDescriptor_6c07b07c062484ab, []int{205}
     }
     func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5680,7 +5820,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
     func (m *SessionAffinityConfig) Reset()      { *m = SessionAffinityConfig{} }
     func (*SessionAffinityConfig) ProtoMessage() {}
     func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{201}
    +	return fileDescriptor_6c07b07c062484ab, []int{206}
     }
     func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5708,7 +5848,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
     func (m *SleepAction) Reset()      { *m = SleepAction{} }
     func (*SleepAction) ProtoMessage() {}
     func (*SleepAction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{202}
    +	return fileDescriptor_6c07b07c062484ab, []int{207}
     }
     func (m *SleepAction) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5736,7 +5876,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo
     func (m *StorageOSPersistentVolumeSource) Reset()      { *m = StorageOSPersistentVolumeSource{} }
     func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
     func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{203}
    +	return fileDescriptor_6c07b07c062484ab, []int{208}
     }
     func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5764,7 +5904,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
     func (m *StorageOSVolumeSource) Reset()      { *m = StorageOSVolumeSource{} }
     func (*StorageOSVolumeSource) ProtoMessage() {}
     func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{204}
    +	return fileDescriptor_6c07b07c062484ab, []int{209}
     }
     func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5792,7 +5932,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
     func (m *Sysctl) Reset()      { *m = Sysctl{} }
     func (*Sysctl) ProtoMessage() {}
     func (*Sysctl) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{205}
    +	return fileDescriptor_6c07b07c062484ab, []int{210}
     }
     func (m *Sysctl) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5820,7 +5960,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
     func (m *TCPSocketAction) Reset()      { *m = TCPSocketAction{} }
     func (*TCPSocketAction) ProtoMessage() {}
     func (*TCPSocketAction) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{206}
    +	return fileDescriptor_6c07b07c062484ab, []int{211}
     }
     func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5848,7 +5988,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
     func (m *Taint) Reset()      { *m = Taint{} }
     func (*Taint) ProtoMessage() {}
     func (*Taint) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{207}
    +	return fileDescriptor_6c07b07c062484ab, []int{212}
     }
     func (m *Taint) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5876,7 +6016,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
     func (m *Toleration) Reset()      { *m = Toleration{} }
     func (*Toleration) ProtoMessage() {}
     func (*Toleration) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{208}
    +	return fileDescriptor_6c07b07c062484ab, []int{213}
     }
     func (m *Toleration) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5904,7 +6044,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
     func (m *TopologySelectorLabelRequirement) Reset()      { *m = TopologySelectorLabelRequirement{} }
     func (*TopologySelectorLabelRequirement) ProtoMessage() {}
     func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{209}
    +	return fileDescriptor_6c07b07c062484ab, []int{214}
     }
     func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5932,7 +6072,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
     func (m *TopologySelectorTerm) Reset()      { *m = TopologySelectorTerm{} }
     func (*TopologySelectorTerm) ProtoMessage() {}
     func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{210}
    +	return fileDescriptor_6c07b07c062484ab, []int{215}
     }
     func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5960,7 +6100,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
     func (m *TopologySpreadConstraint) Reset()      { *m = TopologySpreadConstraint{} }
     func (*TopologySpreadConstraint) ProtoMessage() {}
     func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{211}
    +	return fileDescriptor_6c07b07c062484ab, []int{216}
     }
     func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -5988,7 +6128,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
     func (m *TypedLocalObjectReference) Reset()      { *m = TypedLocalObjectReference{} }
     func (*TypedLocalObjectReference) ProtoMessage() {}
     func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{212}
    +	return fileDescriptor_6c07b07c062484ab, []int{217}
     }
     func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6016,7 +6156,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
     func (m *TypedObjectReference) Reset()      { *m = TypedObjectReference{} }
     func (*TypedObjectReference) ProtoMessage() {}
     func (*TypedObjectReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{213}
    +	return fileDescriptor_6c07b07c062484ab, []int{218}
     }
     func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6044,7 +6184,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
     func (m *Volume) Reset()      { *m = Volume{} }
     func (*Volume) ProtoMessage() {}
     func (*Volume) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{214}
    +	return fileDescriptor_6c07b07c062484ab, []int{219}
     }
     func (m *Volume) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6072,7 +6212,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
     func (m *VolumeDevice) Reset()      { *m = VolumeDevice{} }
     func (*VolumeDevice) ProtoMessage() {}
     func (*VolumeDevice) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{215}
    +	return fileDescriptor_6c07b07c062484ab, []int{220}
     }
     func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6100,7 +6240,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
     func (m *VolumeMount) Reset()      { *m = VolumeMount{} }
     func (*VolumeMount) ProtoMessage() {}
     func (*VolumeMount) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{216}
    +	return fileDescriptor_6c07b07c062484ab, []int{221}
     }
     func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6128,7 +6268,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
     func (m *VolumeMountStatus) Reset()      { *m = VolumeMountStatus{} }
     func (*VolumeMountStatus) ProtoMessage() {}
     func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{217}
    +	return fileDescriptor_6c07b07c062484ab, []int{222}
     }
     func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6156,7 +6296,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
     func (m *VolumeNodeAffinity) Reset()      { *m = VolumeNodeAffinity{} }
     func (*VolumeNodeAffinity) ProtoMessage() {}
     func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{218}
    +	return fileDescriptor_6c07b07c062484ab, []int{223}
     }
     func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6184,7 +6324,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
     func (m *VolumeProjection) Reset()      { *m = VolumeProjection{} }
     func (*VolumeProjection) ProtoMessage() {}
     func (*VolumeProjection) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{219}
    +	return fileDescriptor_6c07b07c062484ab, []int{224}
     }
     func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6212,7 +6352,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
     func (m *VolumeResourceRequirements) Reset()      { *m = VolumeResourceRequirements{} }
     func (*VolumeResourceRequirements) ProtoMessage() {}
     func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{220}
    +	return fileDescriptor_6c07b07c062484ab, []int{225}
     }
     func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6240,7 +6380,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
     func (m *VolumeSource) Reset()      { *m = VolumeSource{} }
     func (*VolumeSource) ProtoMessage() {}
     func (*VolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{221}
    +	return fileDescriptor_6c07b07c062484ab, []int{226}
     }
     func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6268,7 +6408,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
     func (m *VsphereVirtualDiskVolumeSource) Reset()      { *m = VsphereVirtualDiskVolumeSource{} }
     func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
     func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{222}
    +	return fileDescriptor_6c07b07c062484ab, []int{227}
     }
     func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6296,7 +6436,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
     func (m *WeightedPodAffinityTerm) Reset()      { *m = WeightedPodAffinityTerm{} }
     func (*WeightedPodAffinityTerm) ProtoMessage() {}
     func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{223}
    +	return fileDescriptor_6c07b07c062484ab, []int{228}
     }
     func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6324,7 +6464,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
     func (m *WindowsSecurityContextOptions) Reset()      { *m = WindowsSecurityContextOptions{} }
     func (*WindowsSecurityContextOptions) ProtoMessage() {}
     func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_6c07b07c062484ab, []int{224}
    +	return fileDescriptor_6c07b07c062484ab, []int{229}
     }
     func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -6368,7 +6508,6 @@ func init() {
     	proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
     	proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource")
     	proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
    -	proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource")
     	proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig")
     	proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection")
     	proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition")
    @@ -6393,6 +6532,7 @@ func init() {
     	proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting")
     	proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus")
     	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry")
    +	proto.RegisterType((*ContainerUser)(nil), "k8s.io.api.core.v1.ContainerUser")
     	proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint")
     	proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection")
     	proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile")
    @@ -6432,6 +6572,7 @@ func init() {
     	proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource")
     	proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource")
     	proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource")
    +	proto.RegisterType((*ImageVolumeSource)(nil), "k8s.io.api.core.v1.ImageVolumeSource")
     	proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath")
     	proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle")
     	proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler")
    @@ -6444,6 +6585,7 @@ func init() {
     	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry")
     	proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList")
     	proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec")
    +	proto.RegisterType((*LinuxContainerUser)(nil), "k8s.io.api.core.v1.LinuxContainerUser")
     	proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List")
     	proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress")
     	proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus")
    @@ -6463,6 +6605,7 @@ func init() {
     	proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource")
     	proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus")
     	proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints")
    +	proto.RegisterType((*NodeFeatures)(nil), "k8s.io.api.core.v1.NodeFeatures")
     	proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList")
     	proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions")
     	proto.RegisterType((*NodeRuntimeHandler)(nil), "k8s.io.api.core.v1.NodeRuntimeHandler")
    @@ -6543,6 +6686,7 @@ func init() {
     	proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus")
     	proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim")
     	proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector")
    +	proto.RegisterType((*ResourceHealth)(nil), "k8s.io.api.core.v1.ResourceHealth")
     	proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota")
     	proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList")
     	proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec")
    @@ -6553,6 +6697,7 @@ func init() {
     	proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements")
     	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry")
     	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry")
    +	proto.RegisterType((*ResourceStatus)(nil), "k8s.io.api.core.v1.ResourceStatus")
     	proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions")
     	proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource")
     	proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource")
    @@ -6613,989 +6758,1011 @@ func init() {
     }
     
     var fileDescriptor_6c07b07c062484ab = []byte{
    -	// 15708 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x59, 0x8c, 0x1c, 0xd9,
    -	0x75, 0x20, 0xaa, 0xc8, 0xac, 0xf5, 0xd4, 0x7e, 0x8b, 0x64, 0x17, 0xab, 0x49, 0x26, 0x3b, 0xba,
    -	0x9b, 0xcd, 0xde, 0x8a, 0x62, 0x2f, 0xea, 0x56, 0x77, 0xab, 0xad, 0x5a, 0xc9, 0x6c, 0x56, 0x15,
    -	0xb3, 0x6f, 0x16, 0x49, 0xa9, 0xd5, 0x92, 0x15, 0xcc, 0xbc, 0x55, 0x15, 0xaa, 0xcc, 0x88, 0xec,
    -	0x88, 0xc8, 0x22, 0x8b, 0x4f, 0x86, 0x6d, 0xf9, 0x59, 0xb6, 0x64, 0x3f, 0x40, 0x78, 0xf0, 0x5b,
    -	0x20, 0x1b, 0xc6, 0x83, 0x9f, 0x9f, 0x97, 0xa7, 0x67, 0xbf, 0xd1, 0xc8, 0xe3, 0x4d, 0xde, 0xc6,
    -	0x33, 0x03, 0xd8, 0x83, 0x81, 0xc7, 0x63, 0xc0, 0x96, 0x31, 0xc6, 0x94, 0x47, 0xf4, 0x00, 0x86,
    -	0x3f, 0xc6, 0x36, 0x3c, 0xf3, 0x31, 0x53, 0xf0, 0x8c, 0x07, 0x77, 0x8d, 0x7b, 0x63, 0xc9, 0xcc,
    -	0x62, 0x93, 0xa5, 0x96, 0xd0, 0x7f, 0x99, 0xe7, 0x9c, 0x7b, 0xee, 0x8d, 0xbb, 0x9e, 0x7b, 0xce,
    -	0xb9, 0xe7, 0x80, 0xbd, 0xf3, 0x72, 0x38, 0xe7, 0xfa, 0x17, 0x9c, 0x96, 0x7b, 0xa1, 0xe6, 0x07,
    -	0xe4, 0xc2, 0xee, 0xc5, 0x0b, 0x5b, 0xc4, 0x23, 0x81, 0x13, 0x91, 0xfa, 0x5c, 0x2b, 0xf0, 0x23,
    -	0x1f, 0x21, 0x4e, 0x33, 0xe7, 0xb4, 0xdc, 0x39, 0x4a, 0x33, 0xb7, 0x7b, 0x71, 0xf6, 0xd9, 0x2d,
    -	0x37, 0xda, 0x6e, 0xdf, 0x9c, 0xab, 0xf9, 0xcd, 0x0b, 0x5b, 0xfe, 0x96, 0x7f, 0x81, 0x91, 0xde,
    -	0x6c, 0x6f, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x59, 0xcc, 0xbe, 0x10, 0x57, 0xd3, 0x74, 0x6a,
    -	0xdb, 0xae, 0x47, 0x82, 0xbd, 0x0b, 0xad, 0x9d, 0x2d, 0x56, 0x6f, 0x40, 0x42, 0xbf, 0x1d, 0xd4,
    -	0x48, 0xb2, 0xe2, 0x8e, 0xa5, 0xc2, 0x0b, 0x4d, 0x12, 0x39, 0x19, 0xcd, 0x9d, 0xbd, 0x90, 0x57,
    -	0x2a, 0x68, 0x7b, 0x91, 0xdb, 0x4c, 0x57, 0xf3, 0xa1, 0x6e, 0x05, 0xc2, 0xda, 0x36, 0x69, 0x3a,
    -	0xa9, 0x72, 0xcf, 0xe7, 0x95, 0x6b, 0x47, 0x6e, 0xe3, 0x82, 0xeb, 0x45, 0x61, 0x14, 0x24, 0x0b,
    -	0xd9, 0xdf, 0xb0, 0xe0, 0xec, 0xfc, 0x8d, 0xea, 0x72, 0xc3, 0x09, 0x23, 0xb7, 0xb6, 0xd0, 0xf0,
    -	0x6b, 0x3b, 0xd5, 0xc8, 0x0f, 0xc8, 0x75, 0xbf, 0xd1, 0x6e, 0x92, 0x2a, 0xeb, 0x08, 0xf4, 0x0c,
    -	0x0c, 0xed, 0xb2, 0xff, 0xe5, 0xa5, 0x19, 0xeb, 0xac, 0x75, 0x7e, 0x78, 0x61, 0xf2, 0xf7, 0xf6,
    -	0x4b, 0x1f, 0xb8, 0xbb, 0x5f, 0x1a, 0xba, 0x2e, 0xe0, 0x58, 0x51, 0xa0, 0x73, 0x30, 0xb0, 0x19,
    -	0x6e, 0xec, 0xb5, 0xc8, 0x4c, 0x81, 0xd1, 0x8e, 0x0b, 0xda, 0x81, 0x95, 0x2a, 0x85, 0x62, 0x81,
    -	0x45, 0x17, 0x60, 0xb8, 0xe5, 0x04, 0x91, 0x1b, 0xb9, 0xbe, 0x37, 0x53, 0x3c, 0x6b, 0x9d, 0xef,
    -	0x5f, 0x98, 0x12, 0xa4, 0xc3, 0x15, 0x89, 0xc0, 0x31, 0x0d, 0x6d, 0x46, 0x40, 0x9c, 0xfa, 0x55,
    -	0xaf, 0xb1, 0x37, 0xd3, 0x77, 0xd6, 0x3a, 0x3f, 0x14, 0x37, 0x03, 0x0b, 0x38, 0x56, 0x14, 0xf6,
    -	0x97, 0x0b, 0x30, 0x34, 0xbf, 0xb9, 0xe9, 0x7a, 0x6e, 0xb4, 0x87, 0xae, 0xc3, 0xa8, 0xe7, 0xd7,
    -	0x89, 0xfc, 0xcf, 0xbe, 0x62, 0xe4, 0xb9, 0xb3, 0x73, 0xe9, 0xa9, 0x34, 0xb7, 0xae, 0xd1, 0x2d,
    -	0x4c, 0xde, 0xdd, 0x2f, 0x8d, 0xea, 0x10, 0x6c, 0xf0, 0x41, 0x18, 0x46, 0x5a, 0x7e, 0x5d, 0xb1,
    -	0x2d, 0x30, 0xb6, 0xa5, 0x2c, 0xb6, 0x95, 0x98, 0x6c, 0x61, 0xe2, 0xee, 0x7e, 0x69, 0x44, 0x03,
    -	0x60, 0x9d, 0x09, 0xba, 0x09, 0x13, 0xf4, 0xaf, 0x17, 0xb9, 0x8a, 0x6f, 0x91, 0xf1, 0x7d, 0x34,
    -	0x8f, 0xaf, 0x46, 0xba, 0x30, 0x7d, 0x77, 0xbf, 0x34, 0x91, 0x00, 0xe2, 0x24, 0x43, 0xfb, 0x47,
    -	0x2d, 0x98, 0x98, 0x6f, 0xb5, 0xe6, 0x83, 0xa6, 0x1f, 0x54, 0x02, 0x7f, 0xd3, 0x6d, 0x10, 0xf4,
    -	0x12, 0xf4, 0x45, 0x74, 0xd4, 0xf8, 0x08, 0x3f, 0x2a, 0xba, 0xb6, 0x8f, 0x8e, 0xd5, 0xc1, 0x7e,
    -	0x69, 0x3a, 0x41, 0xce, 0x86, 0x92, 0x15, 0x40, 0x1f, 0x85, 0xc9, 0x86, 0x5f, 0x73, 0x1a, 0xdb,
    -	0x7e, 0x18, 0x09, 0xac, 0x18, 0xfa, 0x63, 0x77, 0xf7, 0x4b, 0x93, 0xab, 0x09, 0x1c, 0x4e, 0x51,
    -	0xdb, 0x77, 0x60, 0x7c, 0x3e, 0x8a, 0x9c, 0xda, 0x36, 0xa9, 0xf3, 0x09, 0x85, 0x5e, 0x80, 0x3e,
    -	0xcf, 0x69, 0xca, 0xc6, 0x9c, 0x95, 0x8d, 0x59, 0x77, 0x9a, 0xb4, 0x31, 0x93, 0xd7, 0x3c, 0xf7,
    -	0x9d, 0xb6, 0x98, 0xa4, 0x14, 0x86, 0x19, 0x35, 0x7a, 0x0e, 0xa0, 0x4e, 0x76, 0xdd, 0x1a, 0xa9,
    -	0x38, 0xd1, 0xb6, 0x68, 0x03, 0x12, 0x65, 0x61, 0x49, 0x61, 0xb0, 0x46, 0x65, 0xdf, 0x86, 0xe1,
    -	0xf9, 0x5d, 0xdf, 0xad, 0x57, 0xfc, 0x7a, 0x88, 0x76, 0x60, 0xa2, 0x15, 0x90, 0x4d, 0x12, 0x28,
    -	0xd0, 0x8c, 0x75, 0xb6, 0x78, 0x7e, 0xe4, 0xb9, 0xf3, 0x99, 0x7d, 0x6f, 0x92, 0x2e, 0x7b, 0x51,
    -	0xb0, 0xb7, 0xf0, 0x90, 0xa8, 0x6f, 0x22, 0x81, 0xc5, 0x49, 0xce, 0xf6, 0x3f, 0x2f, 0xc0, 0xf1,
    -	0xf9, 0x3b, 0xed, 0x80, 0x2c, 0xb9, 0xe1, 0x4e, 0x72, 0xc1, 0xd5, 0xdd, 0x70, 0x67, 0x3d, 0xee,
    -	0x01, 0x35, 0xd3, 0x97, 0x04, 0x1c, 0x2b, 0x0a, 0xf4, 0x2c, 0x0c, 0xd2, 0xdf, 0xd7, 0x70, 0x59,
    -	0x7c, 0xf2, 0xb4, 0x20, 0x1e, 0x59, 0x72, 0x22, 0x67, 0x89, 0xa3, 0xb0, 0xa4, 0x41, 0x6b, 0x30,
    -	0x52, 0x63, 0xfb, 0xc3, 0xd6, 0x9a, 0x5f, 0x27, 0x6c, 0x6e, 0x0d, 0x2f, 0x3c, 0x4d, 0xc9, 0x17,
    -	0x63, 0xf0, 0xc1, 0x7e, 0x69, 0x86, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5,
    -	0xdc, 0xfb, 0x18, 0x27, 0xc8, 0x58, 0xea, 0xe7, 0xb5, 0x95, 0xdb, 0xcf, 0x56, 0xee, 0x68, 0xf6,
    -	0xaa, 0x45, 0x17, 0xa1, 0x6f, 0xc7, 0xf5, 0xea, 0x33, 0x03, 0x8c, 0xd7, 0x69, 0x3a, 0xe6, 0x57,
    -	0x5c, 0xaf, 0x7e, 0xb0, 0x5f, 0x9a, 0x32, 0x9a, 0x43, 0x81, 0x98, 0x91, 0xda, 0xff, 0xc9, 0x82,
    -	0x12, 0xc3, 0xad, 0xb8, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0xa3, 0x43, 0x9f,
    -	0x03, 0x08, 0x49, 0x2d, 0x20, 0x91, 0xd6, 0xa5, 0x6a, 0x62, 0x54, 0x15, 0x06, 0x6b, 0x54, 0x74,
    -	0x7f, 0x0a, 0xb7, 0x9d, 0x80, 0xcd, 0x2f, 0xd1, 0xb1, 0x6a, 0x7f, 0xaa, 0x4a, 0x04, 0x8e, 0x69,
    -	0x8c, 0xfd, 0xa9, 0xd8, 0x6d, 0x7f, 0x42, 0x1f, 0x81, 0x89, 0xb8, 0xb2, 0xb0, 0xe5, 0xd4, 0x64,
    -	0x07, 0xb2, 0x15, 0x5c, 0x35, 0x51, 0x38, 0x49, 0x6b, 0xff, 0xbf, 0x96, 0x98, 0x3c, 0xf4, 0xab,
    -	0xdf, 0xe3, 0xdf, 0x6a, 0xff, 0xaa, 0x05, 0x83, 0x0b, 0xae, 0x57, 0x77, 0xbd, 0x2d, 0xf4, 0x69,
    -	0x18, 0xa2, 0x47, 0x65, 0xdd, 0x89, 0x1c, 0xb1, 0x0d, 0x7f, 0x50, 0x5b, 0x5b, 0xea, 0xe4, 0x9a,
    -	0x6b, 0xed, 0x6c, 0x51, 0x40, 0x38, 0x47, 0xa9, 0xe9, 0x6a, 0xbb, 0x7a, 0xf3, 0x33, 0xa4, 0x16,
    -	0xad, 0x91, 0xc8, 0x89, 0x3f, 0x27, 0x86, 0x61, 0xc5, 0x15, 0x5d, 0x81, 0x81, 0xc8, 0x09, 0xb6,
    -	0x48, 0x24, 0xf6, 0xe3, 0xcc, 0x7d, 0x93, 0x97, 0xc4, 0x74, 0x45, 0x12, 0xaf, 0x46, 0xe2, 0x53,
    -	0x6a, 0x83, 0x15, 0xc5, 0x82, 0x85, 0xfd, 0xdf, 0x06, 0xe1, 0xe4, 0x62, 0xb5, 0x9c, 0x33, 0xaf,
    -	0xce, 0xc1, 0x40, 0x3d, 0x70, 0x77, 0x49, 0x20, 0xfa, 0x59, 0x71, 0x59, 0x62, 0x50, 0x2c, 0xb0,
    -	0xe8, 0x65, 0x18, 0xe5, 0xe7, 0xe3, 0x65, 0xc7, 0xab, 0xc7, 0xdb, 0xa3, 0xa0, 0x1e, 0xbd, 0xae,
    -	0xe1, 0xb0, 0x41, 0x79, 0xc8, 0x49, 0x75, 0x2e, 0xb1, 0x18, 0xf3, 0xce, 0xde, 0x2f, 0x58, 0x30,
    -	0xc9, 0xab, 0x99, 0x8f, 0xa2, 0xc0, 0xbd, 0xd9, 0x8e, 0x48, 0x38, 0xd3, 0xcf, 0x76, 0xba, 0xc5,
    -	0xac, 0xde, 0xca, 0xed, 0x81, 0xb9, 0xeb, 0x09, 0x2e, 0x7c, 0x13, 0x9c, 0x11, 0xf5, 0x4e, 0x26,
    -	0xd1, 0x38, 0x55, 0x2d, 0xfa, 0x01, 0x0b, 0x66, 0x6b, 0xbe, 0x17, 0x05, 0x7e, 0xa3, 0x41, 0x82,
    -	0x4a, 0xfb, 0x66, 0xc3, 0x0d, 0xb7, 0xf9, 0x3c, 0xc5, 0x64, 0x93, 0xed, 0x04, 0x39, 0x63, 0xa8,
    -	0x88, 0xc4, 0x18, 0x9e, 0xb9, 0xbb, 0x5f, 0x9a, 0x5d, 0xcc, 0x65, 0x85, 0x3b, 0x54, 0x83, 0x76,
    -	0x00, 0xd1, 0x93, 0xbd, 0x1a, 0x39, 0x5b, 0x24, 0xae, 0x7c, 0xb0, 0xf7, 0xca, 0x4f, 0xdc, 0xdd,
    -	0x2f, 0xa1, 0xf5, 0x14, 0x0b, 0x9c, 0xc1, 0x16, 0xbd, 0x03, 0xc7, 0x28, 0x34, 0xf5, 0xad, 0x43,
    -	0xbd, 0x57, 0x37, 0x73, 0x77, 0xbf, 0x74, 0x6c, 0x3d, 0x83, 0x09, 0xce, 0x64, 0x8d, 0xbe, 0xcf,
    -	0x82, 0x93, 0xf1, 0xe7, 0x2f, 0xdf, 0x6e, 0x39, 0x5e, 0x3d, 0xae, 0x78, 0xb8, 0xf7, 0x8a, 0xe9,
    -	0x9e, 0x7c, 0x72, 0x31, 0x8f, 0x13, 0xce, 0xaf, 0x04, 0x79, 0x30, 0x4d, 0x9b, 0x96, 0xac, 0x1b,
    -	0x7a, 0xaf, 0xfb, 0xa1, 0xbb, 0xfb, 0xa5, 0xe9, 0xf5, 0x34, 0x0f, 0x9c, 0xc5, 0x78, 0x76, 0x11,
    -	0x8e, 0x67, 0xce, 0x4e, 0x34, 0x09, 0xc5, 0x1d, 0xc2, 0x85, 0xc0, 0x61, 0x4c, 0x7f, 0xa2, 0x63,
    -	0xd0, 0xbf, 0xeb, 0x34, 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0x95, 0xc2, 0xcb, 0x96, 0xfd, 0x2f,
    -	0x8a, 0x30, 0xb1, 0x58, 0x2d, 0xdf, 0xd3, 0xaa, 0xd7, 0x8f, 0xbd, 0x42, 0xc7, 0x63, 0x2f, 0x3e,
    -	0x44, 0x8b, 0xb9, 0x87, 0xe8, 0xf7, 0x66, 0x2c, 0xd9, 0x3e, 0xb6, 0x64, 0x3f, 0x9c, 0xb3, 0x64,
    -	0xef, 0xf3, 0x42, 0xdd, 0xcd, 0x99, 0xb5, 0xfd, 0x6c, 0x00, 0x33, 0x25, 0x24, 0x26, 0xfb, 0x25,
    -	0xb7, 0xda, 0x43, 0x4e, 0xdd, 0xfb, 0x33, 0x8e, 0x35, 0x18, 0x5d, 0x74, 0x5a, 0xce, 0x4d, 0xb7,
    -	0xe1, 0x46, 0x2e, 0x09, 0xd1, 0x13, 0x50, 0x74, 0xea, 0x75, 0x26, 0xdd, 0x0d, 0x2f, 0x1c, 0xbf,
    -	0xbb, 0x5f, 0x2a, 0xce, 0xd7, 0xa9, 0x98, 0x01, 0x8a, 0x6a, 0x0f, 0x53, 0x0a, 0xf4, 0x14, 0xf4,
    -	0xd5, 0x03, 0xbf, 0x35, 0x53, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x05, 0x7e, 0x2b, 0x41, 0xca,
    -	0x68, 0xec, 0xdf, 0x29, 0xc0, 0xa9, 0x45, 0xd2, 0xda, 0x5e, 0xa9, 0xe6, 0x9c, 0x17, 0xe7, 0x61,
    -	0xa8, 0xe9, 0x7b, 0x6e, 0xe4, 0x07, 0xa1, 0xa8, 0x9a, 0xcd, 0x88, 0x35, 0x01, 0xc3, 0x0a, 0x8b,
    -	0xce, 0x42, 0x5f, 0x2b, 0x16, 0x62, 0x47, 0xa5, 0x00, 0xcc, 0xc4, 0x57, 0x86, 0xa1, 0x14, 0xed,
    -	0x90, 0x04, 0x62, 0xc6, 0x28, 0x8a, 0x6b, 0x21, 0x09, 0x30, 0xc3, 0xc4, 0x92, 0x00, 0x95, 0x11,
    -	0xc4, 0x89, 0x90, 0x90, 0x04, 0x28, 0x06, 0x6b, 0x54, 0xa8, 0x02, 0xc3, 0x61, 0x62, 0x64, 0x7b,
    -	0x5a, 0x9a, 0x63, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, 0xc2,
    -	0xd7, 0x0b, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0xb5, 0x74, 0xc7, 0xf5, 0xbe, 0x24, 0xee,
    -	0x57, 0xef, 0xfd, 0x67, 0x0b, 0x4e, 0x2d, 0xba, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1, 0x5c,
    -	0xe5, 0x0f, 0x27, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x3e, 0x4c, 0x31, 0xfb, 0x6f, 0x2c, 0x40, 0xfc,
    -	0xb3, 0xdf, 0x73, 0x1f, 0x7b, 0x2d, 0xfd, 0xb1, 0xf7, 0x61, 0x5a, 0xd8, 0xff, 0xbf, 0x05, 0x23,
    -	0x8b, 0x0d, 0xc7, 0x6d, 0x8a, 0x4f, 0x5d, 0x84, 0x29, 0xa9, 0xb7, 0x62, 0x60, 0x4d, 0xf6, 0xa7,
    -	0x9b, 0xdb, 0x14, 0x4e, 0x22, 0x71, 0x9a, 0x1e, 0x7d, 0x02, 0x4e, 0x1a, 0xc0, 0x0d, 0xd2, 0x6c,
    -	0x35, 0x9c, 0x48, 0xbf, 0x15, 0xb0, 0xd3, 0x1f, 0xe7, 0x11, 0xe1, 0xfc, 0xf2, 0xf6, 0x2a, 0x8c,
    -	0x2f, 0x36, 0x5c, 0xe2, 0x45, 0xe5, 0xca, 0xa2, 0xef, 0x6d, 0xba, 0x5b, 0xe8, 0x15, 0x18, 0x8f,
    -	0xdc, 0x26, 0xf1, 0xdb, 0x51, 0x95, 0xd4, 0x7c, 0x8f, 0xdd, 0xb5, 0xad, 0xf3, 0xfd, 0x0b, 0xe8,
    -	0xee, 0x7e, 0x69, 0x7c, 0xc3, 0xc0, 0xe0, 0x04, 0xa5, 0xfd, 0x33, 0x74, 0xa7, 0x6d, 0xb4, 0xc3,
    -	0x88, 0x04, 0x1b, 0x41, 0x3b, 0x8c, 0x16, 0xda, 0x54, 0x5a, 0xae, 0x04, 0x3e, 0xed, 0x40, 0xd7,
    -	0xf7, 0xd0, 0x29, 0x43, 0x81, 0x30, 0x24, 0x95, 0x07, 0x42, 0x51, 0x30, 0x07, 0x10, 0xba, 0x5b,
    -	0x1e, 0x09, 0xb4, 0x4f, 0x1b, 0x67, 0x8b, 0x5b, 0x41, 0xb1, 0x46, 0x81, 0x1a, 0x30, 0xd6, 0x70,
    -	0x6e, 0x92, 0x46, 0x95, 0x34, 0x48, 0x2d, 0xf2, 0x03, 0xa1, 0x91, 0x79, 0xbe, 0xb7, 0x9b, 0xcb,
    -	0xaa, 0x5e, 0x74, 0x61, 0xea, 0xee, 0x7e, 0x69, 0xcc, 0x00, 0x61, 0x93, 0x39, 0xdd, 0xec, 0xfc,
    -	0x16, 0xfd, 0x0a, 0xa7, 0xa1, 0x5f, 0x97, 0xaf, 0x0a, 0x18, 0x56, 0x58, 0xb5, 0xd9, 0xf5, 0xe5,
    -	0x6d, 0x76, 0xf6, 0x9f, 0xd1, 0xa5, 0xe1, 0x37, 0x5b, 0xbe, 0x47, 0xbc, 0x68, 0xd1, 0xf7, 0xea,
    -	0x5c, 0x97, 0xf6, 0x8a, 0xa1, 0xec, 0x39, 0x97, 0x50, 0xf6, 0x9c, 0x48, 0x97, 0xd0, 0xf4, 0x3d,
    -	0x1f, 0x86, 0x81, 0x30, 0x72, 0xa2, 0x76, 0x28, 0x3a, 0xee, 0x11, 0xb9, 0x50, 0xaa, 0x0c, 0x7a,
    -	0xb0, 0x5f, 0x9a, 0x50, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x49, 0x18, 0x6c, 0x92, 0x30, 0x74,
    -	0xb6, 0xa4, 0xa0, 0x33, 0x21, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x28, 0xf4, 0x93,
    -	0x20, 0xf0, 0x03, 0xf1, 0x6d, 0x63, 0x82, 0xb0, 0x7f, 0x99, 0x02, 0x31, 0xc7, 0xd9, 0xff, 0xda,
    -	0x82, 0x09, 0xd5, 0x56, 0x5e, 0xd7, 0x11, 0x5c, 0x30, 0xdf, 0x02, 0xa8, 0xc9, 0x0f, 0x0c, 0x99,
    -	0x60, 0x30, 0xf2, 0xdc, 0xb9, 0x4c, 0x19, 0x2c, 0xd5, 0x8d, 0x31, 0x67, 0x05, 0x0a, 0xb1, 0xc6,
    -	0xcd, 0xfe, 0x4d, 0x0b, 0xa6, 0x13, 0x5f, 0xb4, 0xea, 0x86, 0x11, 0x7a, 0x3b, 0xf5, 0x55, 0x73,
    -	0x3d, 0x4e, 0x3e, 0x37, 0xe4, 0xdf, 0xa4, 0x76, 0x29, 0x09, 0xd1, 0xbe, 0xe8, 0x32, 0xf4, 0xbb,
    -	0x11, 0x69, 0xca, 0x8f, 0x79, 0xb4, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0x52, 0xa6, 0x25, 0x31,
    -	0x67, 0x60, 0xff, 0x4e, 0x11, 0x86, 0xf9, 0xfa, 0x5e, 0x73, 0x5a, 0x47, 0x30, 0x16, 0x4f, 0xc3,
    -	0xb0, 0xdb, 0x6c, 0xb6, 0x23, 0xe7, 0xa6, 0x38, 0xa9, 0x87, 0xf8, 0xae, 0x59, 0x96, 0x40, 0x1c,
    -	0xe3, 0x51, 0x19, 0xfa, 0x58, 0x53, 0xf8, 0x57, 0x3e, 0x91, 0xfd, 0x95, 0xa2, 0xed, 0x73, 0x4b,
    -	0x4e, 0xe4, 0x70, 0x21, 0x59, 0xad, 0x2b, 0x0a, 0xc2, 0x8c, 0x05, 0x72, 0x00, 0x6e, 0xba, 0x9e,
    -	0x13, 0xec, 0x51, 0xd8, 0x4c, 0x91, 0x31, 0x7c, 0xb6, 0x33, 0xc3, 0x05, 0x45, 0xcf, 0xd9, 0xaa,
    -	0x0f, 0x8b, 0x11, 0x58, 0x63, 0x3a, 0xfb, 0x12, 0x0c, 0x2b, 0xe2, 0xc3, 0xc8, 0xba, 0xb3, 0x1f,
    -	0x81, 0x89, 0x44, 0x5d, 0xdd, 0x8a, 0x8f, 0xea, 0xa2, 0xf2, 0xaf, 0xb3, 0x2d, 0x43, 0xb4, 0x7a,
    -	0xd9, 0xdb, 0x15, 0x47, 0xcc, 0x1d, 0x38, 0xd6, 0xc8, 0x38, 0xa4, 0xc4, 0xb8, 0xf6, 0x7e, 0xa8,
    -	0x9d, 0x12, 0x9f, 0x7d, 0x2c, 0x0b, 0x8b, 0x33, 0xeb, 0x30, 0x76, 0xc4, 0x42, 0xa7, 0x1d, 0x91,
    -	0xee, 0x77, 0xc7, 0x54, 0xe3, 0xaf, 0x90, 0x3d, 0xb5, 0xa9, 0x7e, 0x2b, 0x9b, 0x7f, 0x9a, 0xf7,
    -	0x3e, 0xdf, 0x2e, 0x47, 0x04, 0x83, 0xe2, 0x15, 0xb2, 0xc7, 0x87, 0x42, 0xff, 0xba, 0x62, 0xc7,
    -	0xaf, 0xfb, 0xaa, 0x05, 0x63, 0xea, 0xeb, 0x8e, 0x60, 0x5f, 0x58, 0x30, 0xf7, 0x85, 0xd3, 0x1d,
    -	0x27, 0x78, 0xce, 0x8e, 0xf0, 0xf5, 0x02, 0x9c, 0x54, 0x34, 0xf4, 0xda, 0xc7, 0xff, 0x88, 0x59,
    -	0x75, 0x01, 0x86, 0x3d, 0xa5, 0x00, 0xb5, 0x4c, 0xcd, 0x63, 0xac, 0xfe, 0x8c, 0x69, 0xe8, 0x91,
    -	0xe7, 0xc5, 0x87, 0xf6, 0xa8, 0x6e, 0x19, 0x10, 0x87, 0xfb, 0x02, 0x14, 0xdb, 0x6e, 0x5d, 0x1c,
    -	0x30, 0x1f, 0x94, 0xbd, 0x7d, 0xad, 0xbc, 0x74, 0xb0, 0x5f, 0x7a, 0x24, 0xcf, 0x48, 0x46, 0x4f,
    -	0xb6, 0x70, 0xee, 0x5a, 0x79, 0x09, 0xd3, 0xc2, 0x68, 0x1e, 0x26, 0xa4, 0x28, 0x73, 0x9d, 0x4a,
    -	0xd2, 0xbe, 0x27, 0xce, 0x21, 0xa5, 0xde, 0xc7, 0x26, 0x1a, 0x27, 0xe9, 0xd1, 0x12, 0x4c, 0xee,
    -	0xb4, 0x6f, 0x92, 0x06, 0x89, 0xf8, 0x07, 0x5f, 0x21, 0x5c, 0xf9, 0x3d, 0x1c, 0x5f, 0xba, 0xaf,
    -	0x24, 0xf0, 0x38, 0x55, 0xc2, 0xfe, 0x07, 0x76, 0x1e, 0x88, 0xde, 0xd3, 0xe4, 0x9b, 0x6f, 0xe5,
    -	0x74, 0xee, 0x65, 0x56, 0x5c, 0x21, 0x7b, 0x1b, 0x3e, 0x95, 0x43, 0xb2, 0x67, 0x85, 0x31, 0xe7,
    -	0xfb, 0x3a, 0xce, 0xf9, 0x5f, 0x2a, 0xc0, 0x71, 0xd5, 0x03, 0x86, 0x7c, 0xff, 0xed, 0xde, 0x07,
    -	0x17, 0x61, 0xa4, 0x4e, 0x36, 0x9d, 0x76, 0x23, 0x52, 0x96, 0x98, 0x7e, 0x6e, 0x1c, 0x5c, 0x8a,
    -	0xc1, 0x58, 0xa7, 0x39, 0x44, 0xb7, 0xfd, 0xc2, 0x18, 0x3b, 0x88, 0x23, 0x87, 0xce, 0x71, 0xb5,
    -	0x6a, 0xac, 0xdc, 0x55, 0xf3, 0x28, 0xf4, 0xbb, 0x4d, 0x2a, 0x98, 0x15, 0x4c, 0x79, 0xab, 0x4c,
    -	0x81, 0x98, 0xe3, 0xd0, 0xe3, 0x30, 0x58, 0xf3, 0x9b, 0x4d, 0xc7, 0xab, 0xb3, 0x23, 0x6f, 0x78,
    -	0x61, 0x84, 0xca, 0x6e, 0x8b, 0x1c, 0x84, 0x25, 0x8e, 0x0a, 0xdf, 0x4e, 0xb0, 0xc5, 0xd5, 0x53,
    -	0x42, 0xf8, 0x9e, 0x0f, 0xb6, 0x42, 0xcc, 0xa0, 0xf4, 0x76, 0x7d, 0xcb, 0x0f, 0x76, 0x5c, 0x6f,
    -	0x6b, 0xc9, 0x0d, 0xc4, 0x92, 0x50, 0x67, 0xe1, 0x0d, 0x85, 0xc1, 0x1a, 0x15, 0x5a, 0x81, 0xfe,
    -	0x96, 0x1f, 0x44, 0xe1, 0xcc, 0x00, 0xeb, 0xee, 0x47, 0x72, 0x36, 0x22, 0xfe, 0xb5, 0x15, 0x3f,
    -	0x88, 0xe2, 0x0f, 0xa0, 0xff, 0x42, 0xcc, 0x8b, 0xa3, 0x55, 0x18, 0x24, 0xde, 0xee, 0x4a, 0xe0,
    -	0x37, 0x67, 0xa6, 0xf3, 0x39, 0x2d, 0x73, 0x12, 0x3e, 0xcd, 0x62, 0x19, 0x55, 0x80, 0xb1, 0x64,
    -	0x81, 0x3e, 0x0c, 0x45, 0xe2, 0xed, 0xce, 0x0c, 0x32, 0x4e, 0xb3, 0x39, 0x9c, 0xae, 0x3b, 0x41,
    -	0xbc, 0xe7, 0x2f, 0x7b, 0xbb, 0x98, 0x96, 0x41, 0x1f, 0x87, 0x61, 0xb9, 0x61, 0x84, 0x42, 0xef,
    -	0x9b, 0x39, 0x61, 0xe5, 0x36, 0x83, 0xc9, 0x3b, 0x6d, 0x37, 0x20, 0x4d, 0xe2, 0x45, 0x61, 0xbc,
    -	0x43, 0x4a, 0x6c, 0x88, 0x63, 0x6e, 0xa8, 0x06, 0xa3, 0x01, 0x09, 0xdd, 0x3b, 0xa4, 0xe2, 0x37,
    -	0xdc, 0xda, 0xde, 0xcc, 0x43, 0xac, 0x79, 0x4f, 0x76, 0xec, 0x32, 0xac, 0x15, 0x88, 0xed, 0x12,
    -	0x3a, 0x14, 0x1b, 0x4c, 0xd1, 0x9b, 0x30, 0x16, 0x90, 0x30, 0x72, 0x82, 0x48, 0xd4, 0x32, 0xa3,
    -	0xec, 0x88, 0x63, 0x58, 0x47, 0xf0, 0xeb, 0x44, 0x5c, 0x4d, 0x8c, 0xc1, 0x26, 0x07, 0xf4, 0x71,
    -	0x69, 0x24, 0x59, 0xf3, 0xdb, 0x5e, 0x14, 0xce, 0x0c, 0xb3, 0x76, 0x67, 0x5a, 0xd3, 0xaf, 0xc7,
    -	0x74, 0x49, 0x2b, 0x0a, 0x2f, 0x8c, 0x0d, 0x56, 0xe8, 0x93, 0x30, 0xc6, 0xff, 0x73, 0x23, 0x70,
    -	0x38, 0x73, 0x9c, 0xf1, 0x3e, 0x9b, 0xcf, 0x9b, 0x13, 0x2e, 0x1c, 0x17, 0xcc, 0xc7, 0x74, 0x68,
    -	0x88, 0x4d, 0x6e, 0x08, 0xc3, 0x58, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0xdf, 0x24,
    -	0x42, 0xa7, 0x7d, 0x32, 0xdb, 0x68, 0xec, 0xdf, 0x24, 0xe2, 0x12, 0xa8, 0x97, 0xc1, 0x26, 0x0b,
    -	0x74, 0x0d, 0xc6, 0x03, 0xe2, 0xd4, 0xdd, 0x98, 0xe9, 0x48, 0x37, 0xa6, 0xec, 0xe2, 0x8c, 0x8d,
    -	0x42, 0x38, 0xc1, 0x04, 0x5d, 0x85, 0x51, 0xd6, 0xe7, 0xed, 0x16, 0x67, 0x7a, 0xa2, 0x1b, 0x53,
    -	0xe6, 0x02, 0x51, 0xd5, 0x8a, 0x60, 0x83, 0x01, 0x7a, 0x03, 0x86, 0x1b, 0xee, 0x26, 0xa9, 0xed,
    -	0xd5, 0x1a, 0x64, 0x66, 0x94, 0x71, 0xcb, 0xdc, 0x0c, 0x57, 0x25, 0x11, 0x97, 0xcf, 0xd5, 0x5f,
    -	0x1c, 0x17, 0x47, 0xd7, 0xe1, 0x44, 0x44, 0x82, 0xa6, 0xeb, 0x39, 0x74, 0x13, 0x13, 0x57, 0x42,
    -	0x66, 0xcb, 0x1f, 0x63, 0xb3, 0xeb, 0x8c, 0x18, 0x8d, 0x13, 0x1b, 0x99, 0x54, 0x38, 0xa7, 0x34,
    -	0xba, 0x0d, 0x33, 0x19, 0x18, 0x3e, 0x6f, 0x8f, 0x31, 0xce, 0xaf, 0x09, 0xce, 0x33, 0x1b, 0x39,
    -	0x74, 0x07, 0x1d, 0x70, 0x38, 0x97, 0x3b, 0xba, 0x0a, 0x13, 0x6c, 0xe7, 0xac, 0xb4, 0x1b, 0x0d,
    -	0x51, 0xe1, 0x38, 0xab, 0xf0, 0x71, 0x29, 0x47, 0x94, 0x4d, 0xf4, 0xc1, 0x7e, 0x09, 0xe2, 0x7f,
    -	0x38, 0x59, 0x1a, 0xdd, 0x64, 0x66, 0xe3, 0x76, 0xe0, 0x46, 0x7b, 0x74, 0x55, 0x91, 0xdb, 0xd1,
    -	0xcc, 0x44, 0x47, 0x15, 0x9a, 0x4e, 0xaa, 0x6c, 0xcb, 0x3a, 0x10, 0x27, 0x19, 0xd2, 0xa3, 0x20,
    -	0x8c, 0xea, 0xae, 0x37, 0x33, 0xc9, 0xef, 0x53, 0x72, 0x27, 0xad, 0x52, 0x20, 0xe6, 0x38, 0x66,
    -	0x32, 0xa6, 0x3f, 0xae, 0xd2, 0x13, 0x77, 0x8a, 0x11, 0xc6, 0x26, 0x63, 0x89, 0xc0, 0x31, 0x0d,
    -	0x15, 0x82, 0xa3, 0x68, 0x6f, 0x06, 0x31, 0x52, 0xb5, 0x21, 0x6e, 0x6c, 0x7c, 0x1c, 0x53, 0xb8,
    -	0x7d, 0x13, 0xc6, 0xd5, 0x36, 0xc1, 0xfa, 0x04, 0x95, 0xa0, 0x9f, 0x89, 0x7d, 0x42, 0xe1, 0x3b,
    -	0x4c, 0x9b, 0xc0, 0x44, 0x42, 0xcc, 0xe1, 0xac, 0x09, 0xee, 0x1d, 0xb2, 0xb0, 0x17, 0x11, 0xae,
    -	0x8b, 0x28, 0x6a, 0x4d, 0x90, 0x08, 0x1c, 0xd3, 0xd8, 0xff, 0x9d, 0x8b, 0xcf, 0xf1, 0x29, 0xd1,
    -	0xc3, 0xb9, 0xf8, 0x0c, 0x0c, 0x31, 0x57, 0x15, 0x3f, 0xe0, 0xf6, 0xe4, 0xfe, 0x58, 0x60, 0xbe,
    -	0x2c, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x61, 0xac, 0xa6, 0x57, 0x20, 0x0e, 0x75, 0xb5, 0x8d, 0x18,
    -	0xb5, 0x63, 0x93, 0x16, 0xbd, 0x0c, 0x43, 0xcc, 0x2b, 0xab, 0xe6, 0x37, 0x84, 0xb4, 0x29, 0x25,
    -	0x93, 0xa1, 0x8a, 0x80, 0x1f, 0x68, 0xbf, 0xb1, 0xa2, 0x46, 0xe7, 0x60, 0x80, 0x36, 0xa1, 0x5c,
    -	0x11, 0xc7, 0xa9, 0xd2, 0x5d, 0x5e, 0x66, 0x50, 0x2c, 0xb0, 0xf6, 0x6f, 0x5a, 0x4c, 0x96, 0x4a,
    -	0xef, 0xf9, 0xe8, 0x32, 0x3b, 0x34, 0xd8, 0x09, 0xa2, 0xe9, 0x0e, 0x1f, 0xd3, 0x4e, 0x02, 0x85,
    -	0x3b, 0x48, 0xfc, 0xc7, 0x46, 0x49, 0xf4, 0x56, 0xf2, 0x64, 0xe0, 0x02, 0xc5, 0x0b, 0xb2, 0x0b,
    -	0x92, 0xa7, 0xc3, 0xc3, 0xf1, 0x11, 0x47, 0xdb, 0xd3, 0xe9, 0x88, 0xb0, 0xff, 0xd7, 0x82, 0x36,
    -	0x4b, 0xaa, 0x91, 0x13, 0x11, 0x54, 0x81, 0xc1, 0x5b, 0x8e, 0x1b, 0xb9, 0xde, 0x96, 0x90, 0xfb,
    -	0x3a, 0x1f, 0x74, 0xac, 0xd0, 0x0d, 0x5e, 0x80, 0x4b, 0x2f, 0xe2, 0x0f, 0x96, 0x6c, 0x28, 0xc7,
    -	0xa0, 0xed, 0x79, 0x94, 0x63, 0xa1, 0x57, 0x8e, 0x98, 0x17, 0xe0, 0x1c, 0xc5, 0x1f, 0x2c, 0xd9,
    -	0xa0, 0xb7, 0x01, 0xe4, 0x0e, 0x41, 0xea, 0x42, 0x77, 0xf8, 0x4c, 0x77, 0xa6, 0x1b, 0xaa, 0x0c,
    -	0x57, 0x4e, 0xc6, 0xff, 0xb1, 0xc6, 0xcf, 0x8e, 0xb4, 0x31, 0xd5, 0x1b, 0x83, 0x3e, 0x41, 0x97,
    -	0xa8, 0x13, 0x44, 0xa4, 0x3e, 0x1f, 0x89, 0xce, 0x79, 0xaa, 0xb7, 0xcb, 0xe1, 0x86, 0xdb, 0x24,
    -	0xfa, 0x72, 0x16, 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0x95, 0x22, 0xcc, 0xe4, 0x35, 0x97, 0x2e, 0x1a,
    -	0x72, 0xdb, 0x8d, 0x16, 0xa9, 0x58, 0x6b, 0x99, 0x8b, 0x66, 0x59, 0xc0, 0xb1, 0xa2, 0xa0, 0xb3,
    -	0x37, 0x74, 0xb7, 0xe4, 0xdd, 0xbe, 0x3f, 0x9e, 0xbd, 0x55, 0x06, 0xc5, 0x02, 0x4b, 0xe9, 0x02,
    -	0xe2, 0x84, 0xc2, 0x5d, 0x50, 0x9b, 0xe5, 0x98, 0x41, 0xb1, 0xc0, 0xea, 0x5a, 0xc6, 0xbe, 0x2e,
    -	0x5a, 0x46, 0xa3, 0x8b, 0xfa, 0xef, 0x6f, 0x17, 0xa1, 0x4f, 0x01, 0x6c, 0xba, 0x9e, 0x1b, 0x6e,
    -	0x33, 0xee, 0x03, 0x87, 0xe6, 0xae, 0x84, 0xe2, 0x15, 0xc5, 0x05, 0x6b, 0x1c, 0xd1, 0x8b, 0x30,
    -	0xa2, 0x36, 0x90, 0xf2, 0x12, 0x73, 0x56, 0xd0, 0x9c, 0xbf, 0xe2, 0xdd, 0x74, 0x09, 0xeb, 0x74,
    -	0xf6, 0x67, 0x92, 0xf3, 0x45, 0xac, 0x00, 0xad, 0x7f, 0xad, 0x5e, 0xfb, 0xb7, 0xd0, 0xb9, 0x7f,
    -	0xed, 0xdf, 0x1d, 0x84, 0x09, 0xa3, 0xb2, 0x76, 0xd8, 0xc3, 0x9e, 0x7b, 0x89, 0x1e, 0x40, 0x4e,
    -	0x44, 0xc4, 0xfa, 0xb3, 0xbb, 0x2f, 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x82,
    -	0xe1, 0x86, 0x13, 0x32, 0x8d, 0x25, 0x11, 0xeb, 0xae, 0x17, 0x66, 0xf1, 0x85, 0xd0, 0x09, 0x23,
    -	0xed, 0xd4, 0xe7, 0xbc, 0x63, 0x96, 0xf4, 0xa4, 0xa4, 0xf2, 0x95, 0xf4, 0x47, 0x55, 0x8d, 0xa0,
    -	0x42, 0xd8, 0x1e, 0xe6, 0x38, 0xf4, 0x32, 0xdb, 0x5a, 0xe9, 0xac, 0x58, 0xa4, 0xd2, 0x28, 0x9b,
    -	0x66, 0xfd, 0x86, 0x90, 0xad, 0x70, 0xd8, 0xa0, 0x8c, 0xef, 0x64, 0x03, 0x1d, 0xee, 0x64, 0x4f,
    -	0xc2, 0x20, 0xfb, 0xa1, 0x66, 0x80, 0x1a, 0x8d, 0x32, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0xa1,
    -	0xde, 0x26, 0x0c, 0xbd, 0xf5, 0x89, 0x49, 0xcd, 0x1c, 0x45, 0x86, 0xf8, 0x2e, 0x27, 0xa6, 0x3c,
    -	0x96, 0x38, 0xf4, 0xb3, 0x16, 0x20, 0xa7, 0x41, 0x6f, 0xcb, 0x14, 0xac, 0x2e, 0x37, 0xc0, 0x44,
    -	0xed, 0x57, 0xbb, 0x76, 0x7b, 0x3b, 0x9c, 0x9b, 0x4f, 0x95, 0xe6, 0x9a, 0xd2, 0x57, 0x44, 0x13,
    -	0x51, 0x9a, 0x40, 0x3f, 0x8c, 0x56, 0xdd, 0x30, 0xfa, 0xdc, 0x9f, 0x27, 0x0e, 0xa7, 0x8c, 0x26,
    -	0xa1, 0x6b, 0xfa, 0xe5, 0x6b, 0xe4, 0x90, 0x97, 0xaf, 0xb1, 0xdc, 0x8b, 0xd7, 0x77, 0x27, 0x2e,
    -	0x30, 0xa3, 0xec, 0xcb, 0x1f, 0xef, 0x72, 0x81, 0x11, 0xea, 0xf4, 0x1e, 0xae, 0x31, 0xb3, 0x6d,
    -	0x78, 0x28, 0xa7, 0x8b, 0x32, 0x14, 0xbc, 0x4b, 0xba, 0x82, 0xb7, 0x8b, 0x5a, 0x70, 0x4e, 0x7e,
    -	0xc4, 0xdc, 0x9b, 0x6d, 0xc7, 0x8b, 0xdc, 0x68, 0x4f, 0x57, 0x08, 0x3f, 0x05, 0xe3, 0x4b, 0x0e,
    -	0x69, 0xfa, 0xde, 0xb2, 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x19, 0xe8, 0x63, 0xd2, 0x0d, 0xdf,
    -	0xdb, 0xfb, 0x68, 0xd3, 0x31, 0x83, 0xd8, 0x5b, 0x70, 0x7c, 0xc9, 0xbf, 0xe5, 0xdd, 0x72, 0x82,
    -	0xfa, 0x7c, 0xa5, 0xac, 0x29, 0xac, 0xd6, 0xa5, 0xc2, 0xc4, 0xca, 0xbf, 0x8e, 0x6a, 0x25, 0x79,
    -	0x2f, 0xac, 0xb8, 0x0d, 0x92, 0xa3, 0x56, 0xfc, 0x3f, 0x0a, 0x46, 0x4d, 0x31, 0xbd, 0x32, 0x8a,
    -	0x59, 0xb9, 0x1e, 0x00, 0x6f, 0xc2, 0xd0, 0xa6, 0x4b, 0x1a, 0x75, 0x4c, 0x36, 0x45, 0xef, 0x3c,
    -	0x91, 0xef, 0x23, 0xb8, 0x42, 0x29, 0x95, 0xf5, 0x8e, 0xa9, 0x5b, 0x56, 0x44, 0x61, 0xac, 0xd8,
    -	0xa0, 0x1d, 0x98, 0x94, 0x7d, 0x28, 0xb1, 0x62, 0xc3, 0x79, 0xb2, 0xd3, 0xcc, 0x32, 0x99, 0x33,
    -	0x7f, 0x69, 0x9c, 0x60, 0x83, 0x53, 0x8c, 0xd1, 0x29, 0xe8, 0x6b, 0xd2, 0xa3, 0xb5, 0x8f, 0x75,
    -	0x3f, 0xd3, 0xaf, 0x30, 0x55, 0x11, 0x83, 0xda, 0x3f, 0x61, 0xc1, 0x43, 0xa9, 0x9e, 0x11, 0x2a,
    -	0xb3, 0xfb, 0x3c, 0x0a, 0x49, 0x15, 0x56, 0xa1, 0xbb, 0x0a, 0xcb, 0xfe, 0xff, 0x2c, 0x38, 0xb6,
    -	0xdc, 0x6c, 0x45, 0x7b, 0x4b, 0xae, 0x69, 0xae, 0x7f, 0x09, 0x06, 0x9a, 0xa4, 0xee, 0xb6, 0x9b,
    -	0x62, 0xe4, 0x4a, 0xf2, 0xf8, 0x59, 0x63, 0xd0, 0x83, 0xfd, 0xd2, 0x58, 0x35, 0xf2, 0x03, 0x67,
    -	0x8b, 0x70, 0x00, 0x16, 0xe4, 0xec, 0x10, 0x77, 0xef, 0x90, 0x55, 0xb7, 0xe9, 0x46, 0xf7, 0x36,
    -	0xdb, 0x85, 0xa5, 0x5d, 0x32, 0xc1, 0x31, 0x3f, 0xfb, 0x1b, 0x16, 0x4c, 0xc8, 0x79, 0x3f, 0x5f,
    -	0xaf, 0x07, 0x24, 0x0c, 0xd1, 0x2c, 0x14, 0xdc, 0x96, 0x68, 0x25, 0x88, 0x56, 0x16, 0xca, 0x15,
    -	0x5c, 0x70, 0x5b, 0xf2, 0xbe, 0xc0, 0x4e, 0xb8, 0xa2, 0xe9, 0x74, 0x70, 0x59, 0xc0, 0xb1, 0xa2,
    -	0x40, 0xe7, 0x61, 0xc8, 0xf3, 0xeb, 0x5c, 0xe4, 0x16, 0x46, 0x5c, 0x4a, 0xb9, 0x2e, 0x60, 0x58,
    -	0x61, 0x51, 0x05, 0x86, 0xb9, 0x4b, 0x6a, 0x3c, 0x69, 0x7b, 0x72, 0x6c, 0x65, 0x5f, 0xb6, 0x21,
    -	0x4b, 0xe2, 0x98, 0x89, 0xfd, 0xdb, 0x16, 0x8c, 0xca, 0x2f, 0xeb, 0xf1, 0x32, 0x44, 0x97, 0x56,
    -	0x7c, 0x11, 0x8a, 0x97, 0x16, 0xbd, 0xcc, 0x30, 0x8c, 0x71, 0x87, 0x29, 0x1e, 0xea, 0x0e, 0x73,
    -	0x11, 0x46, 0x9c, 0x56, 0xab, 0x62, 0x5e, 0x80, 0xd8, 0x54, 0x9a, 0x8f, 0xc1, 0x58, 0xa7, 0xb1,
    -	0x7f, 0xbc, 0x00, 0xe3, 0xf2, 0x0b, 0xaa, 0xed, 0x9b, 0x21, 0x89, 0xd0, 0x06, 0x0c, 0x3b, 0x7c,
    -	0x94, 0x88, 0x9c, 0xe4, 0x8f, 0x66, 0x2b, 0xe6, 0x8c, 0x21, 0x8d, 0x25, 0xb9, 0x79, 0x59, 0x1a,
    -	0xc7, 0x8c, 0x50, 0x03, 0xa6, 0x3c, 0x3f, 0x62, 0xa7, 0xba, 0xc2, 0x77, 0xb2, 0x95, 0x26, 0xb9,
    -	0x9f, 0x14, 0xdc, 0xa7, 0xd6, 0x93, 0x5c, 0x70, 0x9a, 0x31, 0x5a, 0x96, 0xca, 0xce, 0x62, 0xbe,
    -	0x96, 0x4a, 0x1f, 0xb8, 0x6c, 0x5d, 0xa7, 0xfd, 0x1b, 0x16, 0x0c, 0x4b, 0xb2, 0xa3, 0x30, 0x8b,
    -	0xaf, 0xc1, 0x60, 0xc8, 0x06, 0x41, 0x76, 0x8d, 0xdd, 0xa9, 0xe1, 0x7c, 0xbc, 0x62, 0x61, 0x85,
    -	0xff, 0x0f, 0xb1, 0xe4, 0xc1, 0x6c, 0x5d, 0xaa, 0xf9, 0xef, 0x11, 0x5b, 0x97, 0x6a, 0x4f, 0xce,
    -	0xa1, 0xf4, 0x97, 0xac, 0xcd, 0x9a, 0xf2, 0x98, 0xca, 0xd4, 0xad, 0x80, 0x6c, 0xba, 0xb7, 0x93,
    -	0x32, 0x75, 0x85, 0x41, 0xb1, 0xc0, 0xa2, 0xb7, 0x61, 0xb4, 0x26, 0x8d, 0x1c, 0xf1, 0x0a, 0x3f,
    -	0xd7, 0xd1, 0xe0, 0xa6, 0x6c, 0xb3, 0x5c, 0x49, 0xb7, 0xa8, 0x95, 0xc7, 0x06, 0x37, 0xd3, 0xe5,
    -	0xaa, 0xd8, 0xcd, 0xe5, 0x2a, 0xe6, 0x9b, 0xef, 0x80, 0xf4, 0x93, 0x16, 0x0c, 0x70, 0xe5, 0x76,
    -	0x6f, 0xb6, 0x05, 0xcd, 0x54, 0x1d, 0xf7, 0xdd, 0x75, 0x0a, 0x14, 0x92, 0x06, 0x5a, 0x83, 0x61,
    -	0xf6, 0x83, 0x29, 0xe7, 0x8b, 0xf9, 0x0f, 0xb4, 0x78, 0xad, 0x7a, 0x03, 0xaf, 0xcb, 0x62, 0x38,
    -	0xe6, 0x60, 0xff, 0x58, 0x91, 0xee, 0x6e, 0x31, 0xa9, 0x71, 0xe8, 0x5b, 0x0f, 0xee, 0xd0, 0x2f,
    -	0x3c, 0xa8, 0x43, 0x7f, 0x0b, 0x26, 0x6a, 0x9a, 0x61, 0x3b, 0x1e, 0xc9, 0xf3, 0x1d, 0x27, 0x89,
    -	0x66, 0x03, 0xe7, 0xea, 0xbf, 0x45, 0x93, 0x09, 0x4e, 0x72, 0x45, 0x9f, 0x80, 0x51, 0x3e, 0xce,
    -	0xa2, 0x16, 0xee, 0xb5, 0xf6, 0x78, 0xfe, 0x7c, 0xd1, 0xab, 0xe0, 0xea, 0x62, 0xad, 0x38, 0x36,
    -	0x98, 0xd9, 0x7f, 0x6b, 0x01, 0x5a, 0x6e, 0x6d, 0x93, 0x26, 0x09, 0x9c, 0x46, 0x6c, 0x9f, 0xfa,
    -	0xa2, 0x05, 0x33, 0x24, 0x05, 0x5e, 0xf4, 0x9b, 0x4d, 0x71, 0x1b, 0xcd, 0x51, 0x98, 0x2c, 0xe7,
    -	0x94, 0x51, 0x4f, 0xc6, 0x66, 0xf2, 0x28, 0x70, 0x6e, 0x7d, 0x68, 0x0d, 0xa6, 0xf9, 0x29, 0xa9,
    -	0x10, 0x9a, 0x9b, 0xd8, 0xc3, 0x82, 0xf1, 0xf4, 0x46, 0x9a, 0x04, 0x67, 0x95, 0xb3, 0x7f, 0x63,
    -	0x0c, 0x72, 0x5b, 0xf1, 0xbe, 0x61, 0xee, 0x7d, 0xc3, 0xdc, 0xfb, 0x86, 0xb9, 0xf7, 0x0d, 0x73,
    -	0xef, 0x1b, 0xe6, 0xde, 0x37, 0xcc, 0xbd, 0x47, 0x0d, 0x73, 0xff, 0x9b, 0x05, 0xc7, 0xd5, 0xf1,
    -	0x65, 0x5c, 0xd8, 0x3f, 0x0b, 0xd3, 0x7c, 0xb9, 0x19, 0xde, 0xde, 0xe2, 0xb8, 0xbe, 0x98, 0x39,
    -	0x73, 0x13, 0xaf, 0x12, 0x8c, 0x82, 0xfc, 0x79, 0x57, 0x06, 0x02, 0x67, 0x55, 0x63, 0xff, 0xca,
    -	0x10, 0xf4, 0x2f, 0xef, 0x12, 0x2f, 0x3a, 0x82, 0xab, 0x4d, 0x0d, 0xc6, 0x5d, 0x6f, 0xd7, 0x6f,
    -	0xec, 0x92, 0x3a, 0xc7, 0x1f, 0xe6, 0x06, 0x7e, 0x42, 0xb0, 0x1e, 0x2f, 0x1b, 0x2c, 0x70, 0x82,
    -	0xe5, 0x83, 0x30, 0x6f, 0x5c, 0x82, 0x01, 0x7e, 0xf8, 0x08, 0xdb, 0x46, 0xe6, 0x9e, 0xcd, 0x3a,
    -	0x51, 0x1c, 0xa9, 0xb1, 0xe9, 0x85, 0x1f, 0x6e, 0xa2, 0x38, 0xfa, 0x0c, 0x8c, 0x6f, 0xba, 0x41,
    -	0x18, 0x6d, 0xb8, 0x4d, 0x7a, 0x34, 0x34, 0x5b, 0xf7, 0x60, 0xce, 0x50, 0xfd, 0xb0, 0x62, 0x70,
    -	0xc2, 0x09, 0xce, 0x68, 0x0b, 0xc6, 0x1a, 0x8e, 0x5e, 0xd5, 0xe0, 0xa1, 0xab, 0x52, 0xa7, 0xc3,
    -	0xaa, 0xce, 0x08, 0x9b, 0x7c, 0xe9, 0x72, 0xaa, 0x31, 0x8d, 0xfc, 0x10, 0x53, 0x67, 0xa8, 0xe5,
    -	0xc4, 0x55, 0xf1, 0x1c, 0x47, 0x05, 0x34, 0xe6, 0x29, 0x3f, 0x6c, 0x0a, 0x68, 0x9a, 0x3f, 0xfc,
    -	0xa7, 0x61, 0x98, 0xd0, 0x2e, 0xa4, 0x8c, 0xc5, 0x01, 0x73, 0xa1, 0xb7, 0xb6, 0xae, 0xb9, 0xb5,
    -	0xc0, 0x37, 0x0d, 0x49, 0xcb, 0x92, 0x13, 0x8e, 0x99, 0xa2, 0x45, 0x18, 0x08, 0x49, 0xe0, 0x2a,
    -	0x65, 0x75, 0x87, 0x61, 0x64, 0x64, 0xfc, 0xfd, 0x20, 0xff, 0x8d, 0x45, 0x51, 0x3a, 0xbd, 0x1c,
    -	0xa6, 0x8a, 0x65, 0x87, 0x81, 0x36, 0xbd, 0xe6, 0x19, 0x14, 0x0b, 0x2c, 0x7a, 0x03, 0x06, 0x03,
    -	0xd2, 0x60, 0x96, 0xca, 0xb1, 0xde, 0x27, 0x39, 0x37, 0x7c, 0xf2, 0x72, 0x58, 0x32, 0x40, 0x57,
    -	0x00, 0x05, 0x84, 0x0a, 0x78, 0xae, 0xb7, 0xa5, 0xfc, 0xc7, 0xc5, 0x46, 0xab, 0x04, 0x69, 0x1c,
    -	0x53, 0xc8, 0xa7, 0xa3, 0x38, 0xa3, 0x18, 0xba, 0x04, 0x53, 0x0a, 0x5a, 0xf6, 0xc2, 0xc8, 0xa1,
    -	0x1b, 0xdc, 0x04, 0xe3, 0xa5, 0xf4, 0x2b, 0x38, 0x49, 0x80, 0xd3, 0x65, 0xec, 0x9f, 0xb7, 0x80,
    -	0xf7, 0xf3, 0x11, 0x68, 0x15, 0x5e, 0x37, 0xb5, 0x0a, 0x27, 0x73, 0x47, 0x2e, 0x47, 0xa3, 0xf0,
    -	0xf3, 0x16, 0x8c, 0x68, 0x23, 0x1b, 0xcf, 0x59, 0xab, 0xc3, 0x9c, 0x6d, 0xc3, 0x24, 0x9d, 0xe9,
    -	0x57, 0x6f, 0x86, 0x24, 0xd8, 0x25, 0x75, 0x36, 0x31, 0x0b, 0xf7, 0x36, 0x31, 0x95, 0xaf, 0xea,
    -	0x6a, 0x82, 0x21, 0x4e, 0x55, 0x61, 0x7f, 0x5a, 0x36, 0x55, 0xb9, 0xf6, 0xd6, 0xd4, 0x98, 0x27,
    -	0x5c, 0x7b, 0xd5, 0xa8, 0xe2, 0x98, 0x86, 0x2e, 0xb5, 0x6d, 0x3f, 0x8c, 0x92, 0xae, 0xbd, 0x97,
    -	0xfd, 0x30, 0xc2, 0x0c, 0x63, 0x3f, 0x0f, 0xb0, 0x7c, 0x9b, 0xd4, 0xf8, 0x8c, 0xd5, 0x2f, 0x3d,
    -	0x56, 0xfe, 0xa5, 0xc7, 0xfe, 0x23, 0x0b, 0xc6, 0x57, 0x16, 0x8d, 0x93, 0x6b, 0x0e, 0x80, 0xdf,
    -	0xd4, 0x6e, 0xdc, 0x58, 0x97, 0xfe, 0x25, 0xdc, 0xc4, 0xae, 0xa0, 0x58, 0xa3, 0x40, 0x27, 0xa1,
    -	0xd8, 0x68, 0x7b, 0x42, 0xed, 0x39, 0x48, 0x8f, 0xc7, 0xd5, 0xb6, 0x87, 0x29, 0x4c, 0x7b, 0x36,
    -	0x56, 0xec, 0xf9, 0xd9, 0x58, 0xd7, 0xe8, 0x35, 0xa8, 0x04, 0xfd, 0xb7, 0x6e, 0xb9, 0x75, 0xfe,
    -	0x28, 0x5f, 0xf8, 0xbe, 0xdc, 0xb8, 0x51, 0x5e, 0x0a, 0x31, 0x87, 0xdb, 0x5f, 0x2a, 0xc2, 0xec,
    -	0x4a, 0x83, 0xdc, 0x7e, 0x97, 0x81, 0x09, 0x7a, 0x7d, 0xf4, 0x76, 0x38, 0x05, 0xd2, 0x61, 0x1f,
    -	0x36, 0x76, 0xef, 0x8f, 0x4d, 0x18, 0xe4, 0x9e, 0xad, 0x32, 0x4c, 0x41, 0xa6, 0x3d, 0x31, 0xbf,
    -	0x43, 0xe6, 0xb8, 0x87, 0xac, 0xb0, 0x27, 0xaa, 0x03, 0x53, 0x40, 0xb1, 0x64, 0x3e, 0xfb, 0x0a,
    -	0x8c, 0xea, 0x94, 0x87, 0x7a, 0x62, 0xfc, 0xfd, 0x45, 0x98, 0xa4, 0x2d, 0x78, 0xa0, 0x03, 0x71,
    -	0x2d, 0x3d, 0x10, 0xf7, 0xfb, 0x99, 0x69, 0xf7, 0xd1, 0x78, 0x3b, 0x39, 0x1a, 0x17, 0xf3, 0x46,
    -	0xe3, 0xa8, 0xc7, 0xe0, 0x07, 0x2c, 0x98, 0x5e, 0x69, 0xf8, 0xb5, 0x9d, 0xc4, 0x53, 0xd0, 0x17,
    -	0x61, 0x84, 0x6e, 0xc7, 0xa1, 0x11, 0x15, 0xc5, 0x88, 0x93, 0x23, 0x50, 0x58, 0xa7, 0xd3, 0x8a,
    -	0x5d, 0xbb, 0x56, 0x5e, 0xca, 0x0a, 0xaf, 0x23, 0x50, 0x58, 0xa7, 0xb3, 0xff, 0xc0, 0x82, 0xd3,
    -	0x97, 0x16, 0x97, 0xe3, 0xa9, 0x98, 0x8a, 0xf0, 0x73, 0x0e, 0x06, 0x5a, 0x75, 0xad, 0x29, 0xb1,
    -	0x5a, 0x78, 0x89, 0xb5, 0x42, 0x60, 0xdf, 0x2b, 0xc1, 0xb4, 0xae, 0x01, 0x5c, 0xc2, 0x95, 0x45,
    -	0xb1, 0xef, 0x4a, 0x2b, 0x90, 0x95, 0x6b, 0x05, 0x7a, 0x1c, 0x06, 0xe9, 0xb9, 0xe0, 0xd6, 0x64,
    -	0xbb, 0xb9, 0xc7, 0x00, 0x07, 0x61, 0x89, 0xb3, 0x7f, 0xce, 0x82, 0xe9, 0x4b, 0x6e, 0x44, 0x0f,
    -	0xed, 0x64, 0x08, 0x1b, 0x7a, 0x6a, 0x87, 0x6e, 0xe4, 0x07, 0x7b, 0xc9, 0x10, 0x36, 0x58, 0x61,
    -	0xb0, 0x46, 0xc5, 0x3f, 0x68, 0xd7, 0x65, 0x4f, 0x35, 0x0a, 0xa6, 0xdd, 0x0d, 0x0b, 0x38, 0x56,
    -	0x14, 0xb4, 0xbf, 0xea, 0x6e, 0xc0, 0x54, 0x96, 0x7b, 0x62, 0xe3, 0x56, 0xfd, 0xb5, 0x24, 0x11,
    -	0x38, 0xa6, 0xb1, 0xff, 0xda, 0x82, 0xd2, 0x25, 0xfe, 0xe0, 0x74, 0x33, 0xcc, 0xd9, 0x74, 0x9f,
    -	0x87, 0x61, 0x22, 0x0d, 0x04, 0xf2, 0xf1, 0xad, 0x14, 0x44, 0x95, 0xe5, 0x80, 0x47, 0xd2, 0x51,
    -	0x74, 0x3d, 0xbc, 0x57, 0x3f, 0xdc, 0x83, 0xe3, 0x15, 0x40, 0x44, 0xaf, 0x4b, 0x0f, 0x2d, 0xc4,
    -	0x62, 0x94, 0x2c, 0xa7, 0xb0, 0x38, 0xa3, 0x84, 0xfd, 0x13, 0x16, 0x1c, 0x57, 0x1f, 0xfc, 0x9e,
    -	0xfb, 0x4c, 0xfb, 0x6b, 0x05, 0x18, 0xbb, 0xbc, 0xb1, 0x51, 0xb9, 0x44, 0x22, 0x6d, 0x56, 0x76,
    -	0x36, 0xfb, 0x63, 0xcd, 0x7a, 0xd9, 0xe9, 0x8e, 0xd8, 0x8e, 0xdc, 0xc6, 0x1c, 0x0f, 0x98, 0x37,
    -	0x57, 0xf6, 0xa2, 0xab, 0x41, 0x35, 0x0a, 0x5c, 0x6f, 0x2b, 0x73, 0xa6, 0x4b, 0x99, 0xa5, 0x98,
    -	0x27, 0xb3, 0xa0, 0xe7, 0x61, 0x80, 0x45, 0xec, 0x93, 0x83, 0xf0, 0xb0, 0xba, 0x62, 0x31, 0xe8,
    -	0xc1, 0x7e, 0x69, 0xf8, 0x1a, 0x2e, 0xf3, 0x3f, 0x58, 0x90, 0xa2, 0x6b, 0x30, 0xb2, 0x1d, 0x45,
    -	0xad, 0xcb, 0xc4, 0xa9, 0x93, 0x40, 0xee, 0xb2, 0x67, 0xb2, 0x76, 0x59, 0xda, 0x09, 0x9c, 0x2c,
    -	0xde, 0x98, 0x62, 0x58, 0x88, 0x75, 0x3e, 0x76, 0x15, 0x20, 0xc6, 0xdd, 0x27, 0xc3, 0x8d, 0xbd,
    -	0x01, 0xc3, 0xf4, 0x73, 0xe7, 0x1b, 0xae, 0xd3, 0xd9, 0x34, 0xfe, 0x34, 0x0c, 0x4b, 0xc3, 0x77,
    -	0x28, 0xe2, 0x69, 0xb0, 0x13, 0x49, 0xda, 0xc5, 0x43, 0x1c, 0xe3, 0xed, 0xc7, 0x40, 0x38, 0xaf,
    -	0x76, 0x62, 0x69, 0x6f, 0xc2, 0x31, 0xe6, 0x85, 0xeb, 0x44, 0xdb, 0xc6, 0x1c, 0xed, 0x3e, 0x19,
    -	0x9e, 0x11, 0xf7, 0x3a, 0xfe, 0x65, 0x33, 0xda, 0xeb, 0xe7, 0x51, 0xc9, 0x31, 0xbe, 0xe3, 0xd9,
    -	0x7f, 0xd5, 0x07, 0x0f, 0x97, 0xab, 0xf9, 0x81, 0xa0, 0x5e, 0x86, 0x51, 0x2e, 0x2e, 0xd2, 0xa9,
    -	0xe1, 0x34, 0x44, 0xbd, 0x4a, 0x03, 0xba, 0xa1, 0xe1, 0xb0, 0x41, 0x89, 0x4e, 0x43, 0xd1, 0x7d,
    -	0xc7, 0x4b, 0xbe, 0x0d, 0x2c, 0xbf, 0xb9, 0x8e, 0x29, 0x9c, 0xa2, 0xa9, 0xe4, 0xc9, 0xb7, 0x74,
    -	0x85, 0x56, 0xd2, 0xe7, 0xeb, 0x30, 0xee, 0x86, 0xb5, 0xd0, 0x2d, 0x7b, 0x74, 0x9d, 0x6a, 0x2b,
    -	0x5d, 0xe9, 0x1c, 0x68, 0xa3, 0x15, 0x16, 0x27, 0xa8, 0xb5, 0xf3, 0xa5, 0xbf, 0x67, 0xe9, 0xb5,
    -	0x6b, 0x18, 0x0a, 0xba, 0xfd, 0xb7, 0xd8, 0xd7, 0x85, 0x4c, 0x05, 0x2f, 0xb6, 0x7f, 0xfe, 0xc1,
    -	0x21, 0x96, 0x38, 0x7a, 0xa1, 0xab, 0x6d, 0x3b, 0xad, 0xf9, 0x76, 0xb4, 0xbd, 0xe4, 0x86, 0x35,
    -	0x7f, 0x97, 0x04, 0x7b, 0xec, 0x2e, 0x3e, 0x14, 0x5f, 0xe8, 0x14, 0x62, 0xf1, 0xf2, 0x7c, 0x85,
    -	0x52, 0xe2, 0x74, 0x19, 0x34, 0x0f, 0x13, 0x12, 0x58, 0x25, 0x21, 0x3b, 0x02, 0x46, 0x18, 0x1b,
    -	0xf5, 0x5a, 0x4f, 0x80, 0x15, 0x93, 0x24, 0xbd, 0x29, 0xe0, 0xc2, 0xfd, 0x10, 0x70, 0x5f, 0x82,
    -	0x31, 0xd7, 0x73, 0x23, 0xd7, 0x89, 0x7c, 0x6e, 0x3f, 0xe2, 0xd7, 0x6e, 0xa6, 0x60, 0x2e, 0xeb,
    -	0x08, 0x6c, 0xd2, 0xd9, 0xff, 0xa1, 0x0f, 0xa6, 0xd8, 0xb0, 0xbd, 0x3f, 0xc3, 0xbe, 0x93, 0x66,
    -	0xd8, 0xb5, 0xf4, 0x0c, 0xbb, 0x1f, 0x92, 0xfb, 0x3d, 0x4f, 0xb3, 0xcf, 0xc0, 0xb0, 0x7a, 0xa0,
    -	0x28, 0x5f, 0x28, 0x5b, 0x39, 0x2f, 0x94, 0xbb, 0x9f, 0xde, 0xd2, 0x25, 0xad, 0x98, 0xe9, 0x92,
    -	0xf6, 0x15, 0x0b, 0x62, 0xc3, 0x02, 0x7a, 0x13, 0x86, 0x5b, 0x3e, 0x73, 0xa1, 0x0d, 0xa4, 0x5f,
    -	0xfa, 0x63, 0x1d, 0x2d, 0x13, 0x3c, 0x16, 0x5e, 0xc0, 0x7b, 0xa1, 0x22, 0x8b, 0xe2, 0x98, 0x0b,
    -	0xba, 0x02, 0x83, 0xad, 0x80, 0x54, 0x23, 0x16, 0xa8, 0xa9, 0x77, 0x86, 0x7c, 0xd6, 0xf0, 0x82,
    -	0x58, 0x72, 0xb0, 0x7f, 0xb1, 0x00, 0x93, 0x49, 0x52, 0xf4, 0x1a, 0xf4, 0x91, 0xdb, 0xa4, 0x26,
    -	0xda, 0x9b, 0x79, 0x14, 0xc7, 0xaa, 0x09, 0xde, 0x01, 0xf4, 0x3f, 0x66, 0xa5, 0xd0, 0x65, 0x18,
    -	0xa4, 0xe7, 0xf0, 0x25, 0x15, 0x94, 0xf0, 0x91, 0xbc, 0xb3, 0x5c, 0x09, 0x34, 0xbc, 0x71, 0x02,
    -	0x84, 0x65, 0x71, 0xe6, 0x07, 0x56, 0x6b, 0x55, 0xe9, 0x15, 0x27, 0xea, 0x74, 0x13, 0xdf, 0x58,
    -	0xac, 0x70, 0x22, 0xc1, 0x8d, 0xfb, 0x81, 0x49, 0x20, 0x8e, 0x99, 0xa0, 0x8f, 0x42, 0x7f, 0xd8,
    -	0x20, 0xa4, 0x25, 0x0c, 0xfd, 0x99, 0xca, 0xc5, 0x2a, 0x25, 0x10, 0x9c, 0x98, 0x32, 0x82, 0x01,
    -	0x30, 0x2f, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x1d, 0xe7, 0x1c, 0x6f, 0x8b, 0x1c, 0x81, 0x3e, 0x7e,
    -	0x09, 0xfa, 0xc2, 0x16, 0xa9, 0x75, 0xf2, 0x0f, 0x8f, 0xdb, 0x53, 0x6d, 0x91, 0x5a, 0x3c, 0x67,
    -	0xe9, 0x3f, 0xcc, 0x4a, 0xdb, 0x3f, 0x08, 0x30, 0x1e, 0x93, 0x95, 0x23, 0xd2, 0x44, 0xcf, 0x1a,
    -	0x71, 0x51, 0x4e, 0x26, 0xe2, 0xa2, 0x0c, 0x33, 0x6a, 0x4d, 0xf5, 0xfb, 0x19, 0x28, 0x36, 0x9d,
    -	0xdb, 0x42, 0xb7, 0xf7, 0x74, 0xe7, 0x66, 0x50, 0xfe, 0x73, 0x6b, 0xce, 0x6d, 0x7e, 0xfd, 0x7d,
    -	0x5a, 0xae, 0xb1, 0x35, 0xe7, 0x76, 0x57, 0x1f, 0x66, 0x5a, 0x09, 0xab, 0xcb, 0xf5, 0x84, 0x4f,
    -	0x58, 0x4f, 0x75, 0xb9, 0x5e, 0xb2, 0x2e, 0xd7, 0xeb, 0xa1, 0x2e, 0xd7, 0x43, 0x77, 0x60, 0x50,
    -	0xb8, 0x6c, 0x8a, 0x10, 0x73, 0x17, 0x7a, 0xa8, 0x4f, 0x78, 0x7c, 0xf2, 0x3a, 0x2f, 0xc8, 0xeb,
    -	0xbd, 0x80, 0x76, 0xad, 0x57, 0x56, 0x88, 0xfe, 0x77, 0x0b, 0xc6, 0xc5, 0x6f, 0x4c, 0xde, 0x69,
    -	0x93, 0x30, 0x12, 0xe2, 0xef, 0x87, 0x7a, 0x6f, 0x83, 0x28, 0xc8, 0x9b, 0xf2, 0x21, 0x79, 0x52,
    -	0x99, 0xc8, 0xae, 0x2d, 0x4a, 0xb4, 0x02, 0xfd, 0xa2, 0x05, 0xc7, 0x9a, 0xce, 0x6d, 0x5e, 0x23,
    -	0x87, 0x61, 0x27, 0x72, 0x7d, 0xe1, 0xfa, 0xf0, 0x5a, 0x6f, 0xc3, 0x9f, 0x2a, 0xce, 0x1b, 0x29,
    -	0xed, 0x9c, 0xc7, 0xb2, 0x48, 0xba, 0x36, 0x35, 0xb3, 0x5d, 0xb3, 0x9b, 0x30, 0x24, 0xe7, 0xdb,
    -	0x83, 0xf4, 0x0f, 0x67, 0xf5, 0x88, 0xb9, 0xf6, 0x40, 0xeb, 0xf9, 0x0c, 0x8c, 0xea, 0x73, 0xec,
    -	0x81, 0xd6, 0xf5, 0x0e, 0x4c, 0x67, 0xcc, 0xa5, 0x07, 0x5a, 0xe5, 0x2d, 0x38, 0x99, 0x3b, 0x3f,
    -	0x1e, 0xa8, 0x7f, 0xff, 0xd7, 0x2c, 0x7d, 0x1f, 0x3c, 0x02, 0xa3, 0xc8, 0xa2, 0x69, 0x14, 0x39,
    -	0xd3, 0x79, 0xe5, 0xe4, 0x58, 0x46, 0xde, 0xd6, 0x1b, 0x4d, 0x77, 0x75, 0xf4, 0x06, 0x0c, 0x34,
    -	0x28, 0x44, 0x3a, 0xfe, 0xda, 0xdd, 0x57, 0x64, 0x2c, 0x8e, 0x32, 0x78, 0x88, 0x05, 0x07, 0xfb,
    -	0x57, 0x2d, 0xe8, 0x3b, 0x82, 0x9e, 0xc0, 0x66, 0x4f, 0x3c, 0x9b, 0xcb, 0x5a, 0x04, 0xff, 0x9f,
    -	0xc3, 0xce, 0xad, 0xe5, 0xdb, 0x11, 0xf1, 0x42, 0x76, 0xa6, 0x67, 0x76, 0xcc, 0xbe, 0x05, 0xd3,
    -	0xab, 0xbe, 0x53, 0x5f, 0x70, 0x1a, 0x8e, 0x57, 0x23, 0x41, 0xd9, 0xdb, 0x3a, 0x94, 0xd7, 0x7a,
    -	0xa1, 0xab, 0xd7, 0xfa, 0xcb, 0x30, 0xe0, 0xb6, 0xb4, 0xe8, 0xe1, 0x67, 0x69, 0x07, 0x96, 0x2b,
    -	0x22, 0x70, 0x38, 0x32, 0x2a, 0x67, 0x50, 0x2c, 0xe8, 0xe9, 0xc8, 0x73, 0x77, 0xb1, 0xbe, 0xfc,
    -	0x91, 0xa7, 0x52, 0x7c, 0x32, 0xc6, 0x94, 0xe1, 0xd8, 0xbc, 0x0d, 0x46, 0x15, 0xe2, 0x59, 0x19,
    -	0x86, 0x41, 0x97, 0x7f, 0xa9, 0x18, 0xfe, 0x27, 0xb2, 0xa5, 0xeb, 0x54, 0xc7, 0x68, 0x0f, 0xa6,
    -	0x38, 0x00, 0x4b, 0x46, 0xf6, 0xcb, 0x90, 0x19, 0x13, 0xa4, 0xbb, 0xe6, 0xc4, 0xfe, 0x38, 0x4c,
    -	0xb1, 0x92, 0x87, 0xd4, 0x4a, 0xd8, 0x09, 0x7d, 0x6f, 0x46, 0x20, 0x58, 0xfb, 0xdf, 0x5a, 0x80,
    -	0xd6, 0xfc, 0xba, 0xbb, 0xb9, 0x27, 0x98, 0xf3, 0xef, 0x7f, 0x07, 0x4a, 0xfc, 0xda, 0x97, 0x0c,
    -	0x96, 0xba, 0xd8, 0x70, 0xc2, 0x50, 0xd3, 0x35, 0x3f, 0x21, 0xea, 0x2d, 0x6d, 0x74, 0x26, 0xc7,
    -	0xdd, 0xf8, 0xa1, 0x37, 0x13, 0x91, 0xe0, 0x3e, 0x9c, 0x8a, 0x04, 0xf7, 0x44, 0xa6, 0xc7, 0x47,
    -	0xba, 0xf5, 0x32, 0x42, 0x9c, 0xfd, 0x05, 0x0b, 0x26, 0xd6, 0x13, 0xc1, 0x3f, 0xcf, 0x31, 0xf3,
    -	0x77, 0x86, 0x0d, 0xa5, 0xca, 0xa0, 0x58, 0x60, 0xef, 0xbb, 0x8e, 0xf1, 0x1f, 0x2c, 0x88, 0x63,
    -	0x10, 0x1d, 0x81, 0x54, 0xbb, 0x68, 0x48, 0xb5, 0x99, 0x37, 0x04, 0xd5, 0x9c, 0x3c, 0xa1, 0x16,
    -	0x5d, 0x51, 0x63, 0xd2, 0xe1, 0x72, 0x10, 0xb3, 0xe1, 0xeb, 0x6c, 0xdc, 0x1c, 0x38, 0x35, 0x1a,
    -	0x7f, 0x52, 0x00, 0xa4, 0x68, 0x7b, 0x8e, 0x1e, 0x98, 0x2e, 0x71, 0x7f, 0xa2, 0x07, 0xee, 0x02,
    -	0x62, 0x0e, 0x1c, 0x81, 0xe3, 0x85, 0x9c, 0xad, 0x2b, 0xb4, 0xaa, 0x87, 0xf3, 0x0e, 0x99, 0x95,
    -	0xcf, 0x09, 0x57, 0x53, 0xdc, 0x70, 0x46, 0x0d, 0x9a, 0x63, 0x4e, 0x7f, 0xaf, 0x8e, 0x39, 0x03,
    -	0x5d, 0xde, 0xc5, 0x7e, 0xd5, 0x82, 0x31, 0xd5, 0x4d, 0xef, 0x91, 0xc7, 0x0d, 0xaa, 0x3d, 0x39,
    -	0xe7, 0x4a, 0x45, 0x6b, 0x32, 0x3b, 0x6f, 0xbf, 0x8b, 0xbd, 0x6f, 0x76, 0x1a, 0xee, 0x1d, 0xa2,
    -	0xc2, 0xf2, 0x96, 0xc4, 0x7b, 0x65, 0x01, 0x3d, 0xd8, 0x2f, 0x8d, 0xa9, 0x7f, 0x3c, 0xac, 0x66,
    -	0x5c, 0xc4, 0xfe, 0x69, 0xba, 0xd8, 0xcd, 0xa9, 0x88, 0x5e, 0x84, 0xfe, 0xd6, 0xb6, 0x13, 0x92,
    -	0xc4, 0x23, 0xb0, 0xfe, 0x0a, 0x05, 0x1e, 0xec, 0x97, 0xc6, 0x55, 0x01, 0x06, 0xc1, 0x9c, 0xba,
    -	0xf7, 0x98, 0x8c, 0xe9, 0xc9, 0xd9, 0x35, 0x26, 0xe3, 0xdf, 0x5a, 0xd0, 0xb7, 0x4e, 0x4f, 0xaf,
    -	0x07, 0xbf, 0x05, 0xbc, 0x6e, 0x6c, 0x01, 0xa7, 0xf2, 0x12, 0xd4, 0xe4, 0xae, 0xfe, 0x95, 0xc4,
    -	0xea, 0x3f, 0x93, 0xcb, 0xa1, 0xf3, 0xc2, 0x6f, 0xc2, 0x08, 0x4b, 0x7b, 0x23, 0x1e, 0xbc, 0x3d,
    -	0x6f, 0x2c, 0xf8, 0x52, 0x62, 0xc1, 0x4f, 0x68, 0xa4, 0xda, 0x4a, 0x7f, 0x12, 0x06, 0xc5, 0x0b,
    -	0xaa, 0xe4, 0x33, 0x71, 0x41, 0x8b, 0x25, 0xde, 0xfe, 0xc9, 0x22, 0x18, 0x69, 0x76, 0xd0, 0x6f,
    -	0x58, 0x30, 0x17, 0x70, 0xcf, 0xea, 0xfa, 0x52, 0x3b, 0x70, 0xbd, 0xad, 0x6a, 0x6d, 0x9b, 0xd4,
    -	0xdb, 0x0d, 0xd7, 0xdb, 0x2a, 0x6f, 0x79, 0xbe, 0x02, 0x2f, 0xdf, 0x26, 0xb5, 0x36, 0xb3, 0x7a,
    -	0x76, 0xc9, 0xe9, 0xa3, 0x5e, 0x28, 0x3c, 0x77, 0x77, 0xbf, 0x34, 0x87, 0x0f, 0xc5, 0x1b, 0x1f,
    -	0xb2, 0x2d, 0xe8, 0x0f, 0x2c, 0xb8, 0xc0, 0xd3, 0xbd, 0xf4, 0xde, 0xfe, 0x0e, 0x4a, 0x84, 0x8a,
    -	0x64, 0x15, 0x33, 0xd9, 0x20, 0x41, 0x73, 0xe1, 0x25, 0xd1, 0xa1, 0x17, 0x2a, 0x87, 0xab, 0x0b,
    -	0x1f, 0xb6, 0x71, 0xf6, 0x3f, 0x2d, 0xc2, 0x98, 0x88, 0xdd, 0x27, 0xce, 0x80, 0x17, 0x8d, 0x29,
    -	0xf1, 0x48, 0x62, 0x4a, 0x4c, 0x19, 0xc4, 0xf7, 0x67, 0xfb, 0x0f, 0x61, 0x8a, 0x6e, 0xce, 0x97,
    -	0x89, 0x13, 0x44, 0x37, 0x89, 0xc3, 0xfd, 0xed, 0x8a, 0x87, 0xde, 0xfd, 0x95, 0xe2, 0x77, 0x35,
    -	0xc9, 0x0c, 0xa7, 0xf9, 0x7f, 0x27, 0x9d, 0x39, 0x1e, 0x4c, 0xa6, 0xc2, 0x2f, 0xbe, 0x05, 0xc3,
    -	0xea, 0xf9, 0x8f, 0xd8, 0x74, 0x3a, 0x47, 0x31, 0x4d, 0x72, 0xe0, 0x7a, 0xc5, 0xf8, 0xe9, 0x59,
    -	0xcc, 0xce, 0xfe, 0x47, 0x05, 0xa3, 0x42, 0x3e, 0x88, 0xeb, 0x30, 0xe4, 0x84, 0x2c, 0xb2, 0x72,
    -	0xbd, 0x93, 0xea, 0x37, 0x55, 0x0d, 0x7b, 0x82, 0x35, 0x2f, 0x4a, 0x62, 0xc5, 0x03, 0x5d, 0xe6,
    -	0x5e, 0x8d, 0xbb, 0xa4, 0x93, 0xde, 0x37, 0xc5, 0x0d, 0xa4, 0xdf, 0xe3, 0x2e, 0xc1, 0xa2, 0x3c,
    -	0xfa, 0x24, 0x77, 0x3b, 0xbd, 0xe2, 0xf9, 0xb7, 0xbc, 0x4b, 0xbe, 0x2f, 0xe3, 0xb4, 0xf4, 0xc6,
    -	0x70, 0x4a, 0x3a, 0x9b, 0xaa, 0xe2, 0xd8, 0xe4, 0xd6, 0x5b, 0x3c, 0xe3, 0xcf, 0x02, 0x4b, 0x6f,
    -	0x61, 0xbe, 0xb6, 0x0f, 0x11, 0x81, 0x09, 0x11, 0x18, 0x52, 0xc2, 0x44, 0xdf, 0x65, 0xde, 0x70,
    -	0xcd, 0xd2, 0xb1, 0x85, 0xe2, 0x8a, 0xc9, 0x02, 0x27, 0x79, 0xda, 0x3f, 0x6b, 0x01, 0x7b, 0x79,
    -	0x7c, 0x04, 0xf2, 0xc8, 0x47, 0x4c, 0x79, 0x64, 0x26, 0xaf, 0x93, 0x73, 0x44, 0x91, 0x17, 0xf8,
    -	0xcc, 0xaa, 0x04, 0xfe, 0xed, 0x3d, 0xe1, 0x2b, 0xd4, 0xfd, 0x72, 0x65, 0x7f, 0xc9, 0x02, 0x96,
    -	0xa1, 0x05, 0xf3, 0xbb, 0xb4, 0xd4, 0xec, 0x77, 0x37, 0x83, 0x7f, 0x0c, 0x86, 0x36, 0x89, 0x13,
    -	0xb5, 0x03, 0x11, 0x67, 0xca, 0xec, 0x0b, 0xa3, 0xc1, 0x26, 0xef, 0x15, 0x51, 0x4a, 0xbc, 0x20,
    -	0x14, 0xff, 0xb0, 0xe2, 0x66, 0x87, 0x30, 0x9b, 0x5f, 0x0a, 0x5d, 0x83, 0x87, 0x02, 0x52, 0x6b,
    -	0x07, 0x21, 0x9d, 0xa7, 0xe2, 0x56, 0x22, 0xde, 0xe0, 0x58, 0xec, 0xf6, 0xf2, 0xf0, 0xdd, 0xfd,
    -	0xd2, 0x43, 0x38, 0x9b, 0x04, 0xe7, 0x95, 0xb5, 0xbf, 0x87, 0x1f, 0xb6, 0x2a, 0x34, 0x6e, 0x13,
    -	0xa6, 0x3c, 0xed, 0x3f, 0x3d, 0x5a, 0xe4, 0x1d, 0xfa, 0xb1, 0x6e, 0xc7, 0x29, 0x3b, 0x87, 0xb4,
    -	0xe7, 0xcd, 0x09, 0x36, 0x38, 0xcd, 0xd9, 0xfe, 0x29, 0x0b, 0x1e, 0xd2, 0x09, 0xb5, 0x17, 0x54,
    -	0xdd, 0xec, 0x50, 0x4b, 0x30, 0xe4, 0xb7, 0x48, 0xe0, 0x44, 0x7e, 0x20, 0xce, 0x8f, 0xf3, 0x72,
    -	0x92, 0x5d, 0x15, 0xf0, 0x03, 0x91, 0x5c, 0x44, 0x72, 0x97, 0x70, 0xac, 0x4a, 0xd2, 0x4b, 0x36,
    -	0x53, 0x7e, 0x85, 0xe2, 0xad, 0x1c, 0xdb, 0x0d, 0x98, 0x4b, 0x43, 0x88, 0x05, 0xc6, 0xfe, 0x2b,
    -	0x8b, 0x4f, 0x31, 0xbd, 0xe9, 0xe8, 0x1d, 0x98, 0x6c, 0x3a, 0x51, 0x6d, 0x7b, 0xf9, 0x76, 0x2b,
    -	0xe0, 0x56, 0x3d, 0xd9, 0x4f, 0x4f, 0x77, 0xeb, 0x27, 0xed, 0x23, 0x63, 0x9f, 0xda, 0xb5, 0x04,
    -	0x33, 0x9c, 0x62, 0x8f, 0x6e, 0xc2, 0x08, 0x83, 0xb1, 0x67, 0xa0, 0x61, 0x27, 0x21, 0x21, 0xaf,
    -	0x36, 0xe5, 0x15, 0xb2, 0x16, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0x2b, 0x45, 0xbe, 0xee, 0x99, 0x50,
    -	0xff, 0x24, 0x0c, 0xb6, 0xfc, 0xfa, 0x62, 0x79, 0x09, 0x8b, 0x51, 0x50, 0x07, 0x4a, 0x85, 0x83,
    -	0xb1, 0xc4, 0xa3, 0xf3, 0x30, 0x24, 0x7e, 0x4a, 0x2b, 0x2c, 0x9b, 0xe6, 0x82, 0x2e, 0xc4, 0x0a,
    -	0x8b, 0x9e, 0x03, 0x68, 0x05, 0xfe, 0xae, 0x5b, 0x67, 0x71, 0x67, 0x8a, 0xa6, 0x43, 0x57, 0x45,
    -	0x61, 0xb0, 0x46, 0x85, 0x5e, 0x85, 0xb1, 0xb6, 0x17, 0x72, 0xc1, 0x44, 0x8b, 0xee, 0xad, 0x5c,
    -	0x8d, 0xae, 0xe9, 0x48, 0x6c, 0xd2, 0xa2, 0x79, 0x18, 0x88, 0x1c, 0xe6, 0xa0, 0xd4, 0x9f, 0xef,
    -	0x77, 0xbd, 0x41, 0x29, 0xf4, 0xcc, 0x5f, 0xb4, 0x00, 0x16, 0x05, 0xd1, 0x5b, 0xf2, 0x45, 0x36,
    -	0xdf, 0xe2, 0xc5, 0x83, 0x87, 0xde, 0x8e, 0x03, 0xed, 0x3d, 0xb6, 0x78, 0x48, 0x61, 0xf0, 0x42,
    -	0xaf, 0x00, 0x90, 0xdb, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0xdc, 0x0a, 0x95, 0x84, 0xb0, 0xe4, 0xaf,
    -	0xfb, 0xd1, 0xb5, 0x90, 0x2c, 0x2b, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x06, 0x00, 0xb1, 0x04, 0x8f,
    -	0xee, 0xc0, 0x50, 0xcd, 0x69, 0x39, 0x35, 0x9e, 0xd6, 0xb2, 0x98, 0xf7, 0x50, 0x36, 0x2e, 0x31,
    -	0xb7, 0x28, 0xc8, 0xb9, 0xe1, 0x41, 0x06, 0x48, 0x1e, 0x92, 0xe0, 0xae, 0xc6, 0x06, 0x55, 0x1f,
    -	0xfa, 0xbc, 0x05, 0x23, 0x22, 0xbc, 0x0e, 0x1b, 0xa1, 0x42, 0xbe, 0xad, 0x48, 0xab, 0x7f, 0x3e,
    -	0x2e, 0xc1, 0x9b, 0xf0, 0xbc, 0x9c, 0xa1, 0x1a, 0xa6, 0x6b, 0x2b, 0xf4, 0x8a, 0xd1, 0x07, 0xe5,
    -	0xa5, 0xb1, 0x68, 0x74, 0xa5, 0xba, 0x34, 0x0e, 0xb3, 0xd3, 0x42, 0xbf, 0x2f, 0x5e, 0x33, 0xee,
    -	0x8b, 0x7d, 0xf9, 0x4f, 0x4e, 0x0d, 0x41, 0xb6, 0xdb, 0x55, 0x11, 0x55, 0xf4, 0xf0, 0x13, 0xfd,
    -	0xf9, 0xef, 0x24, 0xb5, 0x1b, 0x53, 0x97, 0xd0, 0x13, 0x9f, 0x81, 0x89, 0xba, 0x29, 0x0e, 0x88,
    -	0x99, 0xf8, 0x44, 0x1e, 0xdf, 0x84, 0xf4, 0x10, 0x0b, 0x00, 0x09, 0x04, 0x4e, 0x32, 0x46, 0x15,
    -	0x1e, 0x8d, 0xa4, 0xec, 0x6d, 0xfa, 0xe2, 0xd1, 0x8d, 0x9d, 0x3b, 0x96, 0x7b, 0x61, 0x44, 0x9a,
    -	0x94, 0x32, 0x3e, 0xe7, 0xd7, 0x45, 0x59, 0xac, 0xb8, 0xa0, 0x37, 0x60, 0x80, 0x3d, 0x94, 0x0b,
    -	0x67, 0x86, 0xf2, 0x55, 0xf2, 0x66, 0xdc, 0xc7, 0x78, 0x41, 0xb2, 0xbf, 0x21, 0x16, 0x1c, 0xd0,
    -	0x65, 0xf9, 0x0c, 0x35, 0x2c, 0x7b, 0xd7, 0x42, 0xc2, 0x9e, 0xa1, 0x0e, 0x2f, 0x3c, 0x16, 0xbf,
    -	0x30, 0xe5, 0xf0, 0xcc, 0xfc, 0xa0, 0x46, 0x49, 0x2a, 0x4f, 0x89, 0xff, 0x32, 0xed, 0xa8, 0x88,
    -	0x52, 0x95, 0xd9, 0x3c, 0x33, 0x35, 0x69, 0xdc, 0x9d, 0xd7, 0x4d, 0x16, 0x38, 0xc9, 0x93, 0xca,
    -	0xa6, 0x7c, 0xd5, 0x8b, 0x67, 0x3b, 0xdd, 0xf6, 0x0e, 0x7e, 0x25, 0x67, 0xa7, 0x11, 0x87, 0x60,
    -	0x51, 0x1e, 0xb9, 0x30, 0x11, 0x18, 0x22, 0x82, 0x0c, 0x2e, 0x75, 0xae, 0x37, 0x39, 0x44, 0x0b,
    -	0x5b, 0x6e, 0xb2, 0xc1, 0x49, 0xbe, 0xb3, 0x3b, 0x30, 0x66, 0x6c, 0x10, 0x0f, 0xd4, 0xe4, 0xe5,
    -	0xc1, 0x64, 0x72, 0x37, 0x78, 0xa0, 0x96, 0xae, 0xbf, 0xe8, 0x83, 0x71, 0x73, 0xf6, 0xa2, 0x0b,
    -	0x30, 0x2c, 0x98, 0xa8, 0x2c, 0x41, 0x6a, 0x41, 0xae, 0x49, 0x04, 0x8e, 0x69, 0x58, 0x72, 0x28,
    -	0x56, 0x5c, 0x73, 0x09, 0x8f, 0x93, 0x43, 0x29, 0x0c, 0xd6, 0xa8, 0xe8, 0x6d, 0xee, 0xa6, 0xef,
    -	0x47, 0xea, 0xec, 0x53, 0x53, 0x7c, 0x81, 0x41, 0xb1, 0xc0, 0xd2, 0x33, 0x6f, 0x87, 0x04, 0x1e,
    -	0x69, 0x98, 0x41, 0xe7, 0xd5, 0x99, 0x77, 0x45, 0x47, 0x62, 0x93, 0x96, 0x9e, 0xdc, 0x7e, 0xc8,
    -	0xd6, 0x8c, 0xb8, 0x33, 0xc6, 0x2e, 0xf6, 0x55, 0x1e, 0x2c, 0x40, 0xe2, 0xd1, 0xc7, 0xe1, 0x21,
    -	0x15, 0xe0, 0x4d, 0xcc, 0x08, 0x59, 0xe3, 0x80, 0xa1, 0xe2, 0x79, 0x68, 0x31, 0x9b, 0x0c, 0xe7,
    -	0x95, 0x47, 0xaf, 0xc3, 0xb8, 0xb8, 0x57, 0x48, 0x8e, 0x83, 0xa6, 0xbf, 0xd8, 0x15, 0x03, 0x8b,
    -	0x13, 0xd4, 0x32, 0x6c, 0x3e, 0x13, 0xed, 0x25, 0x87, 0xa1, 0x74, 0xd8, 0x7c, 0x1d, 0x8f, 0x53,
    -	0x25, 0xd0, 0x3c, 0x4c, 0x70, 0x71, 0xcf, 0xf5, 0xb6, 0xf8, 0x98, 0x88, 0x07, 0x7c, 0x6a, 0x21,
    -	0x5c, 0x35, 0xd1, 0x38, 0x49, 0x8f, 0x5e, 0x86, 0x51, 0x27, 0xa8, 0x6d, 0xbb, 0x11, 0xa9, 0x51,
    -	0x69, 0x9c, 0xb9, 0x6c, 0x69, 0x0e, 0x77, 0xf3, 0x1a, 0x0e, 0x1b, 0x94, 0xf6, 0x1d, 0x98, 0xce,
    -	0x88, 0x22, 0x42, 0x27, 0x8e, 0xd3, 0x72, 0xe5, 0x37, 0x25, 0xbc, 0xda, 0xe7, 0x2b, 0x65, 0xf9,
    -	0x35, 0x1a, 0x15, 0x9d, 0x9d, 0x2c, 0xda, 0x88, 0x96, 0xd0, 0x58, 0xcd, 0xce, 0x15, 0x89, 0xc0,
    -	0x31, 0x8d, 0xfd, 0x77, 0x05, 0x98, 0xc8, 0xb0, 0x56, 0xb1, 0xa4, 0xba, 0x89, 0x0b, 0x4e, 0x9c,
    -	0x43, 0xd7, 0xcc, 0xc2, 0x50, 0x38, 0x44, 0x16, 0x86, 0x62, 0xb7, 0x2c, 0x0c, 0x7d, 0xef, 0x26,
    -	0x0b, 0x83, 0xd9, 0x63, 0xfd, 0x3d, 0xf5, 0x58, 0x46, 0xe6, 0x86, 0x81, 0x43, 0x66, 0x6e, 0x30,
    -	0x3a, 0x7d, 0xb0, 0x87, 0x4e, 0xff, 0xb1, 0x02, 0x4c, 0x26, 0x0d, 0x5d, 0x47, 0xa0, 0x2c, 0x7e,
    -	0xc3, 0x50, 0x16, 0x9f, 0xef, 0xe5, 0xc1, 0x75, 0xae, 0xe2, 0x18, 0x27, 0x14, 0xc7, 0x4f, 0xf5,
    -	0xc4, 0xad, 0xb3, 0x12, 0xf9, 0xff, 0x2e, 0xc0, 0xf1, 0x4c, 0xfb, 0xdf, 0x11, 0xf4, 0xcd, 0x55,
    -	0xa3, 0x6f, 0x9e, 0xed, 0xf9, 0x31, 0x7a, 0x6e, 0x07, 0xdd, 0x48, 0x74, 0xd0, 0x85, 0xde, 0x59,
    -	0x76, 0xee, 0xa5, 0x6f, 0x14, 0xe1, 0x4c, 0x66, 0xb9, 0x58, 0xd7, 0xba, 0x62, 0xe8, 0x5a, 0x9f,
    -	0x4b, 0xe8, 0x5a, 0xed, 0xce, 0xa5, 0xef, 0x8f, 0xf2, 0x55, 0x3c, 0xca, 0x66, 0xa1, 0x25, 0xee,
    -	0x51, 0xf1, 0x6a, 0x3c, 0xca, 0x56, 0x8c, 0xb0, 0xc9, 0xf7, 0x3b, 0x49, 0xe1, 0xfa, 0xfb, 0x16,
    -	0x9c, 0xcc, 0x1c, 0x9b, 0x23, 0x50, 0xb0, 0xad, 0x9b, 0x0a, 0xb6, 0x27, 0x7b, 0x9e, 0xad, 0x39,
    -	0x1a, 0xb7, 0x2f, 0x0c, 0xe4, 0x7c, 0x0b, 0x53, 0x1a, 0x5c, 0x85, 0x11, 0xa7, 0x56, 0x23, 0x61,
    -	0xb8, 0xe6, 0xd7, 0x55, 0xc0, 0xf6, 0x67, 0xd9, 0x95, 0x2e, 0x06, 0x1f, 0xec, 0x97, 0x66, 0x93,
    -	0x2c, 0x62, 0x34, 0xd6, 0x39, 0xa0, 0x4f, 0xc2, 0x50, 0x28, 0x73, 0xed, 0xf5, 0xdd, 0x7b, 0xae,
    -	0x3d, 0xa6, 0x8f, 0x50, 0x4a, 0x11, 0xc5, 0x12, 0x7d, 0xb7, 0x1e, 0xe4, 0xa7, 0x83, 0x46, 0x8f,
    -	0x37, 0xf2, 0x1e, 0x42, 0xfd, 0x3c, 0x07, 0xb0, 0xab, 0x6e, 0x1f, 0x49, 0x85, 0x87, 0x76, 0x2f,
    -	0xd1, 0xa8, 0xd0, 0x47, 0x61, 0x32, 0xe4, 0xf1, 0x2d, 0x63, 0x8f, 0x0d, 0x3e, 0x17, 0x59, 0x88,
    -	0xb0, 0x6a, 0x02, 0x87, 0x53, 0xd4, 0x68, 0x45, 0xd6, 0xca, 0x7c, 0x73, 0xf8, 0xf4, 0x3c, 0x17,
    -	0xd7, 0x28, 0xfc, 0x73, 0x8e, 0x25, 0x07, 0x81, 0x75, 0xbf, 0x56, 0x12, 0x7d, 0x12, 0x80, 0x4e,
    -	0x22, 0xa1, 0xf8, 0x18, 0xcc, 0xdf, 0x42, 0xe9, 0xde, 0x52, 0xcf, 0x74, 0x58, 0x67, 0xaf, 0xa9,
    -	0x97, 0x14, 0x13, 0xac, 0x31, 0x44, 0x0e, 0x8c, 0xc5, 0xff, 0xe2, 0xbc, 0xd7, 0xe7, 0x73, 0x6b,
    -	0x48, 0x32, 0x67, 0xda, 0xf6, 0x25, 0x9d, 0x05, 0x36, 0x39, 0xa2, 0x4f, 0xc0, 0xc9, 0xdd, 0x5c,
    -	0x37, 0x98, 0xe1, 0x38, 0x95, 0x65, 0xbe, 0xf3, 0x4b, 0x7e, 0x79, 0xfb, 0x5f, 0x02, 0x3c, 0xdc,
    -	0x61, 0xa7, 0x47, 0xf3, 0xa6, 0x09, 0xfb, 0xe9, 0xa4, 0x36, 0x62, 0x36, 0xb3, 0xb0, 0xa1, 0x9e,
    -	0x48, 0x2c, 0xa8, 0xc2, 0xbb, 0x5e, 0x50, 0x3f, 0x62, 0x69, 0x7a, 0x22, 0xee, 0x43, 0xfc, 0x91,
    -	0x43, 0x9e, 0x60, 0xf7, 0x51, 0x71, 0xb4, 0x99, 0xa1, 0x7d, 0x79, 0xae, 0xe7, 0xe6, 0xf4, 0xae,
    -	0x8e, 0xf9, 0x5a, 0x76, 0x48, 0x6a, 0xae, 0x98, 0xb9, 0x74, 0xd8, 0xef, 0x3f, 0xaa, 0xf0, 0xd4,
    -	0x7f, 0x62, 0xc1, 0xc9, 0x14, 0x98, 0xb7, 0x81, 0x84, 0x22, 0xa8, 0xd9, 0xfa, 0xbb, 0x6e, 0xbc,
    -	0x64, 0xc8, 0xbf, 0xe1, 0xb2, 0xf8, 0x86, 0x93, 0xb9, 0x74, 0xc9, 0xa6, 0x7f, 0xf1, 0xcf, 0x4b,
    -	0xd3, 0xac, 0x02, 0x93, 0x10, 0xe7, 0x37, 0x1d, 0xb5, 0xe0, 0x6c, 0xad, 0x1d, 0x04, 0xf1, 0x64,
    -	0xcd, 0x58, 0x9c, 0xfc, 0xae, 0xf7, 0xd8, 0xdd, 0xfd, 0xd2, 0xd9, 0xc5, 0x2e, 0xb4, 0xb8, 0x2b,
    -	0x37, 0xe4, 0x01, 0x6a, 0xa6, 0x9c, 0xcd, 0x44, 0xba, 0xfb, 0x4c, 0xdd, 0x49, 0xda, 0x35, 0x8d,
    -	0xbf, 0x9a, 0xcd, 0x70, 0x59, 0xcb, 0xe0, 0x7c, 0xb4, 0xda, 0x93, 0x6f, 0x4d, 0x38, 0xf0, 0xd9,
    -	0x55, 0x38, 0xd3, 0x79, 0x32, 0x1d, 0xea, 0xc5, 0xfe, 0x1f, 0x59, 0x70, 0xba, 0x63, 0x58, 0xa8,
    -	0x6f, 0xc3, 0xcb, 0x82, 0xfd, 0x39, 0x0b, 0x1e, 0xc9, 0x2c, 0x61, 0xf8, 0x35, 0x5e, 0x80, 0xe1,
    -	0x5a, 0x22, 0x59, 0x73, 0x1c, 0x20, 0x45, 0x25, 0x6a, 0x8e, 0x69, 0x0c, 0xf7, 0xc5, 0x42, 0x57,
    -	0xf7, 0xc5, 0xdf, 0xb6, 0x20, 0x75, 0xd4, 0x1f, 0x81, 0xe4, 0x59, 0x36, 0x25, 0xcf, 0xc7, 0x7a,
    -	0xe9, 0xcd, 0x1c, 0xa1, 0xf3, 0x6f, 0x26, 0xe0, 0x44, 0xce, 0x83, 0xdb, 0x5d, 0x98, 0xda, 0xaa,
    -	0x11, 0x33, 0xc2, 0x42, 0xa7, 0xc8, 0x63, 0x1d, 0xc3, 0x31, 0xf0, 0x1c, 0xd9, 0x29, 0x12, 0x9c,
    -	0xae, 0x02, 0x7d, 0xce, 0x82, 0x63, 0xce, 0xad, 0x70, 0x99, 0xde, 0x20, 0xdc, 0xda, 0x42, 0xc3,
    -	0xaf, 0xed, 0x50, 0xc1, 0x4c, 0x2e, 0xab, 0x17, 0x32, 0x15, 0xc8, 0x37, 0xaa, 0x29, 0x7a, 0xa3,
    -	0xfa, 0x99, 0xbb, 0xfb, 0xa5, 0x63, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0x84, 0x45, 0x4e, 0x22, 0x27,
    -	0xda, 0xee, 0x14, 0x03, 0x24, 0xeb, 0x65, 0x34, 0x17, 0x89, 0x25, 0x06, 0x2b, 0x3e, 0xe8, 0xd3,
    -	0x30, 0xbc, 0x25, 0x9f, 0xfb, 0x67, 0x88, 0xdc, 0x71, 0x47, 0x76, 0x0e, 0x82, 0xc0, 0xfd, 0x41,
    -	0x14, 0x11, 0x8e, 0x99, 0xa2, 0xd7, 0xa1, 0xe8, 0x6d, 0x86, 0x22, 0x12, 0x59, 0xb6, 0x5b, 0xaa,
    -	0xe9, 0xf8, 0xcb, 0x23, 0xed, 0xac, 0xaf, 0x54, 0x31, 0x2d, 0x88, 0x2e, 0x43, 0x31, 0xb8, 0x59,
    -	0x17, 0xd6, 0x8f, 0xcc, 0x45, 0x8a, 0x17, 0x96, 0x72, 0x5a, 0xc5, 0x38, 0xe1, 0x85, 0x25, 0x4c,
    -	0x59, 0xa0, 0x0a, 0xf4, 0xb3, 0x57, 0xaa, 0x42, 0xb4, 0xcd, 0xbc, 0xca, 0x77, 0x78, 0xed, 0xcd,
    -	0x5f, 0xc0, 0x31, 0x02, 0xcc, 0x19, 0xa1, 0x0d, 0x18, 0xa8, 0xb1, 0x04, 0xf4, 0x42, 0x96, 0xfd,
    -	0x60, 0xa6, 0x9d, 0xa3, 0x43, 0x66, 0x7e, 0xa1, 0xf6, 0x67, 0x14, 0x58, 0xf0, 0x62, 0x5c, 0x49,
    -	0x6b, 0x7b, 0x53, 0x9e, 0x58, 0xd9, 0x5c, 0x49, 0x6b, 0x7b, 0xa5, 0xda, 0x91, 0x2b, 0xa3, 0xc0,
    -	0x82, 0x17, 0x7a, 0x05, 0x0a, 0x9b, 0x35, 0xf1, 0x02, 0x35, 0xd3, 0xe0, 0x61, 0x06, 0x4b, 0x5a,
    -	0x18, 0xb8, 0xbb, 0x5f, 0x2a, 0xac, 0x2c, 0xe2, 0xc2, 0x66, 0x0d, 0xad, 0xc3, 0xe0, 0x26, 0x0f,
    -	0xaf, 0x22, 0x6c, 0x1a, 0x4f, 0x64, 0x47, 0x7e, 0x49, 0x45, 0x60, 0xe1, 0xaf, 0x19, 0x05, 0x02,
    -	0x4b, 0x26, 0x2c, 0x45, 0x8e, 0x0a, 0x13, 0x23, 0xa2, 0x54, 0xce, 0x1d, 0x2e, 0xb4, 0x0f, 0xbf,
    -	0x6a, 0xc4, 0xc1, 0x66, 0xb0, 0xc6, 0x91, 0xce, 0x6a, 0xe7, 0x4e, 0x3b, 0x60, 0x29, 0x0c, 0x44,
    -	0x38, 0xb3, 0xcc, 0x59, 0x3d, 0x2f, 0x89, 0x3a, 0xcd, 0x6a, 0x45, 0x84, 0x63, 0xa6, 0x68, 0x07,
    -	0xc6, 0x76, 0xc3, 0xd6, 0x36, 0x91, 0x4b, 0x9a, 0x45, 0x37, 0xcb, 0x91, 0x66, 0xaf, 0x0b, 0x42,
    -	0x37, 0x88, 0xda, 0x4e, 0x23, 0xb5, 0x0b, 0xb1, 0x6b, 0xcd, 0x75, 0x9d, 0x19, 0x36, 0x79, 0xd3,
    -	0xee, 0x7f, 0xa7, 0xed, 0xdf, 0xdc, 0x8b, 0x88, 0x08, 0x2e, 0x99, 0xd9, 0xfd, 0x6f, 0x72, 0x92,
    -	0x74, 0xf7, 0x0b, 0x04, 0x96, 0x4c, 0xd0, 0x75, 0xd1, 0x3d, 0x6c, 0xf7, 0x9c, 0xcc, 0x8f, 0x5c,
    -	0x3d, 0x2f, 0x89, 0x72, 0x3a, 0x85, 0xed, 0x96, 0x31, 0x2b, 0xb6, 0x4b, 0xb6, 0xb6, 0xfd, 0xc8,
    -	0xf7, 0x12, 0x3b, 0xf4, 0x54, 0xfe, 0x2e, 0x59, 0xc9, 0xa0, 0x4f, 0xef, 0x92, 0x59, 0x54, 0x38,
    -	0xb3, 0x2e, 0x54, 0x87, 0xf1, 0x96, 0x1f, 0x44, 0xb7, 0xfc, 0x40, 0xce, 0x2f, 0xd4, 0x41, 0x51,
    -	0x6a, 0x50, 0x8a, 0x1a, 0x59, 0xdc, 0x56, 0x13, 0x83, 0x13, 0x3c, 0xd1, 0xc7, 0x60, 0x30, 0xac,
    -	0x39, 0x0d, 0x52, 0xbe, 0x3a, 0x33, 0x9d, 0x7f, 0xfc, 0x54, 0x39, 0x49, 0xce, 0xec, 0xe2, 0xd1,
    -	0x71, 0x38, 0x09, 0x96, 0xec, 0xd0, 0x0a, 0xf4, 0xb3, 0xd4, 0xb3, 0x2c, 0x12, 0x6a, 0x4e, 0x00,
    -	0xee, 0xd4, 0x1b, 0x13, 0xbe, 0x37, 0x31, 0x30, 0xe6, 0xc5, 0xe9, 0x1a, 0x10, 0x9a, 0x02, 0x3f,
    -	0x9c, 0x39, 0x9e, 0xbf, 0x06, 0x84, 0x82, 0xe1, 0x6a, 0xb5, 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66,
    -	0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xe8, 0xe0, 0x3f, 0x98, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee,
    -	0xa4, 0x94, 0x85, 0xfd, 0x9b, 0x43, 0x69, 0x99, 0x85, 0x69, 0x98, 0xfe, 0x67, 0x2b, 0xe5, 0xe7,
    -	0xf0, 0xa1, 0x5e, 0x15, 0xde, 0xf7, 0xf1, 0xe2, 0xfa, 0x39, 0x0b, 0x4e, 0xb4, 0x32, 0x3f, 0x44,
    -	0x08, 0x00, 0xbd, 0xe9, 0xcd, 0xf9, 0xa7, 0xab, 0xa8, 0xb9, 0xd9, 0x78, 0x9c, 0x53, 0x53, 0x52,
    -	0x39, 0x50, 0x7c, 0xd7, 0xca, 0x81, 0x35, 0x18, 0xaa, 0xf1, 0x9b, 0x9c, 0x8c, 0xf6, 0xde, 0x53,
    -	0xcc, 0x47, 0x26, 0x4a, 0x88, 0x2b, 0xe0, 0x26, 0x56, 0x2c, 0xd0, 0x8f, 0x5a, 0x70, 0x3a, 0xd9,
    -	0x74, 0x4c, 0x18, 0x5a, 0x84, 0xda, 0xe5, 0x6a, 0xad, 0x15, 0xf1, 0xfd, 0x29, 0xf9, 0xdf, 0x20,
    -	0x3e, 0xe8, 0x46, 0x80, 0x3b, 0x57, 0x86, 0x96, 0x32, 0xf4, 0x6a, 0x03, 0xa6, 0x45, 0xb1, 0x07,
    -	0xdd, 0xda, 0x0b, 0x30, 0xda, 0xf4, 0xdb, 0x5e, 0x24, 0xdc, 0x0d, 0x85, 0xc3, 0x13, 0x73, 0xf4,
    -	0x59, 0xd3, 0xe0, 0xd8, 0xa0, 0x4a, 0x68, 0xe4, 0x86, 0xee, 0x59, 0x23, 0xf7, 0x36, 0x8c, 0x7a,
    -	0x9a, 0x7f, 0x7c, 0xa7, 0x1b, 0xac, 0xd0, 0x2e, 0x6a, 0xd4, 0xbc, 0x95, 0x3a, 0x04, 0x1b, 0xdc,
    -	0x3a, 0x6b, 0xcb, 0xe0, 0xdd, 0x69, 0xcb, 0x8e, 0xf4, 0x4a, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc6,
    -	0xc0, 0xb5, 0x72, 0xaf, 0x99, 0x5a, 0xb9, 0x73, 0x49, 0xad, 0x5c, 0xca, 0x54, 0x65, 0x28, 0xe4,
    -	0x7a, 0xcf, 0x79, 0xd7, 0x73, 0x1c, 0xdf, 0xef, 0xb7, 0xe0, 0x21, 0x66, 0xfb, 0xa0, 0x15, 0xbc,
    -	0x6b, 0x7b, 0x07, 0x73, 0x05, 0x5d, 0xcd, 0x66, 0x87, 0xf3, 0xea, 0xb1, 0x1b, 0x70, 0xb6, 0xdb,
    -	0xb9, 0xcb, 0x1c, 0x6b, 0xeb, 0xca, 0x39, 0x22, 0x76, 0xac, 0xad, 0x97, 0x97, 0x30, 0xc3, 0xf4,
    -	0x1a, 0xa5, 0xce, 0xfe, 0x8f, 0x16, 0x14, 0x2b, 0x7e, 0xfd, 0x08, 0x6e, 0xf4, 0x1f, 0x31, 0x6e,
    -	0xf4, 0x0f, 0x67, 0x9f, 0xf8, 0xf5, 0x5c, 0x63, 0xdf, 0x72, 0xc2, 0xd8, 0x77, 0x3a, 0x8f, 0x41,
    -	0x67, 0xd3, 0xde, 0x4f, 0x17, 0x61, 0xa4, 0xe2, 0xd7, 0xd5, 0x3a, 0xfb, 0x67, 0xf7, 0xf2, 0xaa,
    -	0x25, 0x37, 0xc9, 0x90, 0xc6, 0x99, 0x79, 0xe1, 0xca, 0x38, 0x07, 0xdf, 0x66, 0x8f, 0x5b, 0x6e,
    -	0x10, 0x77, 0x6b, 0x3b, 0x22, 0xf5, 0xe4, 0xe7, 0x1c, 0xdd, 0xe3, 0x96, 0x6f, 0x16, 0x61, 0x22,
    -	0x51, 0x3b, 0x6a, 0xc0, 0x58, 0x43, 0x37, 0x25, 0x89, 0x79, 0x7a, 0x4f, 0x56, 0x28, 0xf1, 0x38,
    -	0x40, 0x03, 0x61, 0x93, 0x39, 0x9a, 0x03, 0x50, 0xbe, 0x15, 0x52, 0xdb, 0xcf, 0xae, 0x35, 0xca,
    -	0xf9, 0x22, 0xc4, 0x1a, 0x05, 0x7a, 0x11, 0x46, 0x22, 0xbf, 0xe5, 0x37, 0xfc, 0xad, 0xbd, 0x2b,
    -	0x44, 0x06, 0x30, 0x54, 0x8e, 0xbe, 0x1b, 0x31, 0x0a, 0xeb, 0x74, 0xe8, 0x36, 0x4c, 0x29, 0x26,
    -	0xd5, 0xfb, 0x60, 0x5e, 0x63, 0x6a, 0x93, 0xf5, 0x24, 0x47, 0x9c, 0xae, 0x04, 0xbd, 0x02, 0xe3,
    -	0xcc, 0xe3, 0x98, 0x95, 0xbf, 0x42, 0xf6, 0x64, 0x60, 0x5b, 0x26, 0x61, 0xaf, 0x19, 0x18, 0x9c,
    -	0xa0, 0x44, 0x8b, 0x30, 0xd5, 0x74, 0xc3, 0x44, 0xf1, 0x01, 0x56, 0x9c, 0x35, 0x60, 0x2d, 0x89,
    -	0xc4, 0x69, 0x7a, 0xfb, 0xe7, 0xc4, 0x18, 0x7b, 0x91, 0xfb, 0xfe, 0x72, 0x7c, 0x6f, 0x2f, 0xc7,
    -	0x6f, 0x58, 0x30, 0x49, 0x6b, 0x67, 0x6e, 0x94, 0x52, 0x90, 0x52, 0xa9, 0x0f, 0xac, 0x0e, 0xa9,
    -	0x0f, 0xce, 0xd1, 0x6d, 0xbb, 0xee, 0xb7, 0x23, 0xa1, 0x1d, 0xd5, 0xf6, 0x65, 0x0a, 0xc5, 0x02,
    -	0x2b, 0xe8, 0x48, 0x10, 0x88, 0x47, 0xe0, 0x3a, 0x1d, 0x09, 0x02, 0x2c, 0xb0, 0x32, 0x33, 0x42,
    -	0x5f, 0x76, 0x66, 0x04, 0x1e, 0xe0, 0x5a, 0x78, 0xc1, 0x09, 0x91, 0x56, 0x0b, 0x70, 0x2d, 0xdd,
    -	0xe3, 0x62, 0x1a, 0xfb, 0x6b, 0x45, 0x18, 0xad, 0xf8, 0xf5, 0xd8, 0xb1, 0xe3, 0x05, 0xc3, 0xb1,
    -	0xe3, 0x6c, 0xc2, 0xb1, 0x63, 0x52, 0xa7, 0x7d, 0xdf, 0x8d, 0xe3, 0x5b, 0xe5, 0xc6, 0xf1, 0x5b,
    -	0x16, 0x1b, 0xb5, 0xa5, 0xf5, 0x2a, 0xf7, 0xca, 0x45, 0x17, 0x61, 0x84, 0xed, 0x70, 0x2c, 0xea,
    -	0x80, 0xf4, 0x76, 0x60, 0x99, 0x0a, 0xd7, 0x63, 0x30, 0xd6, 0x69, 0xd0, 0x79, 0x18, 0x0a, 0x89,
    -	0x13, 0xd4, 0xb6, 0xd5, 0xf6, 0x2e, 0x5c, 0x13, 0x38, 0x0c, 0x2b, 0x2c, 0x7a, 0x33, 0x8e, 0xad,
    -	0x5c, 0xcc, 0x77, 0xf1, 0xd5, 0xdb, 0xc3, 0x97, 0x48, 0x7e, 0x40, 0x65, 0xfb, 0x06, 0xa0, 0x34,
    -	0x7d, 0x0f, 0xcf, 0x9e, 0x4a, 0x66, 0xf4, 0xcf, 0xe1, 0x54, 0xe4, 0xcf, 0xbf, 0xb7, 0x60, 0xbc,
    -	0xe2, 0xd7, 0xe9, 0xd2, 0xfd, 0x4e, 0x5a, 0xa7, 0x7a, 0x60, 0xf9, 0x81, 0x0e, 0x81, 0xe5, 0x1f,
    -	0x85, 0xfe, 0x8a, 0x5f, 0xef, 0x12, 0xa1, 0xf4, 0xff, 0xb1, 0x60, 0xb0, 0xe2, 0xd7, 0x8f, 0xc0,
    -	0xf0, 0xf2, 0x9a, 0x69, 0x78, 0x79, 0x28, 0x67, 0xde, 0xe4, 0xd8, 0x5a, 0xfe, 0xaf, 0x3e, 0x18,
    -	0xa3, 0xed, 0xf4, 0xb7, 0xe4, 0x50, 0x1a, 0xdd, 0x66, 0xf5, 0xd0, 0x6d, 0xf4, 0x1a, 0xe0, 0x37,
    -	0x1a, 0xfe, 0xad, 0xe4, 0xb0, 0xae, 0x30, 0x28, 0x16, 0x58, 0xf4, 0x0c, 0x0c, 0xb5, 0x02, 0xb2,
    -	0xeb, 0xfa, 0x42, 0xbe, 0xd6, 0xcc, 0x58, 0x15, 0x01, 0xc7, 0x8a, 0x82, 0x5e, 0xbc, 0x43, 0xd7,
    -	0xa3, 0xb2, 0x44, 0xcd, 0xf7, 0xea, 0xdc, 0x36, 0x51, 0x14, 0xd9, 0x8f, 0x34, 0x38, 0x36, 0xa8,
    -	0xd0, 0x0d, 0x18, 0x66, 0xff, 0xd9, 0xb6, 0x73, 0xf8, 0xc4, 0xee, 0x22, 0x1f, 0xac, 0x60, 0x80,
    -	0x63, 0x5e, 0xe8, 0x39, 0x80, 0x48, 0x66, 0x10, 0x09, 0x45, 0xa4, 0x4a, 0x75, 0x17, 0x51, 0xb9,
    -	0x45, 0x42, 0xac, 0x51, 0xa1, 0xa7, 0x61, 0x38, 0x72, 0xdc, 0xc6, 0xaa, 0xeb, 0x31, 0xfb, 0x3d,
    -	0x6d, 0xbf, 0x48, 0xcb, 0x2a, 0x80, 0x38, 0xc6, 0x53, 0x59, 0x90, 0xc5, 0x20, 0x5a, 0xd8, 0x8b,
    -	0x44, 0x06, 0xb2, 0x22, 0x97, 0x05, 0x57, 0x15, 0x14, 0x6b, 0x14, 0x68, 0x1b, 0x4e, 0xb9, 0x1e,
    -	0xcb, 0x14, 0x44, 0xaa, 0x3b, 0x6e, 0x6b, 0x63, 0xb5, 0x7a, 0x9d, 0x04, 0xee, 0xe6, 0xde, 0x82,
    -	0x53, 0xdb, 0x21, 0x9e, 0x4c, 0xd9, 0xfd, 0x98, 0x68, 0xe2, 0xa9, 0x72, 0x07, 0x5a, 0xdc, 0x91,
    -	0x93, 0xfd, 0x3c, 0x9b, 0xef, 0x57, 0xab, 0xe8, 0x29, 0x63, 0xeb, 0x38, 0xa1, 0x6f, 0x1d, 0x07,
    -	0xfb, 0xa5, 0x81, 0xab, 0x55, 0x2d, 0x10, 0xce, 0xcb, 0x70, 0xbc, 0xe2, 0xd7, 0x2b, 0x7e, 0x10,
    -	0xad, 0xf8, 0xc1, 0x2d, 0x27, 0xa8, 0xcb, 0xe9, 0x55, 0x92, 0xa1, 0x80, 0xe8, 0xfe, 0xd9, 0xcf,
    -	0x77, 0x17, 0x23, 0xcc, 0xcf, 0xf3, 0x4c, 0x62, 0x3b, 0xe4, 0x1b, 0xcf, 0x1a, 0x93, 0x1d, 0x54,
    -	0xae, 0xad, 0x4b, 0x4e, 0x44, 0xd0, 0x55, 0x18, 0xab, 0xe9, 0xc7, 0xa8, 0x28, 0xfe, 0xa4, 0x3c,
    -	0xc8, 0x8c, 0x33, 0x36, 0xf3, 0xdc, 0x35, 0xcb, 0xdb, 0xdf, 0x23, 0x2a, 0xe1, 0x8a, 0x08, 0xee,
    -	0xd2, 0xda, 0x4b, 0x56, 0x7b, 0x99, 0x8c, 0xa7, 0x90, 0x1f, 0x68, 0x91, 0xdb, 0x95, 0x3b, 0x26,
    -	0xe3, 0xb1, 0xbf, 0x17, 0x4e, 0x24, 0xab, 0xef, 0x39, 0xb5, 0xfe, 0x22, 0x4c, 0x05, 0x7a, 0x41,
    -	0x2d, 0xb3, 0xe1, 0x71, 0x9e, 0x40, 0x25, 0x81, 0xc4, 0x69, 0x7a, 0xfb, 0x45, 0x98, 0xa2, 0x97,
    -	0x5f, 0x25, 0xc8, 0xb1, 0x5e, 0xee, 0x1e, 0x13, 0xe9, 0x8f, 0x07, 0xd8, 0x41, 0x94, 0x48, 0x73,
    -	0x85, 0x3e, 0x05, 0xe3, 0x21, 0x59, 0x75, 0xbd, 0xf6, 0x6d, 0xa9, 0x5b, 0xeb, 0xf0, 0xb8, 0xb9,
    -	0xba, 0xac, 0x53, 0xf2, 0xfb, 0x83, 0x09, 0xc3, 0x09, 0x6e, 0xa8, 0x09, 0xe3, 0xb7, 0x5c, 0xaf,
    -	0xee, 0xdf, 0x0a, 0x25, 0xff, 0xa1, 0x7c, 0x45, 0xfd, 0x0d, 0x4e, 0x99, 0x68, 0xa3, 0x51, 0xdd,
    -	0x0d, 0x83, 0x19, 0x4e, 0x30, 0xa7, 0x8b, 0x3d, 0x68, 0x7b, 0xf3, 0xe1, 0xb5, 0x90, 0xf0, 0x47,
    -	0xaa, 0x62, 0xb1, 0x63, 0x09, 0xc4, 0x31, 0x9e, 0x2e, 0x76, 0xf6, 0xe7, 0x52, 0xe0, 0xb7, 0x79,
    -	0x4e, 0x25, 0xb1, 0xd8, 0xb1, 0x82, 0x62, 0x8d, 0x82, 0x6e, 0x86, 0xec, 0xdf, 0xba, 0xef, 0x61,
    -	0xdf, 0x8f, 0xe4, 0xf6, 0xc9, 0x72, 0x02, 0x6a, 0x70, 0x6c, 0x50, 0xa1, 0x15, 0x40, 0x61, 0xbb,
    -	0xd5, 0x6a, 0x30, 0xd7, 0x45, 0xa7, 0xc1, 0x58, 0x71, 0xb7, 0xab, 0x22, 0xf7, 0x6e, 0xa9, 0xa6,
    -	0xb0, 0x38, 0xa3, 0x04, 0x3d, 0x17, 0x37, 0x45, 0x53, 0xfb, 0x59, 0x53, 0xb9, 0x51, 0xaf, 0xca,
    -	0xdb, 0x29, 0x71, 0x68, 0x19, 0x06, 0xc3, 0xbd, 0xb0, 0x16, 0x35, 0xc2, 0x4e, 0x19, 0x18, 0xab,
    -	0x8c, 0x44, 0x4b, 0x00, 0xcc, 0x8b, 0x60, 0x59, 0x16, 0xd5, 0x60, 0x5a, 0x70, 0x5c, 0xdc, 0x76,
    -	0x3c, 0x95, 0x17, 0x8e, 0x7b, 0xef, 0x5d, 0xbc, 0xbb, 0x5f, 0x9a, 0x16, 0x35, 0xeb, 0xe8, 0x83,
    -	0xfd, 0x12, 0x5d, 0x1c, 0x19, 0x18, 0x9c, 0xc5, 0x8d, 0x4f, 0xbe, 0x5a, 0xcd, 0x6f, 0xb6, 0x2a,
    -	0x81, 0xbf, 0xe9, 0x36, 0x48, 0x27, 0xc3, 0x68, 0xd5, 0xa0, 0x14, 0x93, 0xcf, 0x80, 0xe1, 0x04,
    -	0x37, 0x74, 0x13, 0x26, 0x9c, 0x56, 0x6b, 0x3e, 0x68, 0xfa, 0x81, 0xac, 0x60, 0x24, 0x5f, 0xc3,
    -	0x3e, 0x6f, 0x92, 0xf2, 0xb4, 0x70, 0x09, 0x20, 0x4e, 0x32, 0xb4, 0xbf, 0x87, 0xc9, 0xa7, 0x55,
    -	0x77, 0xcb, 0x63, 0xef, 0xc6, 0x51, 0x13, 0xc6, 0x5a, 0x6c, 0x07, 0x13, 0xd9, 0x94, 0xc4, 0x7a,
    -	0x7a, 0xa1, 0x47, 0x1d, 0xdb, 0x2d, 0x96, 0x0f, 0xd2, 0xf0, 0xb5, 0xac, 0xe8, 0xec, 0xb0, 0xc9,
    -	0xdd, 0xfe, 0x57, 0x27, 0x99, 0x84, 0x53, 0xe5, 0x8a, 0xb3, 0x41, 0xf1, 0x0a, 0x4e, 0x5c, 0x95,
    -	0x67, 0xf3, 0x55, 0xd4, 0xf1, 0xd0, 0x8b, 0x97, 0x74, 0x58, 0x96, 0x45, 0x9f, 0x84, 0x71, 0x7a,
    -	0xf3, 0x54, 0x52, 0x46, 0x38, 0x73, 0x2c, 0x3f, 0x6e, 0x91, 0xa2, 0xd2, 0x33, 0xad, 0xe9, 0x85,
    -	0x71, 0x82, 0x19, 0x7a, 0x93, 0xb9, 0x1f, 0x4a, 0xd6, 0x85, 0x5e, 0x58, 0xeb, 0x9e, 0x86, 0x92,
    -	0xad, 0xc6, 0x04, 0xb5, 0x61, 0x3a, 0x9d, 0x4f, 0x36, 0x9c, 0xb1, 0xf3, 0x45, 0xf8, 0x74, 0x4a,
    -	0xd8, 0x38, 0x25, 0x56, 0x1a, 0x17, 0xe2, 0x2c, 0xfe, 0x68, 0x35, 0x99, 0xed, 0xb3, 0x68, 0x28,
    -	0xb7, 0x53, 0x19, 0x3f, 0xc7, 0x3a, 0x26, 0xfa, 0xdc, 0x82, 0xd3, 0x5a, 0xc2, 0xc4, 0x4b, 0x81,
    -	0xc3, 0xdc, 0x5f, 0x5c, 0xb6, 0x65, 0x6b, 0xb2, 0xd7, 0x23, 0x77, 0xf7, 0x4b, 0xa7, 0x37, 0x3a,
    -	0x11, 0xe2, 0xce, 0x7c, 0xd0, 0x55, 0x38, 0xce, 0xa3, 0x6e, 0x2c, 0x11, 0xa7, 0xde, 0x70, 0x3d,
    -	0x25, 0xdc, 0xf1, 0x6d, 0xe5, 0xe4, 0xdd, 0xfd, 0xd2, 0xf1, 0xf9, 0x2c, 0x02, 0x9c, 0x5d, 0x0e,
    -	0xbd, 0x06, 0xc3, 0x75, 0x2f, 0x14, 0x7d, 0x30, 0x60, 0xe4, 0xa4, 0x1c, 0x5e, 0x5a, 0xaf, 0xaa,
    -	0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x00, 0xda, 0xe2, 0xd6, 0x15, 0xa5, 0x12, 0x1b, 0x4c, 0x05, 0x63,
    -	0x4c, 0x6a, 0x8d, 0x8d, 0xd7, 0xf6, 0xdc, 0xac, 0xa8, 0x5e, 0x86, 0x19, 0x0f, 0xf1, 0x0d, 0xc6,
    -	0xe8, 0x0d, 0x40, 0x22, 0xf7, 0xc9, 0x7c, 0x8d, 0xa5, 0xea, 0xd2, 0x5c, 0x1e, 0xd5, 0x4d, 0xb7,
    -	0x9a, 0xa2, 0xc0, 0x19, 0xa5, 0xd0, 0x65, 0xba, 0x73, 0xe9, 0x50, 0xb1, 0x33, 0xaa, 0xcc, 0xc7,
    -	0x4b, 0xa4, 0x15, 0x10, 0xe6, 0xa5, 0x67, 0x72, 0xc4, 0x89, 0x72, 0xa8, 0x0e, 0xa7, 0x9c, 0x76,
    -	0xe4, 0x33, 0xc3, 0x95, 0x49, 0xba, 0xe1, 0xef, 0x10, 0x8f, 0xd9, 0x8c, 0x87, 0x58, 0x90, 0xc7,
    -	0x53, 0xf3, 0x1d, 0xe8, 0x70, 0x47, 0x2e, 0x54, 0xea, 0xa7, 0x7d, 0xa1, 0xd9, 0x94, 0x8c, 0x87,
    -	0xc3, 0xdc, 0xd0, 0x2a, 0x29, 0xd0, 0x8b, 0x30, 0xb2, 0xed, 0x87, 0xd1, 0x3a, 0x89, 0x6e, 0xf9,
    -	0xc1, 0x8e, 0x08, 0xb6, 0x1e, 0x27, 0xb8, 0x88, 0x51, 0x58, 0xa7, 0xa3, 0xd7, 0x7a, 0xe6, 0xd1,
    -	0x54, 0x5e, 0x62, 0xce, 0x24, 0x43, 0xf1, 0x1e, 0x73, 0x99, 0x83, 0xb1, 0xc4, 0x4b, 0xd2, 0x72,
    -	0x65, 0x91, 0x39, 0x86, 0x24, 0x48, 0xcb, 0x95, 0x45, 0x2c, 0xf1, 0x74, 0xba, 0x86, 0xdb, 0x4e,
    -	0x40, 0x2a, 0x81, 0x5f, 0x23, 0xa1, 0x96, 0x56, 0xe5, 0x61, 0x1e, 0x4a, 0x9e, 0x4e, 0xd7, 0x6a,
    -	0x16, 0x01, 0xce, 0x2e, 0x87, 0x48, 0x3a, 0x59, 0xe8, 0x78, 0xbe, 0x45, 0x2f, 0x2d, 0x33, 0xf5,
    -	0x98, 0x2f, 0xd4, 0x83, 0x49, 0x95, 0xa6, 0x94, 0x07, 0x8f, 0x0f, 0x67, 0x26, 0xd8, 0xdc, 0xee,
    -	0x3d, 0xf2, 0xbc, 0xb2, 0x91, 0x96, 0x13, 0x9c, 0x70, 0x8a, 0xb7, 0x11, 0x45, 0x74, 0xb2, 0x6b,
    -	0x14, 0xd1, 0x0b, 0x30, 0x1c, 0xb6, 0x6f, 0xd6, 0xfd, 0xa6, 0xe3, 0x7a, 0xcc, 0x31, 0x44, 0xbb,
    -	0x5f, 0x56, 0x25, 0x02, 0xc7, 0x34, 0x68, 0x05, 0x86, 0x1c, 0x69, 0x00, 0x45, 0xf9, 0x01, 0xd2,
    -	0x94, 0xd9, 0x93, 0xc7, 0x0c, 0x92, 0x26, 0x4f, 0x55, 0x16, 0xbd, 0x0a, 0x63, 0x22, 0x56, 0x84,
    -	0xc8, 0xec, 0x3d, 0x6d, 0xbe, 0xb2, 0xad, 0xea, 0x48, 0x6c, 0xd2, 0xa2, 0x6b, 0x30, 0x12, 0xf9,
    -	0x0d, 0xf6, 0x54, 0x94, 0x8a, 0x92, 0x27, 0xf2, 0xe3, 0x98, 0x6e, 0x28, 0x32, 0x5d, 0x35, 0xaf,
    -	0x8a, 0x62, 0x9d, 0x0f, 0xda, 0xe0, 0xf3, 0x9d, 0x25, 0x51, 0x21, 0xa1, 0x48, 0x0d, 0x7d, 0x3a,
    -	0xcf, 0xab, 0x8f, 0x91, 0x99, 0xcb, 0x41, 0x94, 0xc4, 0x3a, 0x1b, 0x74, 0x09, 0xa6, 0x5a, 0x81,
    -	0xeb, 0xb3, 0x39, 0xa1, 0x0c, 0xba, 0x33, 0x66, 0xca, 0xc4, 0x4a, 0x92, 0x00, 0xa7, 0xcb, 0xb0,
    -	0x50, 0x1f, 0x02, 0x38, 0x73, 0x92, 0xa7, 0x7d, 0xe2, 0xd7, 0x75, 0x0e, 0xc3, 0x0a, 0x8b, 0xd6,
    -	0xd8, 0x4e, 0xcc, 0x35, 0x4d, 0x33, 0xb3, 0xf9, 0x31, 0xd9, 0x74, 0x8d, 0x14, 0x17, 0x90, 0xd5,
    -	0x5f, 0x1c, 0x73, 0x40, 0x75, 0x2d, 0xdb, 0x32, 0xbd, 0x66, 0x84, 0x33, 0xa7, 0x3a, 0xb8, 0x95,
    -	0x26, 0x6e, 0x7e, 0xb1, 0x40, 0x60, 0x80, 0x43, 0x9c, 0xe0, 0x89, 0x3e, 0x0a, 0x93, 0xe2, 0x1d,
    -	0x7c, 0xdc, 0x4d, 0xa7, 0xe3, 0xa7, 0x37, 0x38, 0x81, 0xc3, 0x29, 0x6a, 0x9e, 0x76, 0xc9, 0xb9,
    -	0xd9, 0x20, 0x62, 0xeb, 0x5b, 0x75, 0xbd, 0x9d, 0x70, 0xe6, 0x0c, 0xdb, 0x1f, 0x44, 0xda, 0xa5,
    -	0x24, 0x16, 0x67, 0x94, 0x40, 0x1b, 0x30, 0xd9, 0x0a, 0x08, 0x69, 0xb2, 0xcb, 0x84, 0x38, 0xcf,
    -	0x4a, 0x3c, 0xd2, 0x0d, 0x6d, 0x49, 0x25, 0x81, 0x3b, 0xc8, 0x80, 0xe1, 0x14, 0x07, 0x74, 0x0b,
    -	0x86, 0xfc, 0x5d, 0x12, 0x6c, 0x13, 0xa7, 0x3e, 0x73, 0xb6, 0xc3, 0x83, 0x30, 0x71, 0xb8, 0x5d,
    -	0x15, 0xb4, 0x09, 0x7f, 0x19, 0x09, 0xee, 0xee, 0x2f, 0x23, 0x2b, 0x43, 0xff, 0x8b, 0x05, 0x27,
    -	0xa5, 0x05, 0xaa, 0xda, 0xa2, 0xbd, 0xbe, 0xe8, 0x7b, 0x61, 0x14, 0xf0, 0xd8, 0x2c, 0x8f, 0xe4,
    -	0xc7, 0x2b, 0xd9, 0xc8, 0x29, 0xa4, 0x94, 0xdd, 0x27, 0xf3, 0x28, 0x42, 0x9c, 0x5f, 0x23, 0xbd,
    -	0xfe, 0x86, 0x24, 0x92, 0x9b, 0xd1, 0x7c, 0xb8, 0xf2, 0xe6, 0xd2, 0xfa, 0xcc, 0xa3, 0x3c, 0xb0,
    -	0x0c, 0x5d, 0x0c, 0xd5, 0x24, 0x12, 0xa7, 0xe9, 0xd1, 0x45, 0x28, 0xf8, 0xe1, 0xcc, 0x63, 0x1d,
    -	0x12, 0x74, 0xfb, 0xf5, 0xab, 0x55, 0xee, 0x37, 0x79, 0xb5, 0x8a, 0x0b, 0x7e, 0x28, 0x53, 0x1f,
    -	0xd1, 0x3b, 0x5f, 0x38, 0xf3, 0x38, 0x57, 0x8d, 0xca, 0xd4, 0x47, 0x0c, 0x88, 0x63, 0x3c, 0xda,
    -	0x86, 0x89, 0xd0, 0xb8, 0x5b, 0x87, 0x33, 0xe7, 0x58, 0x4f, 0x3d, 0x9e, 0x37, 0x68, 0x06, 0xb5,
    -	0x96, 0x93, 0xc4, 0xe4, 0x82, 0x93, 0x6c, 0xf9, 0xea, 0xd2, 0x6e, 0xf7, 0xe1, 0xcc, 0x13, 0x5d,
    -	0x56, 0x97, 0x46, 0xac, 0xaf, 0x2e, 0x9d, 0x07, 0x4e, 0xf0, 0x9c, 0xfd, 0x2e, 0x98, 0x4a, 0x89,
    -	0x4b, 0x87, 0x79, 0x23, 0x30, 0xbb, 0x03, 0x63, 0xc6, 0x94, 0x7c, 0xa0, 0x2e, 0x24, 0xbf, 0x3f,
    -	0x0c, 0xc3, 0xca, 0xb4, 0x8f, 0x2e, 0x98, 0x5e, 0x23, 0x27, 0x93, 0x5e, 0x23, 0x43, 0x15, 0xbf,
    -	0x6e, 0x38, 0x8a, 0x6c, 0x64, 0x04, 0x22, 0xcd, 0xdb, 0x00, 0x7b, 0x7f, 0xc8, 0xa4, 0x99, 0x2b,
    -	0x8a, 0x3d, 0xbb, 0x9f, 0xf4, 0x75, 0xb4, 0x80, 0x5c, 0x82, 0x29, 0xcf, 0x67, 0x32, 0x3a, 0xa9,
    -	0x4b, 0x01, 0x8c, 0xc9, 0x59, 0xc3, 0x7a, 0x3c, 0xaf, 0x04, 0x01, 0x4e, 0x97, 0xa1, 0x15, 0x72,
    -	0x41, 0x29, 0x69, 0x72, 0xe1, 0x72, 0x14, 0x16, 0x58, 0x7a, 0x37, 0xe4, 0xbf, 0xc2, 0x99, 0xc9,
    -	0xfc, 0xbb, 0x21, 0x2f, 0x94, 0x14, 0xc6, 0x42, 0x29, 0x8c, 0x31, 0x0b, 0x43, 0xcb, 0xaf, 0x97,
    -	0x2b, 0x42, 0xcc, 0xd7, 0x42, 0x84, 0xd7, 0xcb, 0x15, 0xcc, 0x71, 0x68, 0x1e, 0x06, 0xd8, 0x0f,
    -	0x19, 0x27, 0x25, 0x6f, 0x99, 0x96, 0x2b, 0x5a, 0xea, 0x45, 0x56, 0x00, 0x8b, 0x82, 0x4c, 0x83,
    -	0x4c, 0xef, 0x46, 0x4c, 0x83, 0x3c, 0x78, 0x8f, 0x1a, 0x64, 0xc9, 0x00, 0xc7, 0xbc, 0xd0, 0x6d,
    -	0x38, 0x6e, 0xdc, 0x47, 0xd5, 0xcb, 0x2e, 0xc8, 0x37, 0x2e, 0x27, 0x88, 0x17, 0x4e, 0x8b, 0x46,
    -	0x1f, 0x2f, 0x67, 0x71, 0xc2, 0xd9, 0x15, 0xa0, 0x06, 0x4c, 0xd5, 0x52, 0xb5, 0x0e, 0xf5, 0x5e,
    -	0xab, 0x9a, 0x17, 0xe9, 0x1a, 0xd3, 0x8c, 0xd1, 0xab, 0x30, 0xf4, 0x8e, 0xcf, 0x1d, 0xc1, 0xc4,
    -	0xd5, 0x44, 0x46, 0x15, 0x19, 0x7a, 0xf3, 0x6a, 0x95, 0xc1, 0x0f, 0xf6, 0x4b, 0x23, 0x15, 0xbf,
    -	0x2e, 0xff, 0x62, 0x55, 0x00, 0xfd, 0x90, 0x05, 0xb3, 0xe9, 0x0b, 0xaf, 0x6a, 0xf4, 0x58, 0xef,
    -	0x8d, 0xb6, 0x45, 0xa5, 0xb3, 0xcb, 0xb9, 0xec, 0x70, 0x87, 0xaa, 0xd0, 0x87, 0xe9, 0x7a, 0x0a,
    -	0xdd, 0x3b, 0x44, 0xe4, 0xad, 0x7e, 0x24, 0x5e, 0x4f, 0x14, 0x7a, 0xb0, 0x5f, 0x9a, 0xe0, 0x3b,
    -	0xa3, 0x7b, 0x47, 0x05, 0x33, 0xe7, 0x05, 0xd0, 0xf7, 0xc2, 0xf1, 0x20, 0xad, 0xa5, 0x25, 0x52,
    -	0x08, 0x7f, 0xaa, 0x97, 0x5d, 0x36, 0x39, 0xe0, 0x38, 0x8b, 0x21, 0xce, 0xae, 0xc7, 0xfe, 0x75,
    -	0x8b, 0xe9, 0xd0, 0x45, 0xb3, 0x48, 0xd8, 0x6e, 0x1c, 0x45, 0xb6, 0xfc, 0x65, 0xc3, 0x3e, 0x7d,
    -	0xcf, 0xde, 0x53, 0xbf, 0x6b, 0x31, 0xef, 0xa9, 0x23, 0x7c, 0x07, 0xf6, 0x26, 0x0c, 0x45, 0xa2,
    -	0xb6, 0x4e, 0x09, 0xfe, 0xb5, 0x46, 0x31, 0x0f, 0x32, 0x75, 0xc9, 0x91, 0x50, 0xac, 0xd8, 0xd8,
    -	0xff, 0x84, 0x8f, 0x80, 0xc4, 0x1c, 0x81, 0x19, 0x70, 0xc9, 0x34, 0x03, 0x96, 0xba, 0x7c, 0x41,
    -	0x8e, 0x39, 0xf0, 0x1f, 0x9b, 0xed, 0x66, 0xca, 0xbd, 0xf7, 0xba, 0xdb, 0x9e, 0xfd, 0x05, 0x0b,
    -	0x20, 0xce, 0x1e, 0xd1, 0x43, 0x9e, 0xda, 0x97, 0xe9, 0xb5, 0xc6, 0x8f, 0xfc, 0x9a, 0xdf, 0x10,
    -	0x46, 0x90, 0x53, 0xb1, 0x25, 0x92, 0xc3, 0x0f, 0xb4, 0xdf, 0x58, 0x51, 0xa3, 0x92, 0x0c, 0xe7,
    -	0x5a, 0x8c, 0x6d, 0xe3, 0x46, 0x28, 0xd7, 0x2f, 0x5b, 0x70, 0x2c, 0xeb, 0x51, 0x01, 0xbd, 0x24,
    -	0x73, 0x35, 0xa7, 0x72, 0xa9, 0x54, 0xa3, 0x79, 0x5d, 0xc0, 0xb1, 0xa2, 0xe8, 0x39, 0x01, 0xf0,
    -	0xe1, 0x32, 0x1b, 0x5c, 0x85, 0xb1, 0x4a, 0x40, 0x34, 0xf9, 0xe2, 0x75, 0x1e, 0xad, 0x87, 0xb7,
    -	0xe7, 0x99, 0x43, 0x47, 0xea, 0xb1, 0xbf, 0x52, 0x80, 0x63, 0xdc, 0x31, 0x68, 0x7e, 0xd7, 0x77,
    -	0xeb, 0x15, 0xbf, 0x2e, 0x9e, 0x82, 0xbe, 0x05, 0xa3, 0x2d, 0x4d, 0x37, 0xdd, 0x29, 0x4a, 0xb7,
    -	0xae, 0xc3, 0x8e, 0xb5, 0x69, 0x3a, 0x14, 0x1b, 0xbc, 0x50, 0x1d, 0x46, 0xc9, 0xae, 0x5b, 0x53,
    -	0xde, 0x25, 0x85, 0x43, 0x1f, 0xd2, 0xaa, 0x96, 0x65, 0x8d, 0x0f, 0x36, 0xb8, 0xf6, 0xec, 0xce,
    -	0xab, 0x89, 0x68, 0x7d, 0x5d, 0x3c, 0x4a, 0x7e, 0xdc, 0x82, 0x87, 0x72, 0x62, 0x7a, 0xd3, 0xea,
    -	0x6e, 0x31, 0x17, 0x2c, 0x31, 0x6d, 0x55, 0x75, 0xdc, 0x31, 0x0b, 0x0b, 0x2c, 0xfa, 0x18, 0x00,
    -	0x77, 0xac, 0x22, 0x5e, 0xad, 0x6b, 0xf0, 0x63, 0x23, 0x5a, 0xab, 0x16, 0x78, 0x53, 0x96, 0xc7,
    -	0x1a, 0x2f, 0xfb, 0xcb, 0x7d, 0xd0, 0xcf, 0x1c, 0x79, 0x50, 0x05, 0x06, 0xb7, 0x79, 0x5c, 0xb8,
    -	0x8e, 0xe3, 0x46, 0x69, 0x65, 0xa0, 0xb9, 0x78, 0xdc, 0x34, 0x28, 0x96, 0x6c, 0xd0, 0x1a, 0x4c,
    -	0xf3, 0x2c, 0x84, 0x8d, 0x25, 0xd2, 0x70, 0xf6, 0xa4, 0xda, 0x97, 0x27, 0xd6, 0x57, 0xea, 0xef,
    -	0x72, 0x9a, 0x04, 0x67, 0x95, 0x43, 0xaf, 0xc3, 0x38, 0xbd, 0x86, 0xfb, 0xed, 0x48, 0x72, 0xe2,
    -	0xf9, 0x07, 0xd5, 0xcd, 0x64, 0xc3, 0xc0, 0xe2, 0x04, 0x35, 0x7a, 0x15, 0xc6, 0x5a, 0x29, 0x05,
    -	0x77, 0x7f, 0xac, 0x09, 0x32, 0x95, 0xda, 0x26, 0x2d, 0x7b, 0x57, 0xd0, 0x66, 0xaf, 0x28, 0x36,
    -	0xb6, 0x03, 0x12, 0x6e, 0xfb, 0x8d, 0x3a, 0x93, 0x80, 0xfb, 0xb5, 0x77, 0x05, 0x09, 0x3c, 0x4e,
    -	0x95, 0xa0, 0x5c, 0x36, 0x1d, 0xb7, 0xd1, 0x0e, 0x48, 0xcc, 0x65, 0xc0, 0xe4, 0xb2, 0x92, 0xc0,
    -	0xe3, 0x54, 0x89, 0xee, 0x9a, 0xfb, 0xc1, 0xfb, 0xa3, 0xb9, 0xb7, 0x7f, 0xa6, 0x00, 0xc6, 0xd0,
    -	0x7e, 0x07, 0xe7, 0x45, 0x7c, 0x0d, 0xfa, 0xb6, 0x82, 0x56, 0x4d, 0x38, 0xad, 0x65, 0x7e, 0x59,
    -	0x9c, 0x14, 0x9d, 0x7f, 0x19, 0xfd, 0x8f, 0x59, 0x29, 0xba, 0xc6, 0x8f, 0x57, 0x02, 0x9f, 0x1e,
    -	0x72, 0x32, 0x74, 0xa4, 0x7a, 0xbe, 0x33, 0x28, 0x03, 0x51, 0x74, 0x08, 0xb2, 0x2c, 0xde, 0x20,
    -	0x70, 0x0e, 0x86, 0x7f, 0x57, 0x55, 0x84, 0x9b, 0x91, 0x5c, 0xd0, 0x45, 0x18, 0x11, 0xa9, 0xea,
    -	0xd8, 0x2b, 0x13, 0xbe, 0x98, 0x98, 0x3f, 0xda, 0x52, 0x0c, 0xc6, 0x3a, 0x8d, 0xfd, 0xc3, 0x05,
    -	0x98, 0xce, 0x78, 0x26, 0xc8, 0x8f, 0x91, 0x2d, 0x37, 0x8c, 0x54, 0xde, 0x75, 0xed, 0x18, 0xe1,
    -	0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0x9e, 0xe1, 0x08, 0xec, 0x21,
    -	0x33, 0x98, 0x9f, 0x85, 0xbe, 0x76, 0x48, 0x64, 0xa0, 0x74, 0x75, 0x6c, 0x33, 0xd3, 0x39, 0xc3,
    -	0xd0, 0x2b, 0xe0, 0x96, 0xb2, 0x42, 0x6b, 0x57, 0x40, 0x6e, 0x87, 0xe6, 0x38, 0xda, 0xb8, 0x88,
    -	0x78, 0x8e, 0x17, 0x89, 0x8b, 0x62, 0x1c, 0xe7, 0x97, 0x41, 0xb1, 0xc0, 0xda, 0x5f, 0x2a, 0xc2,
    -	0xc9, 0xdc, 0x87, 0xc3, 0xb4, 0xe9, 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0x39, 0xfa, 0xf1, 0xd8, 0xbe,
    -	0xa4, 0xb5, 0xbd, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x73, 0xd0, 0xcf, 0x94, 0xe2, 0xa9, 0x0c, 0xf4,
    -	0x0b, 0x4b, 0x3c, 0x02, 0x23, 0x47, 0x6b, 0xa7, 0x7a, 0xb1, 0xe3, 0xa9, 0xfe, 0x28, 0x95, 0x60,
    -	0xfc, 0x46, 0xf2, 0x40, 0xa1, 0xcd, 0xf5, 0xfd, 0x06, 0x66, 0x48, 0xf4, 0xb8, 0xe8, 0xaf, 0x84,
    -	0x67, 0x1b, 0x76, 0xea, 0x7e, 0xa8, 0x75, 0xda, 0x93, 0x30, 0xb8, 0x43, 0xf6, 0x02, 0xd7, 0xdb,
    -	0x4a, 0x7a, 0x3c, 0x5e, 0xe1, 0x60, 0x2c, 0xf1, 0x66, 0x32, 0xe4, 0xc1, 0xfb, 0x91, 0x0c, 0x59,
    -	0x9f, 0x01, 0x43, 0x5d, 0xc5, 0x93, 0x1f, 0x29, 0xc2, 0x04, 0x5e, 0x58, 0x7a, 0x7f, 0x20, 0xae,
    -	0xa5, 0x07, 0xe2, 0x7e, 0xe4, 0x0c, 0x3e, 0xdc, 0x68, 0xfc, 0xb2, 0x05, 0x13, 0x2c, 0x61, 0x9e,
    -	0x88, 0xfa, 0xe1, 0xfa, 0xde, 0x11, 0x5c, 0x05, 0x1e, 0x85, 0xfe, 0x80, 0x56, 0x9a, 0x4c, 0x3d,
    -	0xcf, 0x5a, 0x82, 0x39, 0x0e, 0x9d, 0x82, 0x3e, 0xd6, 0x04, 0x3a, 0x78, 0xa3, 0x7c, 0x0b, 0x5e,
    -	0x72, 0x22, 0x07, 0x33, 0x28, 0x8b, 0x3f, 0x88, 0x49, 0xab, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, 0x0b,
    -	0xef, 0x8d, 0x90, 0x22, 0x99, 0x4d, 0x7b, 0x77, 0xf1, 0x07, 0xb3, 0x59, 0x76, 0xbe, 0x66, 0xff,
    -	0x75, 0x01, 0xce, 0x64, 0x96, 0xeb, 0x39, 0xfe, 0x60, 0xe7, 0xd2, 0x0f, 0x32, 0xf7, 0x57, 0xf1,
    -	0x08, 0xfd, 0xc9, 0xfb, 0x7a, 0x95, 0xfe, 0xfb, 0x7b, 0x08, 0x0b, 0x98, 0xd9, 0x65, 0xef, 0x91,
    -	0xb0, 0x80, 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x43, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, 0x3c,
    -	0xdd, 0x67, 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x0f, 0x13,
    -	0x4d, 0xd7, 0xa3, 0x9b, 0xcf, 0x9e, 0x29, 0x8a, 0x2b, 0x5b, 0xc6, 0x9a, 0x89, 0xc6, 0x49, 0x7a,
    -	0xe4, 0x6a, 0x21, 0x03, 0xf9, 0xd7, 0xbd, 0x7a, 0xa8, 0x55, 0x37, 0x67, 0xba, 0x73, 0xa8, 0x5e,
    -	0xcc, 0x08, 0x1f, 0xb8, 0xa6, 0xe9, 0x89, 0x8a, 0xbd, 0xeb, 0x89, 0x46, 0xb3, 0x75, 0x44, 0xb3,
    -	0xaf, 0xc2, 0xd8, 0x3d, 0xdb, 0x46, 0xec, 0x6f, 0x14, 0xe1, 0xe1, 0x0e, 0xcb, 0x9e, 0xef, 0xf5,
    -	0xc6, 0x18, 0x68, 0x7b, 0x7d, 0x6a, 0x1c, 0x2a, 0x70, 0x6c, 0xb3, 0xdd, 0x68, 0xec, 0xb1, 0x87,
    -	0x53, 0xa4, 0x2e, 0x29, 0x84, 0x4c, 0x29, 0x95, 0x23, 0xc7, 0x56, 0x32, 0x68, 0x70, 0x66, 0x49,
    -	0x7a, 0xc5, 0xa2, 0x27, 0xc9, 0x9e, 0x62, 0x95, 0xb8, 0x62, 0x61, 0x1d, 0x89, 0x4d, 0x5a, 0x74,
    -	0x09, 0xa6, 0x9c, 0x5d, 0xc7, 0xe5, 0x29, 0x1e, 0x24, 0x03, 0x7e, 0xc7, 0x52, 0xba, 0xe8, 0xf9,
    -	0x24, 0x01, 0x4e, 0x97, 0x41, 0x6f, 0x00, 0xf2, 0x6f, 0xb2, 0xc7, 0x18, 0xf5, 0x4b, 0xc4, 0x13,
    -	0x56, 0x77, 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x9a, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0xc1, 0xeb,
    -	0x06, 0xf2, 0x83, 0xd7, 0x75, 0xde, 0x17, 0xbb, 0xa6, 0x9d, 0xbb, 0x08, 0x63, 0x87, 0x74, 0x31,
    -	0xb6, 0xff, 0x9d, 0x05, 0x4a, 0x41, 0x6c, 0x06, 0x9f, 0x7e, 0x95, 0xf9, 0x40, 0x73, 0xd5, 0xb6,
    -	0x16, 0x6f, 0xea, 0xb8, 0xe6, 0x03, 0x1d, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0xbb, 0x6c,
    -	0xdc, 0x0a, 0x44, 0x6c, 0x4c, 0x45, 0x81, 0x3e, 0x0e, 0x83, 0x75, 0x77, 0xd7, 0x0d, 0x85, 0x72,
    -	0xec, 0xd0, 0xc6, 0xb8, 0x78, 0xeb, 0x5c, 0xe2, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x48, 0x21, 0xee,
    -	0x93, 0x37, 0xdb, 0x7e, 0xe4, 0x1c, 0xc1, 0x49, 0x7e, 0xc9, 0x38, 0xc9, 0x1f, 0xcf, 0x1e, 0x68,
    -	0xad, 0x49, 0xb9, 0x27, 0xf8, 0xd5, 0xc4, 0x09, 0xfe, 0x44, 0x77, 0x56, 0x9d, 0x4f, 0xee, 0x5f,
    -	0xb3, 0x60, 0xca, 0xa0, 0x3f, 0x82, 0x03, 0x64, 0xc5, 0x3c, 0x40, 0x1e, 0xe9, 0xfa, 0x0d, 0x39,
    -	0x07, 0xc7, 0x0f, 0x16, 0x13, 0x6d, 0x67, 0x07, 0xc6, 0x3b, 0xd0, 0xb7, 0xed, 0x04, 0x75, 0x71,
    -	0x2f, 0xbe, 0xd0, 0x53, 0x5f, 0xcf, 0x5d, 0x76, 0x02, 0xe1, 0xa9, 0xf0, 0x8c, 0xec, 0x75, 0x0a,
    -	0xea, 0xea, 0xa5, 0xc0, 0xaa, 0x42, 0x2f, 0xc3, 0x40, 0x58, 0xf3, 0x5b, 0xea, 0x5d, 0x16, 0xcb,
    -	0x65, 0x5c, 0x65, 0x90, 0x83, 0xfd, 0x12, 0x32, 0xab, 0xa3, 0x60, 0x2c, 0xe8, 0xd1, 0x5b, 0x30,
    -	0xc6, 0x7e, 0x29, 0xb7, 0xc1, 0x62, 0xbe, 0x06, 0xa3, 0xaa, 0x13, 0x72, 0x9f, 0x5a, 0x03, 0x84,
    -	0x4d, 0x56, 0xb3, 0x5b, 0x30, 0xac, 0x3e, 0xeb, 0x81, 0x5a, 0xbb, 0xff, 0x4d, 0x11, 0xa6, 0x33,
    -	0xe6, 0x1c, 0x0a, 0x8d, 0x91, 0xb8, 0xd8, 0xe3, 0x54, 0x7d, 0x97, 0x63, 0x11, 0xb2, 0x0b, 0x54,
    -	0x5d, 0xcc, 0xad, 0x9e, 0x2b, 0xbd, 0x16, 0x92, 0x64, 0xa5, 0x14, 0xd4, 0xbd, 0x52, 0x5a, 0xd9,
    -	0x91, 0x75, 0x35, 0xad, 0x48, 0xb5, 0xf4, 0x81, 0x8e, 0xe9, 0x6f, 0xf5, 0xc1, 0xb1, 0xac, 0x98,
    -	0xc5, 0xe8, 0xb3, 0x89, 0x04, 0xe9, 0x2f, 0x74, 0xea, 0x61, 0xbd, 0x24, 0xcf, 0x9a, 0x2e, 0x42,
    -	0x85, 0xce, 0x99, 0x29, 0xd3, 0xbb, 0x76, 0xb3, 0xa8, 0x93, 0x85, 0xf0, 0x09, 0x78, 0x62, 0x7b,
    -	0xb9, 0x7d, 0x7c, 0xa8, 0xe7, 0x06, 0x88, 0x8c, 0xf8, 0x61, 0xc2, 0x25, 0x49, 0x82, 0xbb, 0xbb,
    -	0x24, 0xc9, 0x9a, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0xd7, 0xa5, 0xd8, 0x7d, 0x0b, 0xe3, 0x8e, 0x2e,
    -	0x6a, 0x03, 0x16, 0x0e, 0x2e, 0x82, 0xc1, 0xac, 0x0b, 0x23, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3,
    -	0x43, 0x0f, 0x3e, 0xad, 0x0b, 0x1e, 0xe8, 0x04, 0xfa, 0x71, 0x0b, 0x12, 0x8f, 0x6a, 0x94, 0x52,
    -	0xce, 0xca, 0x55, 0xca, 0x9d, 0x85, 0xbe, 0xc0, 0x6f, 0x90, 0x64, 0xf6, 0x6d, 0xec, 0x37, 0x08,
    -	0x66, 0x18, 0x4a, 0x11, 0xc5, 0xaa, 0x96, 0x51, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x42, 0x7f,
    -	0x83, 0xec, 0x92, 0x46, 0x32, 0x49, 0xe2, 0x2a, 0x05, 0x62, 0x8e, 0xb3, 0x7f, 0xb9, 0x0f, 0x4e,
    -	0x77, 0x8c, 0xa7, 0x45, 0x2f, 0x63, 0x5b, 0x4e, 0x44, 0x6e, 0x39, 0x7b, 0xc9, 0x1c, 0x66, 0x97,
    -	0x38, 0x18, 0x4b, 0x3c, 0x7b, 0x62, 0xca, 0xf3, 0x83, 0x24, 0x54, 0x98, 0x22, 0x2d, 0x88, 0xc0,
    -	0x9a, 0x2a, 0xb1, 0xe2, 0xfd, 0x50, 0x89, 0x3d, 0x07, 0x10, 0x86, 0x0d, 0xee, 0x16, 0x58, 0x17,
    -	0x6f, 0x57, 0xe3, 0x3c, 0x32, 0xd5, 0x55, 0x81, 0xc1, 0x1a, 0x15, 0x5a, 0x82, 0xc9, 0x56, 0xe0,
    -	0x47, 0x5c, 0x23, 0xbc, 0xc4, 0x3d, 0x67, 0xfb, 0xcd, 0x50, 0x46, 0x95, 0x04, 0x1e, 0xa7, 0x4a,
    -	0xa0, 0x17, 0x61, 0x44, 0x84, 0x37, 0xaa, 0xf8, 0x7e, 0x43, 0x28, 0xa1, 0x94, 0x33, 0x69, 0x35,
    -	0x46, 0x61, 0x9d, 0x4e, 0x2b, 0xc6, 0xd4, 0xcc, 0x83, 0x99, 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b,
    -	0x84, 0x42, 0x1f, 0xea, 0x29, 0x14, 0x7a, 0xac, 0x96, 0x1b, 0xee, 0xd9, 0xea, 0x09, 0x5d, 0x15,
    -	0x59, 0x5f, 0xed, 0x83, 0x69, 0x31, 0x71, 0x1e, 0xf4, 0x74, 0xb9, 0x96, 0x9e, 0x2e, 0xf7, 0x43,
    -	0x71, 0xf7, 0xfe, 0x9c, 0x39, 0xea, 0x39, 0xf3, 0xa3, 0x16, 0x98, 0x92, 0x1a, 0xfa, 0x9f, 0x72,
    -	0x93, 0x40, 0xbe, 0x98, 0x2b, 0xf9, 0xc5, 0x71, 0x92, 0xdf, 0x5d, 0x3a, 0x48, 0xfb, 0x8f, 0x2d,
    -	0x78, 0xa4, 0x2b, 0x47, 0xb4, 0x0c, 0xc3, 0x4c, 0x9c, 0xd4, 0x2e, 0x7a, 0x4f, 0x28, 0xcf, 0x7a,
    -	0x89, 0xc8, 0x91, 0x6e, 0xe3, 0x92, 0x68, 0x39, 0x95, 0x6d, 0xf3, 0xc9, 0x8c, 0x6c, 0x9b, 0xc7,
    -	0x8d, 0xee, 0xb9, 0xc7, 0x74, 0x9b, 0x5f, 0xa4, 0x27, 0x8e, 0xf9, 0x72, 0xee, 0x43, 0x86, 0xd2,
    -	0xd1, 0x4e, 0x28, 0x1d, 0x91, 0x49, 0xad, 0x9d, 0x21, 0x1f, 0x85, 0x49, 0x16, 0xf7, 0x90, 0xbd,
    -	0xf3, 0x10, 0x4f, 0xee, 0x0a, 0xb1, 0x2f, 0xf7, 0x6a, 0x02, 0x87, 0x53, 0xd4, 0xf6, 0x5f, 0x16,
    -	0x61, 0x80, 0x2f, 0xbf, 0x23, 0xb8, 0x5e, 0x3e, 0x0d, 0xc3, 0x6e, 0xb3, 0xd9, 0xe6, 0x09, 0x14,
    -	0xfb, 0x63, 0xcf, 0xe0, 0xb2, 0x04, 0xe2, 0x18, 0x8f, 0x56, 0x84, 0xbe, 0xbb, 0x43, 0x68, 0x65,
    -	0xde, 0xf0, 0xb9, 0x25, 0x27, 0x72, 0xb8, 0xac, 0xa4, 0xce, 0xd9, 0x58, 0x33, 0x8e, 0x3e, 0x05,
    -	0x10, 0x46, 0x81, 0xeb, 0x6d, 0x51, 0x98, 0x88, 0xbf, 0xff, 0x54, 0x07, 0x6e, 0x55, 0x45, 0xcc,
    -	0x79, 0xc6, 0x7b, 0x8e, 0x42, 0x60, 0x8d, 0x23, 0x9a, 0x33, 0x4e, 0xfa, 0xd9, 0xc4, 0xd8, 0x01,
    -	0xe7, 0x1a, 0x8f, 0xd9, 0xec, 0x4b, 0x30, 0xac, 0x98, 0x77, 0xd3, 0x7e, 0x8d, 0xea, 0x62, 0xd1,
    -	0x47, 0x60, 0x22, 0xd1, 0xb6, 0x43, 0x29, 0xcf, 0x7e, 0xc5, 0x82, 0x09, 0xde, 0x98, 0x65, 0x6f,
    -	0x57, 0x9c, 0x06, 0x77, 0xe0, 0x58, 0x23, 0x63, 0x57, 0x16, 0xc3, 0xdf, 0xfb, 0x2e, 0xae, 0x94,
    -	0x65, 0x59, 0x58, 0x9c, 0x59, 0x07, 0x3a, 0x4f, 0x57, 0x1c, 0xdd, 0x75, 0x9d, 0x86, 0x88, 0xa1,
    -	0x30, 0xca, 0x57, 0x1b, 0x87, 0x61, 0x85, 0xb5, 0xff, 0xd4, 0x82, 0x29, 0xde, 0xf2, 0x2b, 0x64,
    -	0x4f, 0xed, 0x4d, 0xdf, 0xca, 0xb6, 0x8b, 0xd4, 0xbd, 0x85, 0x9c, 0xd4, 0xbd, 0xfa, 0xa7, 0x15,
    -	0x3b, 0x7e, 0xda, 0x57, 0x2c, 0x10, 0x33, 0xe4, 0x08, 0xf4, 0x19, 0xdf, 0x65, 0xea, 0x33, 0x66,
    -	0xf3, 0x17, 0x41, 0x8e, 0x22, 0xe3, 0xef, 0x2d, 0x98, 0xe4, 0x04, 0xb1, 0xad, 0xfe, 0x5b, 0x3a,
    -	0x0e, 0x0b, 0xe6, 0x17, 0x65, 0x3a, 0x5f, 0x5e, 0x21, 0x7b, 0x1b, 0x7e, 0xc5, 0x89, 0xb6, 0xb3,
    -	0x3f, 0xca, 0x18, 0xac, 0xbe, 0x8e, 0x83, 0x55, 0x97, 0x0b, 0xc8, 0x48, 0x37, 0xd7, 0x25, 0x08,
    -	0xc1, 0x61, 0xd3, 0xcd, 0xd9, 0x7f, 0x65, 0x01, 0xe2, 0xd5, 0x18, 0x82, 0x1b, 0x15, 0x87, 0x18,
    -	0x54, 0x3b, 0xe8, 0xe2, 0xad, 0x49, 0x61, 0xb0, 0x46, 0x75, 0x5f, 0xba, 0x27, 0xe1, 0x70, 0x51,
    -	0xec, 0xee, 0x70, 0x71, 0x88, 0x1e, 0xfd, 0xca, 0x20, 0x24, 0x5f, 0xf6, 0xa1, 0xeb, 0x30, 0x5a,
    -	0x73, 0x5a, 0xce, 0x4d, 0xb7, 0xe1, 0x46, 0x2e, 0x09, 0x3b, 0x79, 0x63, 0x2d, 0x6a, 0x74, 0xc2,
    -	0x44, 0xae, 0x41, 0xb0, 0xc1, 0x07, 0xcd, 0x01, 0xb4, 0x02, 0x77, 0xd7, 0x6d, 0x90, 0x2d, 0xa6,
    -	0x76, 0x61, 0x51, 0x5b, 0xb8, 0x6b, 0x98, 0x84, 0x62, 0x8d, 0x22, 0x23, 0x54, 0x43, 0xf1, 0x01,
    -	0x87, 0x6a, 0x80, 0x23, 0x0b, 0xd5, 0xd0, 0x77, 0xa8, 0x50, 0x0d, 0x43, 0x87, 0x0e, 0xd5, 0xd0,
    -	0xdf, 0x53, 0xa8, 0x06, 0x0c, 0x27, 0xa4, 0xec, 0x49, 0xff, 0xaf, 0xb8, 0x0d, 0x22, 0x2e, 0x1c,
    -	0x3c, 0xd4, 0xcc, 0xec, 0xdd, 0xfd, 0xd2, 0x09, 0x9c, 0x49, 0x81, 0x73, 0x4a, 0xa2, 0x8f, 0xc1,
    -	0x8c, 0xd3, 0x68, 0xf8, 0xb7, 0xd4, 0xa0, 0x2e, 0x87, 0x35, 0xa7, 0xc1, 0x4d, 0x20, 0x83, 0x8c,
    -	0xeb, 0xa9, 0xbb, 0xfb, 0xa5, 0x99, 0xf9, 0x1c, 0x1a, 0x9c, 0x5b, 0x1a, 0xbd, 0x06, 0xc3, 0xad,
    -	0xc0, 0xaf, 0xad, 0x69, 0xcf, 0x8f, 0xcf, 0xd0, 0x0e, 0xac, 0x48, 0xe0, 0xc1, 0x7e, 0x69, 0x4c,
    -	0xfd, 0x61, 0x07, 0x7e, 0x5c, 0x20, 0x23, 0xf6, 0xc2, 0xc8, 0x83, 0x8e, 0xbd, 0x30, 0x7a, 0xbf,
    -	0x63, 0x2f, 0xec, 0xc0, 0x74, 0x95, 0x04, 0xae, 0xd3, 0x70, 0xef, 0x50, 0x99, 0x5c, 0xee, 0x81,
    -	0x1b, 0x30, 0x1c, 0x24, 0x76, 0xfd, 0x9e, 0x42, 0x2a, 0x6b, 0x59, 0xc5, 0xe4, 0x2e, 0x1f, 0x33,
    -	0xb2, 0xff, 0xab, 0x05, 0x83, 0xe2, 0xb5, 0xe0, 0x11, 0x48, 0xa6, 0xf3, 0x86, 0xe1, 0xa3, 0x94,
    -	0x3d, 0x28, 0xac, 0x31, 0xb9, 0x26, 0x8f, 0x72, 0xc2, 0xe4, 0xf1, 0x48, 0x27, 0x26, 0x9d, 0x8d,
    -	0x1d, 0xff, 0x67, 0x91, 0xde, 0x10, 0x8c, 0x77, 0xeb, 0x0f, 0xbe, 0x0b, 0xd6, 0x61, 0x30, 0x14,
    -	0xef, 0xa6, 0x0b, 0xf9, 0x2f, 0x4e, 0x92, 0x83, 0x18, 0x7b, 0xea, 0x89, 0x97, 0xd2, 0x92, 0x49,
    -	0xe6, 0x83, 0xec, 0xe2, 0x03, 0x7c, 0x90, 0xdd, 0xed, 0x65, 0x7f, 0xdf, 0xfd, 0x78, 0xd9, 0x6f,
    -	0x7f, 0x9d, 0x9d, 0xce, 0x3a, 0xfc, 0x08, 0x04, 0xb7, 0x4b, 0xe6, 0x39, 0x6e, 0x77, 0x98, 0x59,
    -	0xa2, 0x51, 0x39, 0x02, 0xdc, 0x2f, 0x59, 0x70, 0x3a, 0xe3, 0xab, 0x34, 0x69, 0xee, 0x19, 0x18,
    -	0x72, 0xda, 0x75, 0x57, 0xad, 0x65, 0xcd, 0xfc, 0x39, 0x2f, 0xe0, 0x58, 0x51, 0xa0, 0x45, 0x98,
    -	0x22, 0xb7, 0x5b, 0x2e, 0x37, 0x16, 0xeb, 0x0e, 0xce, 0x45, 0xfe, 0xc4, 0x74, 0x39, 0x89, 0xc4,
    -	0x69, 0x7a, 0x15, 0xe8, 0xaa, 0x98, 0x1b, 0xe8, 0xea, 0x17, 0x2c, 0x18, 0x51, 0x2f, 0x87, 0x1f,
    -	0x78, 0x6f, 0x7f, 0xd4, 0xec, 0xed, 0x87, 0x3b, 0xf4, 0x76, 0x4e, 0x37, 0xff, 0x51, 0x41, 0xb5,
    -	0xb7, 0xe2, 0x07, 0x51, 0x0f, 0x52, 0xe2, 0xbd, 0x3f, 0xce, 0xb8, 0x08, 0x23, 0x4e, 0xab, 0x25,
    -	0x11, 0xd2, 0xcb, 0x8e, 0x05, 0xc8, 0x8f, 0xc1, 0x58, 0xa7, 0x51, 0x6f, 0x45, 0x8a, 0xb9, 0x6f,
    -	0x45, 0xea, 0x00, 0x91, 0x13, 0x6c, 0x91, 0x88, 0xc2, 0x84, 0x53, 0x70, 0xfe, 0x7e, 0xd3, 0x8e,
    -	0xdc, 0xc6, 0x9c, 0xeb, 0x45, 0x61, 0x14, 0xcc, 0x95, 0xbd, 0xe8, 0x6a, 0xc0, 0xaf, 0xa9, 0x5a,
    -	0xa8, 0x38, 0xc5, 0x0b, 0x6b, 0x7c, 0x65, 0x94, 0x0c, 0x56, 0x47, 0xbf, 0xe9, 0xae, 0xb1, 0x2e,
    -	0xe0, 0x58, 0x51, 0xd8, 0x2f, 0xb1, 0xd3, 0x87, 0xf5, 0xe9, 0xe1, 0xc2, 0xa4, 0xfd, 0xf5, 0xa8,
    -	0x1a, 0x0d, 0x66, 0x78, 0x5d, 0xd2, 0x83, 0xb1, 0x75, 0xde, 0xec, 0x69, 0xc5, 0xfa, 0xab, 0xcb,
    -	0x38, 0x62, 0x1b, 0xfa, 0x44, 0xca, 0x05, 0xe7, 0xd9, 0x2e, 0xa7, 0xc6, 0x21, 0x9c, 0x6e, 0x58,
    -	0xb6, 0x2c, 0x96, 0x4b, 0xa8, 0x5c, 0x11, 0xeb, 0x42, 0xcb, 0x96, 0x25, 0x10, 0x38, 0xa6, 0xa1,
    -	0x02, 0x9b, 0xfa, 0x13, 0xce, 0xa0, 0x38, 0xa8, 0xb2, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x08,
    -	0xa5, 0x05, 0xb7, 0x3d, 0x3c, 0x9c, 0x50, 0x5a, 0xc8, 0xee, 0xd2, 0x34, 0x4d, 0x17, 0x61, 0x84,
    -	0xdc, 0x8e, 0x48, 0xe0, 0x39, 0x0d, 0x5a, 0x43, 0x7f, 0x1c, 0x07, 0x74, 0x39, 0x06, 0x63, 0x9d,
    -	0x06, 0x6d, 0xc0, 0x44, 0xc8, 0x75, 0x79, 0x2a, 0x94, 0x3f, 0xd7, 0x89, 0x3e, 0xa5, 0xde, 0x6c,
    -	0x9b, 0xe8, 0x03, 0x06, 0xe2, 0xbb, 0x93, 0x8c, 0x64, 0x91, 0x64, 0x81, 0x5e, 0x87, 0xf1, 0x86,
    -	0xef, 0xd4, 0x17, 0x9c, 0x86, 0xe3, 0xd5, 0x58, 0xff, 0x0c, 0x99, 0x39, 0xd7, 0x57, 0x0d, 0x2c,
    -	0x4e, 0x50, 0x53, 0x01, 0x51, 0x87, 0x88, 0x70, 0x73, 0x8e, 0xb7, 0x45, 0xc2, 0x99, 0x61, 0xf6,
    -	0x55, 0x4c, 0x40, 0x5c, 0xcd, 0xa1, 0xc1, 0xb9, 0xa5, 0xd1, 0xcb, 0x30, 0x2a, 0x3f, 0x5f, 0x0b,
    -	0xfc, 0x12, 0x3f, 0xbb, 0xd1, 0x70, 0xd8, 0xa0, 0x44, 0x21, 0x1c, 0x97, 0xff, 0x37, 0x02, 0x67,
    -	0x73, 0xd3, 0xad, 0x89, 0x68, 0x08, 0xfc, 0x89, 0xf2, 0x47, 0xe4, 0x7b, 0xc8, 0xe5, 0x2c, 0xa2,
    -	0x83, 0xfd, 0xd2, 0x29, 0xd1, 0x6b, 0x99, 0x78, 0x9c, 0xcd, 0x1b, 0xad, 0xc1, 0xf4, 0x36, 0x71,
    -	0x1a, 0xd1, 0xf6, 0xe2, 0x36, 0xa9, 0xed, 0xc8, 0x05, 0xc7, 0xa4, 0x46, 0xed, 0x79, 0xca, 0xe5,
    -	0x34, 0x09, 0xce, 0x2a, 0x87, 0xde, 0x86, 0x99, 0x56, 0xfb, 0x66, 0xc3, 0x0d, 0xb7, 0xd7, 0xfd,
    -	0x88, 0x39, 0x3a, 0xcd, 0xd7, 0xeb, 0x01, 0x09, 0xf9, 0x0b, 0x56, 0x76, 0xf4, 0xca, 0x60, 0x3d,
    -	0x95, 0x1c, 0x3a, 0x9c, 0xcb, 0x01, 0xdd, 0x81, 0xe3, 0x89, 0x89, 0x20, 0xa2, 0x6e, 0x8c, 0xe7,
    -	0x27, 0xf2, 0xa9, 0x66, 0x15, 0x10, 0x01, 0x6c, 0xb2, 0x50, 0x38, 0xbb, 0x0a, 0xf4, 0x0a, 0x80,
    -	0xdb, 0x5a, 0x71, 0x9a, 0x6e, 0x83, 0x5e, 0x47, 0xa7, 0xd9, 0x1c, 0xa1, 0x57, 0x13, 0x28, 0x57,
    -	0x24, 0x94, 0xee, 0xcd, 0xe2, 0xdf, 0x1e, 0xd6, 0xa8, 0xd1, 0x2a, 0x8c, 0x8b, 0x7f, 0x7b, 0x62,
    -	0x48, 0xa7, 0x54, 0xce, 0xc7, 0x71, 0x59, 0x42, 0x8d, 0x63, 0x02, 0x82, 0x13, 0x65, 0xd1, 0x16,
    -	0x9c, 0x96, 0x09, 0x27, 0xf5, 0xf9, 0x29, 0xc7, 0x20, 0x64, 0xd9, 0x73, 0x86, 0xf8, 0xcb, 0x97,
    -	0xf9, 0x4e, 0x84, 0xb8, 0x33, 0x1f, 0x7a, 0xae, 0xeb, 0xd3, 0x9c, 0xbf, 0x6b, 0x3e, 0x1e, 0x47,
    -	0x4e, 0x5c, 0x4d, 0x22, 0x71, 0x9a, 0x1e, 0xf9, 0x70, 0xdc, 0xf5, 0xb2, 0x66, 0xf5, 0x09, 0xc6,
    -	0xe8, 0xc3, 0xfc, 0x49, 0x77, 0xe7, 0x19, 0x9d, 0x89, 0xc7, 0xd9, 0x7c, 0x51, 0x19, 0xa6, 0x23,
    -	0x0e, 0x58, 0x72, 0x43, 0x9e, 0x9c, 0x83, 0x5e, 0xfb, 0x1e, 0xe2, 0x29, 0xf1, 0xe9, 0x6c, 0xde,
    -	0x48, 0xa3, 0x71, 0x56, 0x99, 0x77, 0xe7, 0xa6, 0xf8, 0x27, 0x16, 0x2d, 0xad, 0x09, 0xfa, 0xe8,
    -	0xd3, 0x30, 0xaa, 0xf7, 0x8f, 0x10, 0x5a, 0xce, 0x65, 0xcb, 0xc1, 0xda, 0xf6, 0xc2, 0xaf, 0x09,
    -	0x6a, 0x0b, 0xd1, 0x71, 0xd8, 0xe0, 0x88, 0x6a, 0x19, 0xa1, 0x18, 0x2e, 0xf4, 0x26, 0x14, 0xf5,
    -	0xee, 0xa5, 0x47, 0x20, 0x7b, 0xe5, 0xa0, 0x55, 0x18, 0xaa, 0x35, 0x5c, 0xe2, 0x45, 0xe5, 0x4a,
    -	0xa7, 0x80, 0x96, 0x8b, 0x82, 0x46, 0x2c, 0x45, 0x91, 0x53, 0x87, 0xc3, 0xb0, 0xe2, 0x60, 0xbf,
    -	0x0c, 0x23, 0xd5, 0x06, 0x21, 0x2d, 0xfe, 0xda, 0x08, 0x3d, 0xc9, 0x2e, 0x26, 0x4c, 0xb4, 0xb4,
    -	0x98, 0x68, 0xa9, 0xdf, 0x39, 0x98, 0x50, 0x29, 0xf1, 0xf6, 0xef, 0x14, 0xa0, 0xd4, 0x25, 0xb5,
    -	0x53, 0xc2, 0xde, 0x66, 0xf5, 0x64, 0x6f, 0x9b, 0x87, 0x89, 0xf8, 0x9f, 0xae, 0xca, 0x53, 0x2e,
    -	0xbb, 0xd7, 0x4d, 0x34, 0x4e, 0xd2, 0xf7, 0xfc, 0xfa, 0x42, 0x37, 0xd9, 0xf5, 0x75, 0x7d, 0x3f,
    -	0x64, 0x98, 0xea, 0xfb, 0x7b, 0xbf, 0x7b, 0xe7, 0x9a, 0x5d, 0xed, 0xaf, 0x17, 0xe0, 0xb8, 0xea,
    -	0xc2, 0xef, 0xdc, 0x8e, 0xbb, 0x96, 0xee, 0xb8, 0xfb, 0x60, 0xb4, 0xb6, 0xaf, 0xc2, 0x00, 0x8f,
    -	0xed, 0xd9, 0x83, 0xcc, 0xff, 0xa8, 0x19, 0x72, 0x5c, 0x89, 0x99, 0x46, 0xd8, 0xf1, 0x1f, 0xb2,
    -	0x60, 0x22, 0xf1, 0x8c, 0x0f, 0x61, 0xed, 0xad, 0xf7, 0xbd, 0xc8, 0xe5, 0x59, 0x12, 0xff, 0x59,
    -	0xe8, 0xdb, 0xf6, 0xc3, 0x28, 0xe9, 0xd1, 0x72, 0xd9, 0x0f, 0x23, 0xcc, 0x30, 0xf6, 0x9f, 0x59,
    -	0xd0, 0xbf, 0xe1, 0xb8, 0x5e, 0x24, 0xad, 0x1f, 0x56, 0x8e, 0xf5, 0xa3, 0x97, 0xef, 0x42, 0x2f,
    -	0xc2, 0x00, 0xd9, 0xdc, 0x24, 0xb5, 0x48, 0x8c, 0xaa, 0x8c, 0xf9, 0x30, 0xb0, 0xcc, 0xa0, 0x54,
    -	0x08, 0x65, 0x95, 0xf1, 0xbf, 0x58, 0x10, 0xa3, 0x1b, 0x30, 0x1c, 0xb9, 0x4d, 0x32, 0x5f, 0xaf,
    -	0x0b, 0x9f, 0x80, 0x7b, 0x08, 0x54, 0xb2, 0x21, 0x19, 0xe0, 0x98, 0x97, 0xfd, 0xa5, 0x02, 0x40,
    -	0x1c, 0xb0, 0xac, 0xdb, 0x27, 0x2e, 0xa4, 0xac, 0xc5, 0xe7, 0x32, 0xac, 0xc5, 0x28, 0x66, 0x98,
    -	0x61, 0x2a, 0x56, 0xdd, 0x54, 0xec, 0xa9, 0x9b, 0xfa, 0x0e, 0xd3, 0x4d, 0x8b, 0x30, 0x15, 0x07,
    -	0x5c, 0x33, 0xe3, 0x4d, 0xb2, 0xf3, 0x7b, 0x23, 0x89, 0xc4, 0x69, 0x7a, 0x9b, 0xc0, 0x59, 0x15,
    -	0x77, 0x4a, 0x9c, 0x85, 0xcc, 0xe1, 0x5d, 0xb7, 0xbe, 0x77, 0xe9, 0xa7, 0xd8, 0x1c, 0x5e, 0xc8,
    -	0x35, 0x87, 0xff, 0x94, 0x05, 0xc7, 0x92, 0xf5, 0xb0, 0xd7, 0xe1, 0x5f, 0xb0, 0xe0, 0x78, 0x9c,
    -	0xd9, 0x24, 0xed, 0x82, 0xf0, 0x42, 0xc7, 0x58, 0x5a, 0x39, 0x2d, 0x8e, 0x83, 0x8b, 0xac, 0x65,
    -	0xb1, 0xc6, 0xd9, 0x35, 0xda, 0xff, 0xa5, 0x0f, 0x66, 0xf2, 0x82, 0x70, 0xb1, 0xf7, 0x30, 0xce,
    -	0xed, 0xea, 0x0e, 0xb9, 0x25, 0x5e, 0x1d, 0xc4, 0xef, 0x61, 0x38, 0x18, 0x4b, 0x7c, 0x32, 0x99,
    -	0x4d, 0xa1, 0xc7, 0x64, 0x36, 0xdb, 0x30, 0x75, 0x6b, 0x9b, 0x78, 0xd7, 0xbc, 0xd0, 0x89, 0xdc,
    -	0x70, 0xd3, 0x65, 0x06, 0x74, 0x3e, 0x6f, 0x64, 0x42, 0xf6, 0xa9, 0x1b, 0x49, 0x82, 0x83, 0xfd,
    -	0xd2, 0x69, 0x03, 0x10, 0x37, 0x99, 0x6f, 0x24, 0x38, 0xcd, 0x34, 0x9d, 0x0b, 0xa8, 0xef, 0x01,
    -	0xe7, 0x02, 0x6a, 0xba, 0xc2, 0xed, 0x46, 0x3e, 0x76, 0x60, 0xd7, 0xd6, 0x35, 0x05, 0xc5, 0x1a,
    -	0x05, 0xfa, 0x24, 0x20, 0x3d, 0x99, 0x9b, 0x11, 0x03, 0xf5, 0xd9, 0xbb, 0xfb, 0x25, 0xb4, 0x9e,
    -	0xc2, 0x1e, 0xec, 0x97, 0xa6, 0x29, 0xb4, 0xec, 0xd1, 0xeb, 0x6f, 0x1c, 0x38, 0x2e, 0x83, 0x11,
    -	0xba, 0x01, 0x93, 0x14, 0xca, 0x56, 0x94, 0x0c, 0xb0, 0xca, 0xaf, 0xac, 0x4f, 0xdf, 0xdd, 0x2f,
    -	0x4d, 0xae, 0x27, 0x70, 0x79, 0xac, 0x53, 0x4c, 0x32, 0x52, 0x02, 0x0d, 0xf5, 0x9a, 0x12, 0xc8,
    -	0xfe, 0x82, 0x05, 0x27, 0xe9, 0x01, 0x57, 0x5f, 0xcd, 0xb1, 0xa2, 0x3b, 0x2d, 0x97, 0xdb, 0x69,
    -	0xc4, 0x51, 0xc3, 0x74, 0x75, 0x95, 0x32, 0xb7, 0xd2, 0x28, 0x2c, 0xdd, 0xe1, 0x77, 0x5c, 0xaf,
    -	0x9e, 0xdc, 0xe1, 0xaf, 0xb8, 0x5e, 0x1d, 0x33, 0x8c, 0x3a, 0xb2, 0x8a, 0xb9, 0x6f, 0x2e, 0xbe,
    -	0x4a, 0xd7, 0x2a, 0x6d, 0xcb, 0xb7, 0xb4, 0x19, 0xe8, 0x69, 0xdd, 0xa6, 0x2a, 0xdc, 0x27, 0x73,
    -	0xed, 0xa9, 0x9f, 0xb7, 0x40, 0xbc, 0xd1, 0xee, 0xe1, 0x4c, 0x7e, 0x0b, 0x46, 0x77, 0xd3, 0x89,
    -	0x2e, 0xcf, 0xe6, 0x3f, 0x5a, 0x17, 0xe1, 0xeb, 0x95, 0x88, 0x6e, 0x24, 0xb5, 0x34, 0x78, 0xd9,
    -	0x75, 0x10, 0xd8, 0x25, 0xc2, 0xac, 0x1a, 0xdd, 0x5b, 0xf3, 0x1c, 0x40, 0x9d, 0xd1, 0xb2, 0xec,
    -	0xd7, 0x05, 0x53, 0xe2, 0x5a, 0x52, 0x18, 0xac, 0x51, 0xd9, 0x3f, 0x57, 0x84, 0x11, 0x99, 0x58,
    -	0xb1, 0xed, 0xf5, 0xa2, 0x7b, 0x3c, 0x54, 0xa6, 0x75, 0xf4, 0x36, 0x4c, 0x05, 0xa4, 0xd6, 0x0e,
    -	0x42, 0x77, 0x97, 0x48, 0xb4, 0x58, 0x24, 0x73, 0x3c, 0xa8, 0x7e, 0x02, 0x79, 0xc0, 0x02, 0x39,
    -	0x25, 0x80, 0xcc, 0x68, 0x9c, 0x66, 0x84, 0x2e, 0xc0, 0x30, 0x53, 0xbd, 0x57, 0x62, 0x85, 0xb0,
    -	0x52, 0x7c, 0xad, 0x49, 0x04, 0x8e, 0x69, 0xd8, 0xe5, 0xa0, 0x7d, 0x93, 0x91, 0x27, 0xde, 0x2b,
    -	0x57, 0x39, 0x18, 0x4b, 0x3c, 0xfa, 0x18, 0x4c, 0xf2, 0x72, 0x81, 0xdf, 0x72, 0xb6, 0xb8, 0x49,
    -	0xb0, 0x5f, 0x05, 0x81, 0x99, 0x5c, 0x4b, 0xe0, 0x0e, 0xf6, 0x4b, 0xc7, 0x92, 0x30, 0xd6, 0xec,
    -	0x14, 0x17, 0xe6, 0xf9, 0xc7, 0x2b, 0xa1, 0x67, 0x46, 0xca, 0x61, 0x30, 0x46, 0x61, 0x9d, 0xce,
    -	0xfe, 0x3b, 0x0b, 0xa6, 0xb4, 0xa1, 0xea, 0x39, 0xaf, 0x81, 0xd1, 0x49, 0x85, 0x1e, 0x3a, 0xe9,
    -	0x70, 0x31, 0x09, 0x32, 0x47, 0xb8, 0xef, 0x3e, 0x8d, 0xb0, 0xfd, 0x69, 0x40, 0xe9, 0xac, 0x9d,
    -	0xe8, 0x0d, 0xee, 0x2e, 0xef, 0x06, 0xa4, 0xde, 0xc9, 0xe0, 0xaf, 0xc7, 0x77, 0x91, 0xef, 0x2b,
    -	0x79, 0x29, 0xac, 0xca, 0xdb, 0x3f, 0xdc, 0x07, 0x93, 0xc9, 0x88, 0x12, 0xe8, 0x32, 0x0c, 0x70,
    -	0x29, 0x5d, 0xb0, 0xef, 0xe0, 0x4f, 0xa6, 0xc5, 0xa1, 0x60, 0xf2, 0x8a, 0x10, 0xf4, 0x45, 0x79,
    -	0xf4, 0x36, 0x8c, 0xd4, 0xfd, 0x5b, 0xde, 0x2d, 0x27, 0xa8, 0xcf, 0x57, 0xca, 0x62, 0x87, 0xc8,
    -	0x54, 0x40, 0x2d, 0xc5, 0x64, 0x7a, 0x6c, 0x0b, 0xe6, 0x3b, 0x11, 0xa3, 0xb0, 0xce, 0x0e, 0x6d,
    -	0xb0, 0x44, 0x34, 0x9b, 0xee, 0xd6, 0x9a, 0xd3, 0xea, 0xf4, 0x76, 0x6a, 0x51, 0x12, 0x69, 0x9c,
    -	0xc7, 0x44, 0xb6, 0x1a, 0x8e, 0xc0, 0x31, 0x23, 0xf4, 0x59, 0x98, 0x0e, 0x73, 0x4c, 0x62, 0x79,
    -	0x49, 0x9c, 0x3b, 0x59, 0x89, 0xb8, 0x32, 0x25, 0xcb, 0x78, 0x96, 0x55, 0x0d, 0xba, 0x0d, 0x48,
    -	0xa8, 0x9e, 0x37, 0x82, 0x76, 0x18, 0x2d, 0xb4, 0xbd, 0x7a, 0x43, 0x26, 0xaa, 0xc9, 0x4e, 0xf3,
    -	0x9e, 0xa2, 0xd6, 0xea, 0x66, 0x11, 0x66, 0xd3, 0x14, 0x38, 0xa3, 0x0e, 0xfb, 0xf3, 0x7d, 0x30,
    -	0x2b, 0xd3, 0xe4, 0x66, 0xbc, 0x11, 0xf9, 0x9c, 0x95, 0x78, 0x24, 0xf2, 0x4a, 0xfe, 0x46, 0xff,
    -	0xc0, 0x9e, 0x8a, 0x7c, 0x31, 0xfd, 0x54, 0xe4, 0xb5, 0x43, 0x36, 0xe3, 0xbe, 0x3d, 0x18, 0xf9,
    -	0x8e, 0x7d, 0xe5, 0xf1, 0xe5, 0x63, 0x60, 0x1c, 0xcd, 0x08, 0xf3, 0xf0, 0xdd, 0x15, 0x69, 0x3a,
    -	0xca, 0xb9, 0xfe, 0x5f, 0x16, 0x34, 0xc6, 0x61, 0x3f, 0x2a, 0x83, 0x7c, 0xb3, 0x7d, 0x56, 0xf1,
    -	0xa1, 0x3c, 0x49, 0xb3, 0x15, 0xed, 0x2d, 0xb9, 0x81, 0x68, 0x71, 0x26, 0xcf, 0x65, 0x41, 0x93,
    -	0xe6, 0x29, 0x31, 0x58, 0xf1, 0x41, 0xbb, 0x30, 0xb5, 0x55, 0x23, 0x89, 0xcc, 0xf2, 0xc5, 0xfc,
    -	0x75, 0x7b, 0x69, 0x71, 0xb9, 0x43, 0x5a, 0x79, 0x76, 0xf9, 0x4b, 0x91, 0xe0, 0x74, 0x15, 0x2c,
    -	0xab, 0xbd, 0x73, 0x2b, 0x5c, 0x6e, 0x38, 0x61, 0xe4, 0xd6, 0x16, 0x1a, 0x7e, 0x6d, 0xa7, 0x1a,
    -	0xf9, 0x81, 0x4c, 0x6b, 0x97, 0x79, 0xf7, 0x9a, 0xbf, 0x51, 0x4d, 0xd1, 0xa7, 0xb3, 0xda, 0x67,
    -	0x51, 0xe1, 0xcc, 0xba, 0xd0, 0x3a, 0x0c, 0x6e, 0xb9, 0x11, 0x26, 0x2d, 0x5f, 0xec, 0x16, 0x99,
    -	0x5b, 0xe1, 0x25, 0x4e, 0x92, 0xce, 0x32, 0x2f, 0x10, 0x58, 0x32, 0x41, 0x6f, 0xa8, 0x43, 0x60,
    -	0x20, 0x5f, 0x01, 0x9b, 0xf6, 0xbd, 0xcb, 0x3c, 0x06, 0x5e, 0x87, 0xa2, 0xb7, 0x19, 0x76, 0x8a,
    -	0x18, 0xb3, 0xbe, 0x52, 0x4d, 0x67, 0x7f, 0x5f, 0x5f, 0xa9, 0x62, 0x5a, 0x90, 0x3d, 0x2e, 0x0d,
    -	0x6b, 0xa1, 0x2b, 0x12, 0xf4, 0x64, 0xbe, 0xb5, 0x2d, 0x57, 0x17, 0xab, 0xe5, 0x74, 0xc6, 0x7b,
    -	0x06, 0xc6, 0xbc, 0x38, 0xba, 0x0e, 0xc3, 0x5b, 0x7c, 0xe3, 0xdb, 0x0c, 0x45, 0xaa, 0xec, 0xcc,
    -	0xc3, 0xe8, 0x92, 0x24, 0x4a, 0xe7, 0xb9, 0x57, 0x28, 0x1c, 0xb3, 0x42, 0x9f, 0xb7, 0xe0, 0x78,
    -	0x32, 0xd7, 0x38, 0x7b, 0x12, 0x26, 0xdc, 0xd4, 0x5e, 0xec, 0x25, 0xf9, 0x3b, 0x2b, 0x60, 0x54,
    -	0xc8, 0xcc, 0x2f, 0x99, 0x64, 0x38, 0xbb, 0x3a, 0xda, 0xd1, 0xc1, 0xcd, 0x7a, 0xa7, 0x4c, 0x32,
    -	0x89, 0xf0, 0x39, 0xbc, 0xa3, 0xf1, 0xc2, 0x12, 0xa6, 0x05, 0xd1, 0x06, 0xc0, 0x66, 0x83, 0x88,
    -	0xb8, 0x84, 0xc2, 0x29, 0x2a, 0xf3, 0xf4, 0x5f, 0x51, 0x54, 0x32, 0x27, 0x15, 0x15, 0xb3, 0x63,
    -	0x28, 0xd6, 0xf8, 0xd0, 0xa9, 0x54, 0x73, 0xbd, 0x3a, 0x09, 0x98, 0x71, 0x2b, 0x67, 0x2a, 0x2d,
    -	0x32, 0x8a, 0xf4, 0x54, 0xe2, 0x70, 0x2c, 0x38, 0x30, 0x5e, 0xa4, 0xb5, 0xbd, 0x19, 0x76, 0x4a,
    -	0x8c, 0xb0, 0x48, 0x5a, 0xdb, 0x89, 0x09, 0xc5, 0x79, 0x31, 0x38, 0x16, 0x1c, 0xe8, 0x92, 0xd9,
    -	0xa4, 0x0b, 0x88, 0x04, 0x33, 0x13, 0xf9, 0x4b, 0x66, 0x85, 0x93, 0xa4, 0x97, 0x8c, 0x40, 0x60,
    -	0xc9, 0x04, 0x7d, 0xca, 0x94, 0x76, 0x26, 0x19, 0xcf, 0xa7, 0xbb, 0x48, 0x3b, 0x06, 0xdf, 0xce,
    -	0xf2, 0xce, 0x2b, 0x50, 0xd8, 0xac, 0x31, 0xa3, 0x58, 0x8e, 0xcd, 0x60, 0x65, 0xd1, 0xe0, 0xc6,
    -	0x02, 0x8d, 0xaf, 0x2c, 0xe2, 0xc2, 0x66, 0x8d, 0x4e, 0x7d, 0xe7, 0x4e, 0x3b, 0x20, 0x2b, 0x6e,
    -	0x83, 0x88, 0x24, 0x09, 0x99, 0x53, 0x7f, 0x5e, 0x12, 0xa5, 0xa7, 0xbe, 0x42, 0xe1, 0x98, 0x15,
    -	0xe5, 0x1b, 0xcb, 0x60, 0xd3, 0xf9, 0x7c, 0x95, 0xa8, 0x95, 0xe6, 0x9b, 0x29, 0x85, 0xed, 0xc0,
    -	0xd8, 0x6e, 0xd8, 0xda, 0x26, 0x72, 0x57, 0x64, 0xe6, 0xba, 0x9c, 0x78, 0x0a, 0xd7, 0x05, 0xa1,
    -	0x1b, 0x44, 0x6d, 0xa7, 0x91, 0xda, 0xc8, 0x99, 0x6a, 0xe5, 0xba, 0xce, 0x0c, 0x9b, 0xbc, 0xe9,
    -	0x44, 0x78, 0x87, 0x07, 0x3d, 0x63, 0x86, 0xbb, 0x9c, 0x89, 0x90, 0x11, 0x17, 0x8d, 0x4f, 0x04,
    -	0x81, 0xc0, 0x92, 0x89, 0xea, 0x6c, 0x76, 0x00, 0x9d, 0xe8, 0xd2, 0xd9, 0xa9, 0xf6, 0xc6, 0x9d,
    -	0xcd, 0x0e, 0x9c, 0x98, 0x15, 0x3b, 0x68, 0x5a, 0x19, 0x69, 0xd9, 0x99, 0xd9, 0x2e, 0xe7, 0xa0,
    -	0xe9, 0x96, 0xc6, 0x9d, 0x1f, 0x34, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0xfa, 0x71, 0x2d, 0x19, 0xbf,
    -	0x4e, 0x24, 0x72, 0x78, 0x32, 0x27, 0xfc, 0x63, 0x3a, 0xc8, 0x1d, 0xff, 0x38, 0x85, 0xc2, 0x31,
    -	0x2b, 0x54, 0x87, 0xf1, 0x96, 0x11, 0x17, 0x95, 0x25, 0xa4, 0xc8, 0x91, 0x0b, 0xb2, 0x22, 0xa8,
    -	0x72, 0x0d, 0x91, 0x89, 0xc1, 0x09, 0x9e, 0xcc, 0x73, 0x8f, 0x3f, 0xf5, 0x63, 0xf9, 0x2a, 0x72,
    -	0x86, 0x3a, 0xe3, 0x35, 0x20, 0x1f, 0x6a, 0x81, 0xc0, 0x92, 0x09, 0xed, 0x0d, 0xf1, 0x40, 0xcd,
    -	0x0f, 0x59, 0xda, 0x97, 0x3c, 0x03, 0x7b, 0x96, 0x99, 0x48, 0x06, 0x03, 0x17, 0x28, 0x1c, 0xb3,
    -	0xa2, 0x3b, 0x39, 0x3d, 0xf0, 0x4e, 0xe5, 0xef, 0xe4, 0xc9, 0xe3, 0x8e, 0xed, 0xe4, 0xf4, 0xb0,
    -	0x2b, 0x8a, 0xa3, 0x4e, 0xc5, 0xae, 0x66, 0x29, 0x2b, 0x72, 0xda, 0xa5, 0x82, 0x5f, 0xa7, 0xdb,
    -	0xa5, 0x50, 0x38, 0x66, 0x65, 0xff, 0x70, 0x01, 0xce, 0x74, 0x5e, 0x6f, 0xb1, 0xed, 0xab, 0x12,
    -	0xfb, 0x1a, 0x25, 0x6c, 0x5f, 0x5c, 0x13, 0x13, 0x53, 0xf5, 0x1c, 0xce, 0xf6, 0x12, 0x4c, 0xa9,
    -	0x67, 0x84, 0x0d, 0xb7, 0xb6, 0xb7, 0x1e, 0x2b, 0xbf, 0x54, 0xe0, 0x97, 0x6a, 0x92, 0x00, 0xa7,
    -	0xcb, 0xa0, 0x79, 0x98, 0x30, 0x80, 0xe5, 0x25, 0x71, 0x6d, 0x8f, 0x93, 0x24, 0x98, 0x68, 0x9c,
    -	0xa4, 0xb7, 0x7f, 0xde, 0x82, 0x87, 0x72, 0xb2, 0x62, 0xf7, 0x1c, 0xad, 0x75, 0x13, 0x26, 0x5a,
    -	0x66, 0xd1, 0x2e, 0x01, 0xa6, 0x8d, 0xdc, 0xdb, 0xaa, 0xad, 0x09, 0x04, 0x4e, 0x32, 0xb5, 0x7f,
    -	0xb6, 0x00, 0xa7, 0x3b, 0xfa, 0xc5, 0x23, 0x0c, 0x27, 0xb6, 0x9a, 0xa1, 0xb3, 0x18, 0x90, 0x3a,
    -	0xf1, 0x22, 0xd7, 0x69, 0x54, 0x5b, 0xa4, 0xa6, 0x59, 0x2f, 0x99, 0x83, 0xf9, 0xa5, 0xb5, 0xea,
    -	0x7c, 0x9a, 0x02, 0xe7, 0x94, 0x44, 0x2b, 0x80, 0xd2, 0x18, 0x31, 0xc2, 0xec, 0x6a, 0x9a, 0xe6,
    -	0x87, 0x33, 0x4a, 0xa0, 0x97, 0x60, 0x4c, 0xf9, 0xdb, 0x6b, 0x23, 0xce, 0x36, 0x76, 0xac, 0x23,
    -	0xb0, 0x49, 0x87, 0x2e, 0xf2, 0xec, 0x39, 0x22, 0xcf, 0x92, 0x30, 0x75, 0x4e, 0xc8, 0xd4, 0x38,
    -	0x02, 0x8c, 0x75, 0x9a, 0x85, 0x97, 0x7f, 0xef, 0x9b, 0x67, 0x3e, 0xf0, 0x87, 0xdf, 0x3c, 0xf3,
    -	0x81, 0x3f, 0xfd, 0xe6, 0x99, 0x0f, 0x7c, 0xdf, 0xdd, 0x33, 0xd6, 0xef, 0xdd, 0x3d, 0x63, 0xfd,
    -	0xe1, 0xdd, 0x33, 0xd6, 0x9f, 0xde, 0x3d, 0x63, 0xfd, 0xfb, 0xbb, 0x67, 0xac, 0x2f, 0xfd, 0xc5,
    -	0x99, 0x0f, 0xbc, 0x85, 0xe2, 0xf8, 0xc7, 0x17, 0xe8, 0xe8, 0x5c, 0xd8, 0xbd, 0xf8, 0x3f, 0x02,
    -	0x00, 0x00, 0xff, 0xff, 0xfd, 0xca, 0x84, 0xba, 0xa5, 0x1e, 0x01, 0x00,
    +	// 16056 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x79, 0x90, 0x1c, 0xd7,
    +	0x79, 0x18, 0xae, 0x9e, 0xd9, 0xf3, 0xdb, 0xfb, 0x2d, 0x8e, 0xc5, 0x12, 0xc0, 0x80, 0x4d, 0x12,
    +	0x04, 0xaf, 0x85, 0xc0, 0x43, 0x84, 0x48, 0x8a, 0xe6, 0x9e, 0xc0, 0x12, 0xd8, 0xc5, 0xf0, 0xcd,
    +	0x02, 0x90, 0x28, 0x4a, 0x56, 0x63, 0xe6, 0xed, 0x6e, 0x6b, 0x67, 0xba, 0x87, 0xdd, 0x3d, 0x0b,
    +	0x2c, 0x7e, 0x72, 0xd9, 0x96, 0x7f, 0x96, 0x2d, 0xd9, 0xa9, 0x52, 0xb9, 0x9c, 0x38, 0x25, 0xbb,
    +	0x5c, 0x29, 0xc7, 0xf1, 0x11, 0xc5, 0x4e, 0x14, 0x39, 0xb6, 0x63, 0xf9, 0xca, 0x55, 0x65, 0x27,
    +	0x29, 0xc7, 0x71, 0x55, 0x24, 0x57, 0x5c, 0x59, 0x47, 0xeb, 0x54, 0xb9, 0xfc, 0x47, 0x6c, 0x97,
    +	0x93, 0x3f, 0x92, 0x2d, 0x27, 0x4e, 0xbd, 0xb3, 0xdf, 0xeb, 0x63, 0x66, 0x16, 0x04, 0x56, 0x14,
    +	0x8b, 0xff, 0xcd, 0xbc, 0xef, 0x7b, 0xdf, 0x7b, 0xfd, 0xce, 0xef, 0x7d, 0x27, 0xd8, 0x5b, 0x17,
    +	0xc3, 0x19, 0xd7, 0x3f, 0xef, 0x34, 0xdd, 0xf3, 0x55, 0x3f, 0x20, 0xe7, 0xb7, 0x2f, 0x9c, 0xdf,
    +	0x20, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x66, 0x9a, 0x81, 0x1f, 0xf9, 0x08, 0x71, 0x9c, 0x19, 0xa7,
    +	0xe9, 0xce, 0x50, 0x9c, 0x99, 0xed, 0x0b, 0xd3, 0xcf, 0x6c, 0xb8, 0xd1, 0x66, 0xeb, 0xd6, 0x4c,
    +	0xd5, 0x6f, 0x9c, 0xdf, 0xf0, 0x37, 0xfc, 0xf3, 0x0c, 0xf5, 0x56, 0x6b, 0x9d, 0xfd, 0x63, 0x7f,
    +	0xd8, 0x2f, 0x4e, 0x62, 0xfa, 0xf9, 0xb8, 0x99, 0x86, 0x53, 0xdd, 0x74, 0x3d, 0x12, 0xec, 0x9c,
    +	0x6f, 0x6e, 0x6d, 0xb0, 0x76, 0x03, 0x12, 0xfa, 0xad, 0xa0, 0x4a, 0x92, 0x0d, 0xb7, 0xad, 0x15,
    +	0x9e, 0x6f, 0x90, 0xc8, 0xc9, 0xe8, 0xee, 0xf4, 0xf9, 0xbc, 0x5a, 0x41, 0xcb, 0x8b, 0xdc, 0x46,
    +	0xba, 0x99, 0x0f, 0x75, 0xaa, 0x10, 0x56, 0x37, 0x49, 0xc3, 0x49, 0xd5, 0x7b, 0x2e, 0xaf, 0x5e,
    +	0x2b, 0x72, 0xeb, 0xe7, 0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0x86, 0x05, 0x67, 0x66,
    +	0x6f, 0x56, 0x16, 0xeb, 0x4e, 0x18, 0xb9, 0xd5, 0xb9, 0xba, 0x5f, 0xdd, 0xaa, 0x44, 0x7e, 0x40,
    +	0x6e, 0xf8, 0xf5, 0x56, 0x83, 0x54, 0xd8, 0x40, 0xa0, 0xa7, 0x61, 0x60, 0x9b, 0xfd, 0x5f, 0x5e,
    +	0x98, 0xb2, 0xce, 0x58, 0xe7, 0x06, 0xe7, 0xc6, 0x7f, 0x67, 0xb7, 0xf4, 0x81, 0xbd, 0xdd, 0xd2,
    +	0xc0, 0x0d, 0x51, 0x8e, 0x15, 0x06, 0x3a, 0x0b, 0x7d, 0xeb, 0xe1, 0xda, 0x4e, 0x93, 0x4c, 0x15,
    +	0x18, 0xee, 0xa8, 0xc0, 0xed, 0x5b, 0xaa, 0xd0, 0x52, 0x2c, 0xa0, 0xe8, 0x3c, 0x0c, 0x36, 0x9d,
    +	0x20, 0x72, 0x23, 0xd7, 0xf7, 0xa6, 0x8a, 0x67, 0xac, 0x73, 0xbd, 0x73, 0x13, 0x02, 0x75, 0xb0,
    +	0x2c, 0x01, 0x38, 0xc6, 0xa1, 0xdd, 0x08, 0x88, 0x53, 0xbb, 0xe6, 0xd5, 0x77, 0xa6, 0x7a, 0xce,
    +	0x58, 0xe7, 0x06, 0xe2, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0xa5, 0x02, 0x0c, 0xcc, 0xae,
    +	0xaf, 0xbb, 0x9e, 0x1b, 0xed, 0xa0, 0x1b, 0x30, 0xec, 0xf9, 0x35, 0x22, 0xff, 0xb3, 0xaf, 0x18,
    +	0x7a, 0xf6, 0xcc, 0x4c, 0x7a, 0x29, 0xcd, 0xac, 0x6a, 0x78, 0x73, 0xe3, 0x7b, 0xbb, 0xa5, 0x61,
    +	0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0xa1, 0xa6, 0x5f, 0x53, 0x64, 0x0b, 0x8c, 0x6c, 0x29, 0x8b,
    +	0x6c, 0x39, 0x46, 0x9b, 0x1b, 0xdb, 0xdb, 0x2d, 0x0d, 0x69, 0x05, 0x58, 0x27, 0x82, 0x6e, 0xc1,
    +	0x18, 0xfd, 0xeb, 0x45, 0xae, 0xa2, 0x5b, 0x64, 0x74, 0x1f, 0xc9, 0xa3, 0xab, 0xa1, 0xce, 0x4d,
    +	0xee, 0xed, 0x96, 0xc6, 0x12, 0x85, 0x38, 0x49, 0xd0, 0xfe, 0x61, 0x0b, 0xc6, 0x66, 0x9b, 0xcd,
    +	0xd9, 0xa0, 0xe1, 0x07, 0xe5, 0xc0, 0x5f, 0x77, 0xeb, 0x04, 0xbd, 0x08, 0x3d, 0x11, 0x9d, 0x35,
    +	0x3e, 0xc3, 0x8f, 0x88, 0xa1, 0xed, 0xa1, 0x73, 0xb5, 0xbf, 0x5b, 0x9a, 0x4c, 0xa0, 0xb3, 0xa9,
    +	0x64, 0x15, 0xd0, 0x6b, 0x30, 0x5e, 0xf7, 0xab, 0x4e, 0x7d, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53,
    +	0x7f, 0x64, 0x6f, 0xb7, 0x34, 0x7e, 0x35, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x2e, 0x8c, 0xce, 0x46,
    +	0x91, 0x53, 0xdd, 0x24, 0x35, 0xbe, 0xa0, 0xd0, 0xf3, 0xd0, 0xe3, 0x39, 0x0d, 0xd9, 0x99, 0x33,
    +	0xb2, 0x33, 0xab, 0x4e, 0x83, 0x76, 0x66, 0xfc, 0xba, 0xe7, 0xbe, 0xdd, 0x12, 0x8b, 0x94, 0x96,
    +	0x61, 0x86, 0x8d, 0x9e, 0x05, 0xa8, 0x91, 0x6d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x29, 0xfa, 0x80,
    +	0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x60, 0x70, 0x76, 0xdb, 0x77, 0x6b, 0x65,
    +	0xbf, 0x16, 0xa2, 0x2d, 0x18, 0x6b, 0x06, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xca, 0x3a, 0x53, 0x3c,
    +	0x37, 0xf4, 0xec, 0xb9, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x99, 0x3b, 0x2e, 0xda,
    +	0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xaf, 0x0b, 0x70, 0x74, 0xf6, 0x6e, 0x2b, 0x20, 0x0b,
    +	0x6e, 0xb8, 0x95, 0xdc, 0x70, 0x35, 0x37, 0xdc, 0x5a, 0x8d, 0x47, 0x40, 0xad, 0xf4, 0x05, 0x51,
    +	0x8e, 0x15, 0x06, 0x7a, 0x06, 0xfa, 0xe9, 0xef, 0xeb, 0x78, 0x59, 0x7c, 0xf2, 0xa4, 0x40, 0x1e,
    +	0x5a, 0x70, 0x22, 0x67, 0x81, 0x83, 0xb0, 0xc4, 0x41, 0x2b, 0x30, 0x54, 0x65, 0xe7, 0xc3, 0xc6,
    +	0x8a, 0x5f, 0x23, 0x6c, 0x6d, 0x0d, 0xce, 0x3d, 0x45, 0xd1, 0xe7, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2,
    +	0x14, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0xbb, 0xf7, 0x30, 0x4a, 0x90,
    +	0xb1, 0xd5, 0xcf, 0x69, 0x3b, 0xb7, 0x97, 0xed, 0xdc, 0xe1, 0xec, 0x5d, 0x8b, 0x2e, 0x40, 0xcf,
    +	0x96, 0xeb, 0xd5, 0xa6, 0xfa, 0x18, 0xad, 0x53, 0x74, 0xce, 0xaf, 0xb8, 0x5e, 0x6d, 0x7f, 0xb7,
    +	0x34, 0x61, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0x0f, 0x0b, 0x4a, 0x0c, 0xb6, 0xe4, 0xd6,
    +	0x49, 0x99, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x16, 0x20, 0x24, 0xd5, 0x80,
    +	0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x51, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02,
    +	0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x22, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xa7,
    +	0xf3, 0x09, 0x7d, 0x04, 0xc6, 0xe2, 0xc6, 0xc2, 0xa6, 0x53, 0x95, 0x03, 0xc8, 0x76, 0x70, 0xc5,
    +	0x04, 0xe1, 0x24, 0xae, 0xfd, 0x0f, 0x2d, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xcb, 0xbf, 0xd5, 0xfe,
    +	0x55, 0x0b, 0xfa, 0xe7, 0x5c, 0xaf, 0xe6, 0x7a, 0x1b, 0xe8, 0x53, 0x30, 0x40, 0xaf, 0xca, 0x9a,
    +	0x13, 0x39, 0xe2, 0x18, 0xfe, 0xa0, 0xb6, 0xb7, 0xd4, 0xcd, 0x35, 0xd3, 0xdc, 0xda, 0xa0, 0x05,
    +	0xe1, 0x0c, 0xc5, 0xa6, 0xbb, 0xed, 0xda, 0xad, 0x4f, 0x93, 0x6a, 0xb4, 0x42, 0x22, 0x27, 0xfe,
    +	0x9c, 0xb8, 0x0c, 0x2b, 0xaa, 0xe8, 0x0a, 0xf4, 0x45, 0x4e, 0xb0, 0x41, 0x22, 0x71, 0x1e, 0x67,
    +	0x9e, 0x9b, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0x78, 0x55, 0x12, 0xdf, 0x52, 0x6b, 0xac, 0x2a, 0x16,
    +	0x24, 0xec, 0xff, 0xd3, 0x0f, 0x27, 0xe6, 0x2b, 0xcb, 0x39, 0xeb, 0xea, 0x2c, 0xf4, 0xd5, 0x02,
    +	0x77, 0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x05, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x84, 0x61, 0x7e,
    +	0x3f, 0x5e, 0x76, 0xbc, 0x5a, 0x7c, 0x3c, 0x0a, 0xec, 0xe1, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0x07,
    +	0x5c, 0x54, 0x67, 0x13, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xbc, 0x05, 0xe3, 0xbc, 0x99, 0xd9, 0x28,
    +	0x0a, 0xdc, 0x5b, 0xad, 0x88, 0x84, 0x53, 0xbd, 0xec, 0xa4, 0x9b, 0xcf, 0x1a, 0xad, 0xdc, 0x11,
    +	0x98, 0xb9, 0x91, 0xa0, 0xc2, 0x0f, 0xc1, 0x29, 0xd1, 0xee, 0x78, 0x12, 0x8c, 0x53, 0xcd, 0xa2,
    +	0xef, 0xb3, 0x60, 0xba, 0xea, 0x7b, 0x51, 0xe0, 0xd7, 0xeb, 0x24, 0x28, 0xb7, 0x6e, 0xd5, 0xdd,
    +	0x70, 0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0xe9,
    +	0xbd, 0xdd, 0xd2, 0xf4, 0x7c, 0x2e, 0x29, 0xdc, 0xa6, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0x95,
    +	0xc8, 0xd9, 0x20, 0x71, 0xe3, 0xfd, 0xdd, 0x37, 0x7e, 0x6c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24,
    +	0x70, 0x06, 0x59, 0xf4, 0x36, 0x1c, 0xa1, 0xa5, 0xa9, 0x6f, 0x1d, 0xe8, 0xbe, 0xb9, 0xa9, 0xbd,
    +	0xdd, 0xd2, 0x91, 0xd5, 0x0c, 0x22, 0x38, 0x93, 0x34, 0xfa, 0x1e, 0x0b, 0x4e, 0xc4, 0x9f, 0xbf,
    +	0x78, 0xa7, 0xe9, 0x78, 0xb5, 0xb8, 0xe1, 0xc1, 0xee, 0x1b, 0xa6, 0x67, 0xf2, 0x89, 0xf9, 0x3c,
    +	0x4a, 0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x24, 0xed, 0x5a, 0xb2, 0x6d, 0xe8, 0xbe, 0xed, 0xe3, 0x7b,
    +	0xbb, 0xa5, 0xc9, 0xd5, 0x34, 0x0d, 0x9c, 0x45, 0x78, 0x7a, 0x1e, 0x8e, 0x66, 0xae, 0x4e, 0x34,
    +	0x0e, 0xc5, 0x2d, 0xc2, 0x99, 0xc0, 0x41, 0x4c, 0x7f, 0xa2, 0x23, 0xd0, 0xbb, 0xed, 0xd4, 0x5b,
    +	0x62, 0x63, 0x62, 0xfe, 0xe7, 0xa5, 0xc2, 0x45, 0xcb, 0xfe, 0x37, 0x45, 0x18, 0x9b, 0xaf, 0x2c,
    +	0xdf, 0xd3, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9, 0x97, 0xe8,
    +	0x77, 0x67, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9c, 0xb3, 0x65, 0xef, 0xf3, 0x46, 0xdd, 0xce,
    +	0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x03, 0x2e, 0xdd,
    +	0xfb, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x2d, 0xb7, 0xee, 0x46, 0x2e, 0x09, 0xd1,
    +	0xe3, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdd, 0xdb, 0x2d, 0x15, 0x67, 0x6b,
    +	0x94, 0xcd, 0x00, 0x85, 0xb5, 0x83, 0x29, 0x06, 0x7a, 0x12, 0x7a, 0x6a, 0x81, 0xdf, 0x9c, 0x2a,
    +	0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x6f, 0x17, 0xe0,
    +	0xe4, 0x3c, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0xee, 0x8b, 0x73, 0x30, 0xd0, 0xf0, 0x3d, 0x37, 0xf2,
    +	0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x40, 0x4f, 0x33, 0x66,
    +	0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62, 0xc5, 0x28,
    +	0x8c, 0xeb, 0x21, 0x09, 0x30, 0x83, 0xc4, 0x9c, 0x00, 0xe5, 0x11, 0xc4, 0x8d, 0x90, 0xe0, 0x04,
    +	0x28, 0x04, 0x6b, 0x58, 0xa8, 0x0c, 0x83, 0x61, 0x62, 0x66, 0xbb, 0xda, 0x9a, 0x23, 0x8c, 0x55,
    +	0x50, 0x33, 0x19, 0x13, 0x31, 0x6e, 0xb0, 0xbe, 0x8e, 0xac, 0xc2, 0xd7, 0x0a, 0x80, 0xf8, 0x10,
    +	0x7e, 0x9b, 0x0d, 0xdc, 0xf5, 0xf4, 0xc0, 0x75, 0xbf, 0x25, 0xee, 0xd7, 0xe8, 0xfd, 0x4f, 0x0b,
    +	0x4e, 0xce, 0xbb, 0x5e, 0x8d, 0x04, 0x39, 0x0b, 0xf0, 0xc1, 0x3c, 0xe5, 0x0f, 0xc6, 0xa4, 0x18,
    +	0x4b, 0xac, 0xe7, 0x3e, 0x2c, 0x31, 0xfb, 0x2f, 0x2c, 0x40, 0xfc, 0xb3, 0xdf, 0x75, 0x1f, 0x7b,
    +	0x3d, 0xfd, 0xb1, 0xf7, 0x61, 0x59, 0xd8, 0x57, 0x61, 0x74, 0xbe, 0xee, 0x12, 0x2f, 0x5a, 0x2e,
    +	0xcf, 0xfb, 0xde, 0xba, 0xbb, 0x81, 0x5e, 0x82, 0xd1, 0xc8, 0x6d, 0x10, 0xbf, 0x15, 0x55, 0x48,
    +	0xd5, 0xf7, 0xd8, 0xcb, 0xd5, 0x3a, 0xd7, 0x3b, 0x87, 0xf6, 0x76, 0x4b, 0xa3, 0x6b, 0x06, 0x04,
    +	0x27, 0x30, 0xed, 0x9f, 0xa1, 0xe7, 0x56, 0xbd, 0x15, 0x46, 0x24, 0x58, 0x0b, 0x5a, 0x61, 0x34,
    +	0xd7, 0xa2, 0xbc, 0x67, 0x39, 0xf0, 0x69, 0x77, 0x5c, 0xdf, 0x43, 0x27, 0x8d, 0xe7, 0xf8, 0x80,
    +	0x7c, 0x8a, 0x8b, 0x67, 0xf7, 0x0c, 0x40, 0xe8, 0x6e, 0x78, 0x24, 0xd0, 0x9e, 0x0f, 0xa3, 0x6c,
    +	0xab, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x3a, 0x8c, 0xd4, 0x9d, 0x5b, 0xa4, 0x5e, 0x21, 0x75, 0x52,
    +	0x8d, 0xfc, 0x40, 0xc8, 0x37, 0x9e, 0xeb, 0xee, 0x1d, 0x70, 0x55, 0xaf, 0x3a, 0x37, 0xb1, 0xb7,
    +	0x5b, 0x1a, 0x31, 0x8a, 0xb0, 0x49, 0x9c, 0x1e, 0x1d, 0x7e, 0x93, 0x7e, 0x85, 0x53, 0xd7, 0x1f,
    +	0x9f, 0xd7, 0x44, 0x19, 0x56, 0x50, 0x75, 0x74, 0xf4, 0xe4, 0x1d, 0x1d, 0xf6, 0x1f, 0xd1, 0x85,
    +	0xe6, 0x37, 0x9a, 0xbe, 0x47, 0xbc, 0x68, 0xde, 0xf7, 0x6a, 0x5c, 0x32, 0xf5, 0x92, 0x21, 0x3a,
    +	0x39, 0x9b, 0x10, 0x9d, 0x1c, 0x4b, 0xd7, 0xd0, 0xa4, 0x27, 0x1f, 0x86, 0xbe, 0x30, 0x72, 0xa2,
    +	0x56, 0x28, 0x06, 0xee, 0x61, 0xb9, 0xec, 0x2a, 0xac, 0x74, 0x7f, 0xb7, 0x34, 0xa6, 0xaa, 0xf1,
    +	0x22, 0x2c, 0x2a, 0xa0, 0x27, 0xa0, 0xbf, 0x41, 0xc2, 0xd0, 0xd9, 0x90, 0x6c, 0xc3, 0x98, 0xa8,
    +	0xdb, 0xbf, 0xc2, 0x8b, 0xb1, 0x84, 0xa3, 0x47, 0xa0, 0x97, 0x04, 0x81, 0x1f, 0x88, 0x6f, 0x1b,
    +	0x11, 0x88, 0xbd, 0x8b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, 0x18, 0x53, 0x7d, 0xe5, 0x6d,
    +	0x1d, 0xc2, 0x73, 0xed, 0x4d, 0x80, 0xaa, 0xfc, 0xc0, 0x90, 0x5d, 0xb3, 0x43, 0xcf, 0x9e, 0xcd,
    +	0xe4, 0x68, 0x52, 0xc3, 0x18, 0x53, 0x56, 0x45, 0x21, 0xd6, 0xa8, 0xd9, 0xbf, 0x61, 0xc1, 0x64,
    +	0xe2, 0x8b, 0xae, 0xba, 0x61, 0x84, 0xde, 0x4a, 0x7d, 0xd5, 0x4c, 0x97, 0x8b, 0xcf, 0x0d, 0xf9,
    +	0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x0c, 0xbd, 0x6e, 0x44, 0x1a, 0xf2, 0x63, 0x1e,
    +	0x69, 0xfb, 0x31, 0xbc, 0x57, 0xf1, 0x8c, 0x2c, 0xd3, 0x9a, 0x98, 0x13, 0xb0, 0x7f, 0xbb, 0x08,
    +	0x83, 0x7c, 0x7f, 0xaf, 0x38, 0xcd, 0x43, 0x98, 0x8b, 0xa7, 0x60, 0xd0, 0x6d, 0x34, 0x5a, 0x91,
    +	0x73, 0x4b, 0xdc, 0x7b, 0x03, 0xfc, 0x0c, 0x5a, 0x96, 0x85, 0x38, 0x86, 0xa3, 0x65, 0xe8, 0x61,
    +	0x5d, 0xe1, 0x5f, 0xf9, 0x78, 0xf6, 0x57, 0x8a, 0xbe, 0xcf, 0x2c, 0x38, 0x91, 0xc3, 0x59, 0x4e,
    +	0xb5, 0xaf, 0x68, 0x11, 0x66, 0x24, 0x90, 0x03, 0x70, 0xcb, 0xf5, 0x9c, 0x60, 0x87, 0x96, 0x4d,
    +	0x15, 0x19, 0xc1, 0x67, 0xda, 0x13, 0x9c, 0x53, 0xf8, 0x9c, 0xac, 0xfa, 0xb0, 0x18, 0x80, 0x35,
    +	0xa2, 0xd3, 0x2f, 0xc2, 0xa0, 0x42, 0x3e, 0x08, 0xe7, 0x38, 0xfd, 0x11, 0x18, 0x4b, 0xb4, 0xd5,
    +	0xa9, 0xfa, 0xb0, 0xce, 0x78, 0xfe, 0x1a, 0x3b, 0x32, 0x44, 0xaf, 0x17, 0xbd, 0x6d, 0x71, 0x37,
    +	0xdd, 0x85, 0x23, 0xf5, 0x8c, 0x23, 0x5f, 0xcc, 0x6b, 0xf7, 0x57, 0xc4, 0x49, 0xf1, 0xd9, 0x47,
    +	0xb2, 0xa0, 0x38, 0xb3, 0x0d, 0xe3, 0x44, 0x2c, 0xb4, 0x3b, 0x11, 0xe9, 0x79, 0x77, 0x44, 0x75,
    +	0xfe, 0x0a, 0xd9, 0x51, 0x87, 0xea, 0xb7, 0xb2, 0xfb, 0xa7, 0xf8, 0xe8, 0xf3, 0xe3, 0x72, 0x48,
    +	0x10, 0x28, 0x5e, 0x21, 0x3b, 0x7c, 0x2a, 0xf4, 0xaf, 0x2b, 0xb6, 0xfd, 0xba, 0xaf, 0x58, 0x30,
    +	0xa2, 0xbe, 0xee, 0x10, 0xce, 0x85, 0x39, 0xf3, 0x5c, 0x38, 0xd5, 0x76, 0x81, 0xe7, 0x9c, 0x08,
    +	0x5f, 0x2b, 0xc0, 0x09, 0x85, 0x43, 0x1f, 0x51, 0xfc, 0x8f, 0x58, 0x55, 0xe7, 0x61, 0xd0, 0x53,
    +	0xe2, 0x44, 0xcb, 0x94, 0xe3, 0xc5, 0xc2, 0xc4, 0x18, 0x87, 0x5e, 0x79, 0x5e, 0x7c, 0x69, 0x0f,
    +	0xeb, 0x72, 0x76, 0x71, 0xb9, 0xcf, 0x41, 0xb1, 0xe5, 0xd6, 0xc4, 0x05, 0xf3, 0x41, 0x39, 0xda,
    +	0xd7, 0x97, 0x17, 0xf6, 0x77, 0x4b, 0x0f, 0xe7, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0x5c, 0x5f,
    +	0x5e, 0xc0, 0xb4, 0x32, 0x9a, 0x85, 0x31, 0xa9, 0x55, 0xbb, 0x41, 0xf9, 0x52, 0xdf, 0x13, 0xf7,
    +	0x90, 0x12, 0x96, 0x63, 0x13, 0x8c, 0x93, 0xf8, 0x68, 0x01, 0xc6, 0xb7, 0x5a, 0xb7, 0x48, 0x9d,
    +	0x44, 0xfc, 0x83, 0xaf, 0x10, 0x2e, 0x4a, 0x1e, 0x8c, 0x9f, 0xb0, 0x57, 0x12, 0x70, 0x9c, 0xaa,
    +	0x61, 0xff, 0x0d, 0xbb, 0x0f, 0xc4, 0xe8, 0x69, 0xfc, 0xcd, 0xb7, 0x72, 0x39, 0x77, 0xb3, 0x2a,
    +	0xae, 0x90, 0x9d, 0x35, 0x9f, 0xf2, 0x21, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x3d, 0x6d, 0xd7, 0xfc,
    +	0x2f, 0x15, 0xe0, 0xa8, 0x1a, 0x01, 0x83, 0x5b, 0xfe, 0x76, 0x1f, 0x83, 0x0b, 0x30, 0x54, 0x23,
    +	0xeb, 0x4e, 0xab, 0x1e, 0x29, 0xbd, 0x46, 0x2f, 0x57, 0xb5, 0x2d, 0xc4, 0xc5, 0x58, 0xc7, 0x39,
    +	0xc0, 0xb0, 0xfd, 0xc2, 0x08, 0xbb, 0x88, 0x23, 0x87, 0xae, 0x71, 0xb5, 0x6b, 0xac, 0xdc, 0x5d,
    +	0xf3, 0x08, 0xf4, 0xba, 0x0d, 0xca, 0x98, 0x15, 0x4c, 0x7e, 0x6b, 0x99, 0x16, 0x62, 0x0e, 0x43,
    +	0x8f, 0x41, 0x7f, 0xd5, 0x6f, 0x34, 0x1c, 0xaf, 0xc6, 0xae, 0xbc, 0xc1, 0xb9, 0x21, 0xca, 0xbb,
    +	0xcd, 0xf3, 0x22, 0x2c, 0x61, 0x94, 0xf9, 0x76, 0x82, 0x0d, 0x2e, 0xec, 0x11, 0xcc, 0xf7, 0x6c,
    +	0xb0, 0x11, 0x62, 0x56, 0x4a, 0xdf, 0xaa, 0xb7, 0xfd, 0x60, 0xcb, 0xf5, 0x36, 0x16, 0xdc, 0x40,
    +	0x6c, 0x09, 0x75, 0x17, 0xde, 0x54, 0x10, 0xac, 0x61, 0xa1, 0x25, 0xe8, 0x6d, 0xfa, 0x41, 0x14,
    +	0x4e, 0xf5, 0xb1, 0xe1, 0x7e, 0x38, 0xe7, 0x20, 0xe2, 0x5f, 0x5b, 0xf6, 0x83, 0x28, 0xfe, 0x00,
    +	0xfa, 0x2f, 0xc4, 0xbc, 0x3a, 0xba, 0x0a, 0xfd, 0xc4, 0xdb, 0x5e, 0x0a, 0xfc, 0xc6, 0xd4, 0x64,
    +	0x3e, 0xa5, 0x45, 0x8e, 0xc2, 0x97, 0x59, 0xcc, 0xa3, 0x8a, 0x62, 0x2c, 0x49, 0xa0, 0x0f, 0x43,
    +	0x91, 0x78, 0xdb, 0x53, 0xfd, 0x8c, 0xd2, 0x74, 0x0e, 0xa5, 0x1b, 0x4e, 0x10, 0x9f, 0xf9, 0x8b,
    +	0xde, 0x36, 0xa6, 0x75, 0xd0, 0xc7, 0x60, 0x50, 0x1e, 0x18, 0xa1, 0x90, 0xa2, 0x66, 0x2e, 0x58,
    +	0x79, 0xcc, 0x60, 0xf2, 0x76, 0xcb, 0x0d, 0x48, 0x83, 0x78, 0x51, 0x18, 0x9f, 0x90, 0x12, 0x1a,
    +	0xe2, 0x98, 0x1a, 0xaa, 0xc2, 0x70, 0x40, 0x42, 0xf7, 0x2e, 0x29, 0xfb, 0x75, 0xb7, 0xba, 0x33,
    +	0x75, 0x9c, 0x75, 0xef, 0x89, 0xb6, 0x43, 0x86, 0xb5, 0x0a, 0xb1, 0x94, 0x5f, 0x2f, 0xc5, 0x06,
    +	0x51, 0xf4, 0x06, 0x8c, 0x04, 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xad, 0x4c, 0x29, 0xad, 0xdc, 0x08,
    +	0xd6, 0x01, 0xfc, 0x39, 0x11, 0x37, 0x13, 0x43, 0xb0, 0x49, 0x01, 0x7d, 0x4c, 0xaa, 0x1c, 0x56,
    +	0xfc, 0x96, 0x17, 0x85, 0x53, 0x83, 0xac, 0xdf, 0x99, 0xba, 0xe9, 0x1b, 0x31, 0x5e, 0x52, 0x27,
    +	0xc1, 0x2b, 0x63, 0x83, 0x14, 0xfa, 0x04, 0x8c, 0xf0, 0xff, 0x5c, 0xa5, 0x1a, 0x4e, 0x1d, 0x65,
    +	0xb4, 0xcf, 0xe4, 0xd3, 0xe6, 0x88, 0x73, 0x47, 0x05, 0xf1, 0x11, 0xbd, 0x34, 0xc4, 0x26, 0x35,
    +	0x84, 0x61, 0xa4, 0xee, 0x6e, 0x13, 0x8f, 0x84, 0x61, 0x39, 0xf0, 0x6f, 0x11, 0x21, 0x21, 0x3e,
    +	0x91, 0xad, 0x82, 0xf5, 0x6f, 0x11, 0xf1, 0x08, 0xd4, 0xeb, 0x60, 0x93, 0x04, 0xba, 0x0e, 0xa3,
    +	0xf4, 0x49, 0xee, 0xc6, 0x44, 0x87, 0x3a, 0x11, 0x65, 0x0f, 0x67, 0x6c, 0x54, 0xc2, 0x09, 0x22,
    +	0xe8, 0x1a, 0x0c, 0xb3, 0x31, 0x6f, 0x35, 0x39, 0xd1, 0x63, 0x9d, 0x88, 0x32, 0x83, 0x82, 0x8a,
    +	0x56, 0x05, 0x1b, 0x04, 0xd0, 0xeb, 0x30, 0x58, 0x77, 0xd7, 0x49, 0x75, 0xa7, 0x5a, 0x27, 0x53,
    +	0xc3, 0x8c, 0x5a, 0xe6, 0x61, 0x78, 0x55, 0x22, 0x71, 0xfe, 0x5c, 0xfd, 0xc5, 0x71, 0x75, 0x74,
    +	0x03, 0x8e, 0x45, 0x24, 0x68, 0xb8, 0x9e, 0x43, 0x0f, 0x31, 0xf1, 0x24, 0x64, 0x9a, 0xf1, 0x11,
    +	0xb6, 0xba, 0x4e, 0x8b, 0xd9, 0x38, 0xb6, 0x96, 0x89, 0x85, 0x73, 0x6a, 0xa3, 0x3b, 0x30, 0x95,
    +	0x01, 0xe1, 0xeb, 0xf6, 0x08, 0xa3, 0xfc, 0x8a, 0xa0, 0x3c, 0xb5, 0x96, 0x83, 0xb7, 0xdf, 0x06,
    +	0x86, 0x73, 0xa9, 0xa3, 0x6b, 0x30, 0xc6, 0x4e, 0xce, 0x72, 0xab, 0x5e, 0x17, 0x0d, 0x8e, 0xb2,
    +	0x06, 0x1f, 0x93, 0x7c, 0xc4, 0xb2, 0x09, 0xde, 0xdf, 0x2d, 0x41, 0xfc, 0x0f, 0x27, 0x6b, 0xa3,
    +	0x5b, 0x4c, 0x09, 0xdb, 0x0a, 0xdc, 0x68, 0x87, 0xee, 0x2a, 0x72, 0x27, 0x9a, 0x1a, 0x6b, 0x2b,
    +	0x90, 0xd2, 0x51, 0x95, 0xa6, 0x56, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x2a, 0x08, 0xa3, 0x9a, 0xeb,
    +	0x4d, 0x8d, 0xf3, 0xf7, 0x94, 0x3c, 0x49, 0x2b, 0xb4, 0x10, 0x73, 0x18, 0x53, 0xc0, 0xd2, 0x1f,
    +	0xd7, 0xe8, 0x8d, 0x3b, 0xc1, 0x10, 0x63, 0x05, 0xac, 0x04, 0xe0, 0x18, 0x87, 0x32, 0xc1, 0x51,
    +	0xb4, 0x33, 0x85, 0x18, 0xaa, 0x3a, 0x10, 0xd7, 0xd6, 0x3e, 0x86, 0x69, 0xb9, 0x7d, 0x0b, 0x46,
    +	0xd5, 0x31, 0xc1, 0xc6, 0x04, 0x95, 0xa0, 0x97, 0xb1, 0x7d, 0x42, 0x7c, 0x3a, 0x48, 0xbb, 0xc0,
    +	0x58, 0x42, 0xcc, 0xcb, 0x59, 0x17, 0xdc, 0xbb, 0x64, 0x6e, 0x27, 0x22, 0x5c, 0x16, 0x51, 0xd4,
    +	0xba, 0x20, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x2f, 0x67, 0x9f, 0xe3, 0x5b, 0xa2, 0x8b, 0x7b, 0xf1,
    +	0x69, 0x18, 0x60, 0x86, 0x1f, 0x7e, 0xc0, 0xb5, 0xb3, 0xbd, 0x31, 0xc3, 0x7c, 0x59, 0x94, 0x63,
    +	0x85, 0x81, 0x5e, 0x86, 0x91, 0xaa, 0xde, 0x80, 0xb8, 0xd4, 0xd5, 0x31, 0x62, 0xb4, 0x8e, 0x4d,
    +	0x5c, 0x74, 0x11, 0x06, 0x98, 0x8d, 0x53, 0xd5, 0xaf, 0x0b, 0x6e, 0x53, 0x72, 0x26, 0x03, 0x65,
    +	0x51, 0xbe, 0xaf, 0xfd, 0xc6, 0x0a, 0x1b, 0x9d, 0x85, 0x3e, 0xda, 0x85, 0xe5, 0xb2, 0xb8, 0x4e,
    +	0x95, 0x24, 0xf0, 0x32, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0x86, 0xc5, 0x78, 0xa9, 0xf4, 0x99, 0x8f,
    +	0x2e, 0xb3, 0x4b, 0x83, 0xdd, 0x20, 0x9a, 0x16, 0xfe, 0x51, 0xed, 0x26, 0x50, 0xb0, 0xfd, 0xc4,
    +	0x7f, 0x6c, 0xd4, 0x44, 0x6f, 0x26, 0x6f, 0x06, 0xce, 0x50, 0x3c, 0x2f, 0x87, 0x20, 0x79, 0x3b,
    +	0x3c, 0x14, 0x5f, 0x71, 0xb4, 0x3f, 0xed, 0xae, 0x08, 0xfb, 0x47, 0x0a, 0xda, 0x2a, 0xa9, 0x44,
    +	0x4e, 0x44, 0x50, 0x19, 0xfa, 0x6f, 0x3b, 0x6e, 0xe4, 0x7a, 0x1b, 0x82, 0xef, 0x6b, 0x7f, 0xd1,
    +	0xb1, 0x4a, 0x37, 0x79, 0x05, 0xce, 0xbd, 0x88, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83, 0x96, 0xe7,
    +	0x51, 0x8a, 0x85, 0x6e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83, 0xde, 0x02,
    +	0x90, 0x27, 0x04, 0xa9, 0x09, 0xd9, 0xe1, 0xd3, 0x9d, 0x89, 0xae, 0xa9, 0x3a, 0x5c, 0x38, 0x19,
    +	0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0xe6, 0x54, 0xef, 0x0c, 0xfa, 0x38, 0xdd, 0xa2, 0x4e, 0x10,
    +	0x91, 0xda, 0x6c, 0x24, 0x06, 0xe7, 0xc9, 0xee, 0x1e, 0x87, 0x6b, 0x6e, 0x83, 0xe8, 0xdb, 0x59,
    +	0x10, 0xc1, 0x31, 0x3d, 0xfb, 0x57, 0x8a, 0x30, 0x95, 0xd7, 0x5d, 0xba, 0x69, 0xc8, 0x1d, 0x37,
    +	0x9a, 0xa7, 0x6c, 0xad, 0x65, 0x6e, 0x9a, 0x45, 0x51, 0x8e, 0x15, 0x06, 0x5d, 0xbd, 0xa1, 0xbb,
    +	0x21, 0xdf, 0xf6, 0xbd, 0xf1, 0xea, 0xad, 0xb0, 0x52, 0x2c, 0xa0, 0x14, 0x2f, 0x20, 0x4e, 0x28,
    +	0x8c, 0xef, 0xb4, 0x55, 0x8e, 0x59, 0x29, 0x16, 0x50, 0x5d, 0xca, 0xd8, 0xd3, 0x41, 0xca, 0x68,
    +	0x0c, 0x51, 0xef, 0xfd, 0x1d, 0x22, 0xf4, 0x49, 0x80, 0x75, 0xd7, 0x73, 0xc3, 0x4d, 0x46, 0xbd,
    +	0xef, 0xc0, 0xd4, 0x15, 0x53, 0xbc, 0xa4, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x01, 0x86, 0xd4, 0x01,
    +	0xb2, 0xbc, 0xc0, 0x54, 0xff, 0x9a, 0x29, 0x55, 0x7c, 0x9a, 0x2e, 0x60, 0x1d, 0xcf, 0xfe, 0x74,
    +	0x72, 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0xd5, 0xed, 0xf8, 0x16, 0xda, 0x8f, 0xaf, 0xfd, 0x63,
    +	0x83, 0x30, 0x66, 0x34, 0xd6, 0x0a, 0xbb, 0x38, 0x73, 0x2f, 0xd1, 0x0b, 0xc8, 0x89, 0x88, 0xd8,
    +	0x7f, 0x76, 0xe7, 0xad, 0xa2, 0x5f, 0x52, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x93, 0x30, 0x58, 0x77,
    +	0x42, 0x26, 0xb1, 0x24, 0x62, 0xdf, 0x75, 0x43, 0x2c, 0x7e, 0x10, 0x3a, 0x61, 0xa4, 0xdd, 0xfa,
    +	0x9c, 0x76, 0x4c, 0x92, 0xde, 0x94, 0x94, 0xbf, 0x92, 0xd6, 0x9d, 0xaa, 0x13, 0x94, 0x09, 0xdb,
    +	0xc1, 0x1c, 0x86, 0x2e, 0xb2, 0xa3, 0x95, 0xae, 0x8a, 0x79, 0xca, 0x8d, 0xb2, 0x65, 0xd6, 0x6b,
    +	0x30, 0xd9, 0x0a, 0x86, 0x0d, 0xcc, 0xf8, 0x4d, 0xd6, 0xd7, 0xe6, 0x4d, 0xf6, 0x04, 0xf4, 0xb3,
    +	0x1f, 0x6a, 0x05, 0xa8, 0xd9, 0x58, 0xe6, 0xc5, 0x58, 0xc2, 0x93, 0x0b, 0x66, 0xa0, 0xbb, 0x05,
    +	0x43, 0x5f, 0x7d, 0x62, 0x51, 0x33, 0xb3, 0x8b, 0x01, 0x7e, 0xca, 0x89, 0x25, 0x8f, 0x25, 0x0c,
    +	0xfd, 0xac, 0x05, 0xc8, 0xa9, 0xd3, 0xd7, 0x32, 0x2d, 0x56, 0x8f, 0x1b, 0x60, 0xac, 0xf6, 0xcb,
    +	0x1d, 0x87, 0xbd, 0x15, 0xce, 0xcc, 0xa6, 0x6a, 0x73, 0x49, 0xe9, 0x4b, 0xa2, 0x8b, 0x28, 0x8d,
    +	0xa0, 0x5f, 0x46, 0x57, 0xdd, 0x30, 0xfa, 0xec, 0x1f, 0x27, 0x2e, 0xa7, 0x8c, 0x2e, 0xa1, 0xeb,
    +	0xfa, 0xe3, 0x6b, 0xe8, 0x80, 0x8f, 0xaf, 0x91, 0xdc, 0x87, 0xd7, 0x77, 0x26, 0x1e, 0x30, 0xc3,
    +	0xec, 0xcb, 0x1f, 0xeb, 0xf0, 0x80, 0x11, 0xe2, 0xf4, 0x6e, 0x9e, 0x31, 0x65, 0xa1, 0x07, 0x1e,
    +	0x61, 0x5d, 0x6e, 0xff, 0x08, 0xbe, 0x1e, 0x92, 0x60, 0xee, 0x84, 0x54, 0x13, 0xef, 0xeb, 0xbc,
    +	0x87, 0xa6, 0x37, 0xfe, 0x1e, 0x0b, 0xa6, 0xd2, 0x03, 0xc4, 0xbb, 0x34, 0x35, 0xca, 0xfa, 0x6f,
    +	0xb7, 0x1b, 0x19, 0xd1, 0x79, 0x69, 0xee, 0x3a, 0x35, 0x9b, 0x43, 0x0b, 0xe7, 0xb6, 0x32, 0xdd,
    +	0x82, 0xe3, 0x39, 0xf3, 0x9e, 0x21, 0xb5, 0x5e, 0xd0, 0xa5, 0xd6, 0x1d, 0x64, 0x9d, 0x33, 0x72,
    +	0x66, 0x66, 0xde, 0x68, 0x39, 0x5e, 0xe4, 0x46, 0x3b, 0xba, 0x94, 0xdb, 0x03, 0x73, 0x40, 0xd0,
    +	0x27, 0xa0, 0xb7, 0xee, 0x7a, 0xad, 0x3b, 0xe2, 0xa6, 0x3c, 0x9b, 0xfd, 0x88, 0xf1, 0x5a, 0x77,
    +	0xcc, 0x21, 0x2e, 0xd1, 0x0d, 0xc9, 0xca, 0xf7, 0x77, 0x4b, 0x28, 0x8d, 0x80, 0x39, 0x55, 0xfb,
    +	0x49, 0x18, 0x5d, 0x70, 0x48, 0xc3, 0xf7, 0x16, 0xbd, 0x5a, 0xd3, 0x77, 0xbd, 0x08, 0x4d, 0x41,
    +	0x0f, 0x63, 0x11, 0xf9, 0x05, 0xd9, 0x43, 0x87, 0x10, 0xb3, 0x12, 0x7b, 0x03, 0x8e, 0x2e, 0xf8,
    +	0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9b, 0x2d, 0x2f, 0x6b, 0x52, 0xbf, 0x55, 0x29, 0x75, 0xb2, 0xf2,
    +	0xdf, 0xf4, 0x5a, 0x4d, 0xbe, 0x94, 0x96, 0xdc, 0x3a, 0xc9, 0x91, 0xcd, 0xfe, 0x58, 0xc1, 0x68,
    +	0x29, 0xc6, 0x57, 0x9a, 0x45, 0x2b, 0xd7, 0x28, 0xe1, 0x0d, 0x18, 0x58, 0x77, 0x49, 0xbd, 0x86,
    +	0xc9, 0xba, 0x98, 0x8d, 0xc7, 0xf3, 0xcd, 0x16, 0x97, 0x28, 0xa6, 0x52, 0x81, 0x32, 0x99, 0xd5,
    +	0x92, 0xa8, 0x8c, 0x15, 0x19, 0xb4, 0x05, 0xe3, 0x72, 0xce, 0x24, 0x54, 0x9c, 0xda, 0x4f, 0xb4,
    +	0x5b, 0x84, 0x26, 0x71, 0x66, 0xc2, 0x8d, 0x13, 0x64, 0x70, 0x8a, 0x30, 0x3a, 0x09, 0x3d, 0x0d,
    +	0xca, 0x9f, 0xf4, 0xb0, 0xe1, 0x67, 0x42, 0x2a, 0x26, 0x6f, 0x63, 0xa5, 0xf6, 0x4f, 0x58, 0x70,
    +	0x3c, 0x35, 0x32, 0x42, 0xee, 0x78, 0x9f, 0x67, 0x21, 0x29, 0x07, 0x2c, 0x74, 0x96, 0x03, 0xda,
    +	0xff, 0xc8, 0x82, 0x23, 0x8b, 0x8d, 0x66, 0xb4, 0xb3, 0xe0, 0x9a, 0x16, 0x04, 0x2f, 0x42, 0x5f,
    +	0x83, 0xd4, 0xdc, 0x56, 0x43, 0xcc, 0x5c, 0x49, 0xde, 0xe1, 0x2b, 0xac, 0x94, 0x9e, 0x03, 0x95,
    +	0xc8, 0x0f, 0x9c, 0x0d, 0xc2, 0x0b, 0xb0, 0x40, 0x67, 0x9c, 0x90, 0x7b, 0x97, 0x5c, 0x75, 0x1b,
    +	0x6e, 0x74, 0x6f, 0xbb, 0x4b, 0x28, 0xff, 0x25, 0x11, 0x1c, 0xd3, 0xb3, 0xbf, 0x61, 0xc1, 0x98,
    +	0x5c, 0xf7, 0xb3, 0xb5, 0x5a, 0x40, 0xc2, 0x10, 0x4d, 0x43, 0xc1, 0x6d, 0x8a, 0x5e, 0x82, 0xe8,
    +	0x65, 0x61, 0xb9, 0x8c, 0x0b, 0x6e, 0x53, 0x3e, 0xba, 0x18, 0x9b, 0x50, 0x34, 0xed, 0x20, 0x2e,
    +	0x8b, 0x72, 0xac, 0x30, 0xd0, 0x39, 0x18, 0xf0, 0xfc, 0x1a, 0x7f, 0xb7, 0x08, 0x4d, 0x38, 0xc5,
    +	0x5c, 0x15, 0x65, 0x58, 0x41, 0x51, 0x19, 0x06, 0xb9, 0x95, 0x6c, 0xbc, 0x68, 0xbb, 0xb2, 0xb5,
    +	0x65, 0x5f, 0xb6, 0x26, 0x6b, 0xe2, 0x98, 0x88, 0xfd, 0x5b, 0x16, 0x0c, 0xcb, 0x2f, 0xeb, 0xf2,
    +	0x45, 0x49, 0xb7, 0x56, 0xfc, 0x9a, 0x8c, 0xb7, 0x16, 0x7d, 0x11, 0x32, 0x88, 0xf1, 0x10, 0x2c,
    +	0x1e, 0xe8, 0x21, 0x78, 0x01, 0x86, 0x9c, 0x66, 0xb3, 0x6c, 0xbe, 0x22, 0xd9, 0x52, 0x9a, 0x8d,
    +	0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x78, 0x01, 0x46, 0xe5, 0x17, 0x54, 0x5a, 0xb7, 0x42, 0x12, 0xa1,
    +	0x35, 0x18, 0x74, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x1f, 0xc9, 0x96, 0x6e, 0x1a, 0x53, 0x1a, 0xb3,
    +	0xc3, 0xb3, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x3a, 0x4c, 0x78, 0x7e, 0xc4, 0x58, 0x23, 0x05, 0x6f,
    +	0xa7, 0x70, 0x4e, 0x52, 0x3f, 0x21, 0xa8, 0x4f, 0xac, 0x26, 0xa9, 0xe0, 0x34, 0x61, 0xb4, 0x28,
    +	0x25, 0xc6, 0xc5, 0x7c, 0x51, 0x9f, 0x3e, 0x71, 0xd9, 0x02, 0x63, 0xfb, 0xd7, 0x2d, 0x18, 0x94,
    +	0x68, 0x87, 0x61, 0x5b, 0xb0, 0x02, 0xfd, 0x21, 0x9b, 0x04, 0x39, 0x34, 0x76, 0xbb, 0x8e, 0xf3,
    +	0xf9, 0x8a, 0x39, 0x3e, 0xfe, 0x3f, 0xc4, 0x92, 0x06, 0x53, 0x18, 0xaa, 0xee, 0xbf, 0x4b, 0x14,
    +	0x86, 0xaa, 0x3f, 0x39, 0x97, 0xd2, 0x9f, 0xb2, 0x3e, 0x6b, 0x12, 0x78, 0xfa, 0x30, 0x69, 0x06,
    +	0x64, 0xdd, 0xbd, 0x93, 0x7c, 0x98, 0x94, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x16, 0x0c, 0x57, 0xa5,
    +	0xa6, 0x28, 0xde, 0xe1, 0x67, 0xdb, 0x6a, 0x2d, 0x95, 0x82, 0x9b, 0x4b, 0x3a, 0xe7, 0xb5, 0xfa,
    +	0xd8, 0xa0, 0x66, 0x5a, 0x81, 0x15, 0x3b, 0x59, 0x81, 0xc5, 0x74, 0xf3, 0x6d, 0xa2, 0x7e, 0xd2,
    +	0x82, 0x3e, 0xae, 0x21, 0xe8, 0x4e, 0x41, 0xa3, 0xe9, 0xfb, 0xe3, 0xb1, 0xbb, 0x41, 0x0b, 0x05,
    +	0x67, 0x83, 0x56, 0x60, 0x90, 0xfd, 0x60, 0x1a, 0x8e, 0x62, 0xbe, 0xcf, 0x18, 0x6f, 0x55, 0xef,
    +	0xe0, 0x0d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x1f, 0x2d, 0xd2, 0xd3, 0x2d, 0x46, 0x35, 0x2e, 0x7d,
    +	0xeb, 0xc1, 0x5d, 0xfa, 0x85, 0x07, 0x75, 0xe9, 0x6f, 0xc0, 0x58, 0x55, 0xb3, 0x0e, 0x88, 0x67,
    +	0xf2, 0x5c, 0xdb, 0x45, 0xa2, 0x19, 0x12, 0x70, 0x19, 0xea, 0xbc, 0x49, 0x04, 0x27, 0xa9, 0xa2,
    +	0x8f, 0xc3, 0x30, 0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xa4, 0x7b, 0x2c, 0x7f, 0xbd, 0xe8, 0x4d, 0x70,
    +	0x99, 0xbb, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0xbf, 0xb4, 0x00, 0x2d, 0x36, 0x37, 0x49, 0x83, 0x04,
    +	0x4e, 0x3d, 0x56, 0xf2, 0x7d, 0xc1, 0x82, 0x29, 0x92, 0x2a, 0x9e, 0xf7, 0x1b, 0x0d, 0xf1, 0xa4,
    +	0xcf, 0x91, 0x3a, 0x2d, 0xe6, 0xd4, 0x89, 0xd9, 0xfa, 0x3c, 0x0c, 0x9c, 0xdb, 0x1e, 0x5a, 0x81,
    +	0x49, 0x7e, 0x4b, 0x2a, 0x80, 0x66, 0x6b, 0xf7, 0x90, 0x20, 0x3c, 0xb9, 0x96, 0x46, 0xc1, 0x59,
    +	0xf5, 0xec, 0x5f, 0x1f, 0x81, 0xdc, 0x5e, 0xbc, 0xaf, 0xdd, 0x7c, 0x5f, 0xbb, 0xf9, 0xbe, 0x76,
    +	0xf3, 0x7d, 0xed, 0xe6, 0xfb, 0xda, 0xcd, 0xf7, 0xb5, 0x9b, 0xef, 0x52, 0xed, 0xe6, 0xdf, 0xb6,
    +	0xe0, 0xa8, 0xba, 0xbe, 0x8c, 0x07, 0xfb, 0x67, 0x60, 0x92, 0x6f, 0xb7, 0xf9, 0xba, 0xe3, 0x36,
    +	0xd6, 0x48, 0xa3, 0x59, 0x77, 0x22, 0x69, 0xc3, 0x74, 0x21, 0x73, 0xe5, 0x26, 0x1c, 0x25, 0x8c,
    +	0x8a, 0xdc, 0xe3, 0x2c, 0x03, 0x80, 0xb3, 0x9a, 0xb1, 0x7f, 0x65, 0x00, 0x7a, 0x17, 0xb7, 0x89,
    +	0x17, 0x1d, 0xc2, 0xd3, 0xa6, 0x0a, 0xa3, 0xae, 0xb7, 0xed, 0xd7, 0xb7, 0x49, 0x8d, 0xc3, 0x0f,
    +	0xf2, 0x02, 0x3f, 0x26, 0x48, 0x8f, 0x2e, 0x1b, 0x24, 0x70, 0x82, 0xe4, 0x83, 0xd0, 0x11, 0x5d,
    +	0x82, 0x3e, 0x7e, 0xf9, 0x08, 0x05, 0x51, 0xe6, 0x99, 0xcd, 0x06, 0x51, 0x5c, 0xa9, 0xb1, 0xfe,
    +	0x8a, 0x5f, 0x6e, 0xa2, 0x3a, 0xfa, 0x34, 0x8c, 0xae, 0xbb, 0x41, 0x18, 0xad, 0xb9, 0x0d, 0x7a,
    +	0x35, 0x34, 0x9a, 0xf7, 0xa0, 0x13, 0x52, 0xe3, 0xb0, 0x64, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x03,
    +	0x46, 0xea, 0x8e, 0xde, 0x54, 0xff, 0x81, 0x9b, 0x52, 0xb7, 0xc3, 0x55, 0x9d, 0x10, 0x36, 0xe9,
    +	0xd2, 0xed, 0x54, 0x65, 0x6a, 0x8d, 0x01, 0x26, 0xce, 0x50, 0xdb, 0x89, 0xeb, 0x33, 0x38, 0x8c,
    +	0x32, 0x68, 0xcc, 0xdd, 0x60, 0xd0, 0x64, 0xd0, 0x34, 0xa7, 0x82, 0x4f, 0xc1, 0x20, 0xa1, 0x43,
    +	0x48, 0x09, 0x8b, 0x0b, 0xe6, 0x7c, 0x77, 0x7d, 0x5d, 0x71, 0xab, 0x81, 0x6f, 0x6a, 0xe3, 0x16,
    +	0x25, 0x25, 0x1c, 0x13, 0x45, 0xf3, 0xd0, 0x17, 0x92, 0xc0, 0x55, 0x12, 0xff, 0x36, 0xd3, 0xc8,
    +	0xd0, 0xb8, 0x4b, 0x23, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x1c, 0x26, 0x8a, 0x65, 0x97, 0x81,
    +	0xb6, 0xbc, 0x66, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x3a, 0xf4, 0x07, 0xa4, 0xce, 0xd4, 0xbd, 0x23,
    +	0xdd, 0x2f, 0x72, 0xae, 0x3d, 0xe6, 0xf5, 0xb0, 0x24, 0x80, 0xae, 0x00, 0x0a, 0x08, 0x65, 0xf0,
    +	0x5c, 0x6f, 0x43, 0x19, 0xe1, 0x8b, 0x83, 0x56, 0x31, 0xd2, 0x38, 0xc6, 0x90, 0xde, 0xac, 0x38,
    +	0xa3, 0x1a, 0xba, 0x04, 0x13, 0xaa, 0x74, 0xd9, 0x0b, 0x23, 0x87, 0x1e, 0x70, 0x63, 0x8c, 0x96,
    +	0x92, 0xaf, 0xe0, 0x24, 0x02, 0x4e, 0xd7, 0xb1, 0x7f, 0xde, 0x02, 0x3e, 0xce, 0x87, 0x20, 0x55,
    +	0x78, 0xd5, 0x94, 0x2a, 0x9c, 0xc8, 0x9d, 0xb9, 0x1c, 0x89, 0xc2, 0xcf, 0x5b, 0x30, 0xa4, 0xcd,
    +	0x6c, 0xbc, 0x66, 0xad, 0x36, 0x6b, 0xb6, 0x05, 0xe3, 0x74, 0xa5, 0x5f, 0xbb, 0x15, 0x92, 0x60,
    +	0x9b, 0xd4, 0xd8, 0xc2, 0x2c, 0xdc, 0xdb, 0xc2, 0x54, 0x06, 0xbf, 0x57, 0x13, 0x04, 0x71, 0xaa,
    +	0x09, 0xfb, 0x53, 0xb2, 0xab, 0xca, 0x3e, 0xba, 0xaa, 0xe6, 0x3c, 0x61, 0x1f, 0xad, 0x66, 0x15,
    +	0xc7, 0x38, 0x74, 0xab, 0x6d, 0xfa, 0x61, 0x94, 0xb4, 0x8f, 0xbe, 0xec, 0x87, 0x11, 0x66, 0x10,
    +	0xfb, 0x39, 0x80, 0xc5, 0x3b, 0xa4, 0xca, 0x57, 0xac, 0xfe, 0xe8, 0xb1, 0xf2, 0x1f, 0x3d, 0xf6,
    +	0x1f, 0x58, 0x30, 0xba, 0x34, 0x6f, 0xdc, 0x5c, 0x33, 0x00, 0xfc, 0xa5, 0x76, 0xf3, 0xe6, 0xaa,
    +	0x34, 0xd2, 0xe1, 0x76, 0x0a, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x04, 0x14, 0xeb, 0x2d, 0x4f, 0x88,
    +	0x3d, 0xfb, 0xe9, 0xf5, 0x78, 0xb5, 0xe5, 0x61, 0x5a, 0xa6, 0x79, 0xb2, 0x15, 0xbb, 0xf6, 0x64,
    +	0xeb, 0x18, 0x50, 0x07, 0x95, 0xa0, 0xf7, 0xf6, 0x6d, 0xb7, 0xc6, 0xe3, 0x04, 0x08, 0x03, 0xa2,
    +	0x9b, 0x37, 0x97, 0x17, 0x42, 0xcc, 0xcb, 0xed, 0x2f, 0x16, 0x61, 0x7a, 0xa9, 0x4e, 0xee, 0xbc,
    +	0xc3, 0x58, 0x09, 0xdd, 0xfa, 0xe1, 0x1d, 0x4c, 0x80, 0x74, 0x50, 0x5f, 0xcb, 0xce, 0xe3, 0xb1,
    +	0x0e, 0xfd, 0xdc, 0x3c, 0x58, 0x46, 0x4e, 0xc8, 0x54, 0xca, 0xe6, 0x0f, 0xc8, 0x0c, 0x37, 0x33,
    +	0x16, 0x4a, 0x59, 0x75, 0x61, 0x8a, 0x52, 0x2c, 0x89, 0x4f, 0xbf, 0x04, 0xc3, 0x3a, 0xe6, 0x81,
    +	0xbc, 0x9e, 0xbf, 0xb7, 0x08, 0xe3, 0xb4, 0x07, 0x0f, 0x74, 0x22, 0xae, 0xa7, 0x27, 0xe2, 0x7e,
    +	0x7b, 0xbe, 0x76, 0x9e, 0x8d, 0xb7, 0x92, 0xb3, 0x71, 0x21, 0x6f, 0x36, 0x0e, 0x7b, 0x0e, 0xbe,
    +	0xcf, 0x82, 0xc9, 0xa5, 0xba, 0x5f, 0xdd, 0x4a, 0x78, 0xa7, 0xbe, 0x00, 0x43, 0xf4, 0x38, 0x0e,
    +	0x8d, 0x40, 0x2d, 0x46, 0xe8, 0x1e, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xfa, 0xf5, 0xe5, 0x85,
    +	0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xf7, 0x2c, 0x38, 0x75, 0x69, 0x7e, 0x31, 0x5e,
    +	0x8a, 0xa9, 0xa0, 0x43, 0x67, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58, 0x2f,
    +	0x04, 0xf4, 0xdd, 0x12, 0xdf, 0xeb, 0x3a, 0xc0, 0x25, 0x5c, 0x9e, 0x17, 0xe7, 0xae, 0xd4, 0x02,
    +	0x59, 0xb9, 0x5a, 0xa0, 0xc7, 0xa0, 0x9f, 0xde, 0x0b, 0x6e, 0x55, 0xf6, 0x9b, 0x9b, 0x5d, 0xf0,
    +	0x22, 0x2c, 0x61, 0xf6, 0xcf, 0x59, 0x30, 0x79, 0xc9, 0x8d, 0xe8, 0xa5, 0x9d, 0x8c, 0xaa, 0x43,
    +	0x6f, 0xed, 0xd0, 0x8d, 0xfc, 0x60, 0x27, 0x19, 0x55, 0x07, 0x2b, 0x08, 0xd6, 0xb0, 0xf8, 0x07,
    +	0x6d, 0xbb, 0xcc, 0xdf, 0xa5, 0x60, 0xea, 0xdd, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x8e, 0x57, 0xcd,
    +	0x0d, 0x98, 0xc8, 0x72, 0x47, 0x1c, 0xdc, 0x6a, 0xbc, 0x16, 0x24, 0x00, 0xc7, 0x38, 0xf6, 0x9f,
    +	0x5b, 0x50, 0xba, 0xc4, 0xbd, 0x76, 0xd7, 0xc3, 0x9c, 0x43, 0xf7, 0x39, 0x18, 0x24, 0x52, 0x41,
    +	0x20, 0x7a, 0xad, 0x18, 0x51, 0xa5, 0x39, 0xe0, 0xc1, 0x7d, 0x14, 0x5e, 0x17, 0x2e, 0xf4, 0x07,
    +	0xf3, 0x81, 0x5e, 0x02, 0x44, 0xf4, 0xb6, 0xf4, 0x68, 0x47, 0x2c, 0x6c, 0xca, 0x62, 0x0a, 0x8a,
    +	0x33, 0x6a, 0xd8, 0x3f, 0x61, 0xc1, 0x51, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0xbf, 0x5a, 0x80,
    +	0x91, 0xcb, 0x6b, 0x6b, 0xe5, 0x4b, 0x24, 0xd2, 0x56, 0x65, 0x7b, 0xb5, 0x3f, 0xd6, 0xb4, 0x97,
    +	0xed, 0xde, 0x88, 0xad, 0xc8, 0xad, 0xcf, 0xf0, 0x18, 0x7e, 0x33, 0xcb, 0x5e, 0x74, 0x2d, 0xa8,
    +	0x44, 0x81, 0xeb, 0x6d, 0x64, 0xae, 0x74, 0xc9, 0xb3, 0x14, 0xf3, 0x78, 0x16, 0xf4, 0x1c, 0xf4,
    +	0xb1, 0x20, 0x82, 0x72, 0x12, 0x1e, 0x52, 0x4f, 0x2c, 0x56, 0xba, 0xbf, 0x5b, 0x1a, 0xbc, 0x8e,
    +	0x97, 0xf9, 0x1f, 0x2c, 0x50, 0xd1, 0x75, 0x18, 0xda, 0x8c, 0xa2, 0xe6, 0x65, 0xe2, 0xd4, 0x48,
    +	0x20, 0x4f, 0xd9, 0xd3, 0x59, 0xa7, 0x2c, 0x1d, 0x04, 0x8e, 0x16, 0x1f, 0x4c, 0x71, 0x59, 0x88,
    +	0x75, 0x3a, 0x76, 0x05, 0x20, 0x86, 0xdd, 0x27, 0xc5, 0x8d, 0xbd, 0x06, 0x83, 0xf4, 0x73, 0x67,
    +	0xeb, 0xae, 0xd3, 0x5e, 0x35, 0xfe, 0x14, 0x0c, 0x4a, 0xc5, 0x77, 0x28, 0x42, 0x7c, 0xb0, 0x1b,
    +	0x49, 0xea, 0xc5, 0x43, 0x1c, 0xc3, 0xed, 0x47, 0x41, 0x58, 0x00, 0xb7, 0x23, 0x69, 0xaf, 0xc3,
    +	0x11, 0x66, 0xca, 0xec, 0x44, 0x9b, 0xc6, 0x1a, 0xed, 0xbc, 0x18, 0x9e, 0x16, 0xef, 0x3a, 0xfe,
    +	0x65, 0x53, 0x9a, 0x0b, 0xf9, 0xb0, 0xa4, 0x18, 0xbf, 0xf1, 0xec, 0x3f, 0xeb, 0x81, 0x87, 0x96,
    +	0x2b, 0xf9, 0xb1, 0xa9, 0x2e, 0xc2, 0x30, 0x67, 0x17, 0xe9, 0xd2, 0x70, 0xea, 0xa2, 0x5d, 0x25,
    +	0x01, 0x5d, 0xd3, 0x60, 0xd8, 0xc0, 0x44, 0xa7, 0xa0, 0xe8, 0xbe, 0xed, 0x25, 0x1d, 0x2c, 0x97,
    +	0xdf, 0x58, 0xc5, 0xb4, 0x9c, 0x82, 0x29, 0xe7, 0xc9, 0x8f, 0x74, 0x05, 0x56, 0xdc, 0xe7, 0xab,
    +	0x30, 0xea, 0x86, 0xd5, 0xd0, 0x5d, 0xf6, 0xe8, 0x3e, 0xd5, 0x76, 0xba, 0x92, 0x39, 0xd0, 0x4e,
    +	0x2b, 0x28, 0x4e, 0x60, 0x6b, 0xf7, 0x4b, 0x6f, 0xd7, 0xdc, 0x6b, 0xc7, 0xc8, 0x18, 0xf4, 0xf8,
    +	0x6f, 0xb2, 0xaf, 0x0b, 0x99, 0x08, 0x5e, 0x1c, 0xff, 0xfc, 0x83, 0x43, 0x2c, 0x61, 0xf4, 0x41,
    +	0x57, 0xdd, 0x74, 0x9a, 0xb3, 0xad, 0x68, 0x73, 0xc1, 0x0d, 0xab, 0xfe, 0x36, 0x09, 0x76, 0xd8,
    +	0x5b, 0x7c, 0x20, 0x7e, 0xd0, 0x29, 0xc0, 0xfc, 0xe5, 0xd9, 0x32, 0xc5, 0xc4, 0xe9, 0x3a, 0x68,
    +	0x16, 0xc6, 0x64, 0x61, 0x85, 0x84, 0xec, 0x0a, 0x18, 0x62, 0x64, 0x94, 0xcb, 0xa3, 0x28, 0x56,
    +	0x44, 0x92, 0xf8, 0x26, 0x83, 0x0b, 0xf7, 0x83, 0xc1, 0x7d, 0x11, 0x46, 0x5c, 0xcf, 0x8d, 0x5c,
    +	0x27, 0xf2, 0xb9, 0xfe, 0x88, 0x3f, 0xbb, 0x99, 0x80, 0x79, 0x59, 0x07, 0x60, 0x13, 0xcf, 0xfe,
    +	0x6f, 0x3d, 0x30, 0xc1, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0x5e, 0x5a, 0x61, 0xd7, 0xd3, 0x2b, 0xec,
    +	0x7e, 0x70, 0xee, 0xf7, 0xbc, 0xcc, 0x3e, 0x67, 0xc1, 0x04, 0x93, 0x71, 0x1b, 0xcb, 0xec, 0x3c,
    +	0x0c, 0x06, 0x86, 0x37, 0xea, 0xa0, 0xae, 0xd4, 0x92, 0x8e, 0xa5, 0x31, 0x0e, 0x7a, 0x0d, 0xa0,
    +	0x19, 0xcb, 0xd0, 0x0b, 0x46, 0x08, 0x51, 0xc8, 0x15, 0x9f, 0x6b, 0x75, 0xec, 0x4f, 0xc3, 0xa0,
    +	0x72, 0x37, 0x95, 0xfe, 0xe6, 0x56, 0x8e, 0xbf, 0x79, 0x67, 0x36, 0x42, 0xda, 0xc6, 0x15, 0x33,
    +	0x6d, 0xe3, 0xbe, 0x6c, 0x41, 0xac, 0xe1, 0x40, 0x6f, 0xc0, 0x60, 0xd3, 0x67, 0x06, 0xd1, 0x81,
    +	0xf4, 0x32, 0x78, 0xb4, 0xad, 0x8a, 0x84, 0xc7, 0x09, 0x0c, 0xf8, 0x74, 0x94, 0x65, 0x55, 0x1c,
    +	0x53, 0x41, 0x57, 0xa0, 0xbf, 0x19, 0x90, 0x4a, 0xc4, 0x82, 0x58, 0x75, 0x4f, 0x90, 0x2f, 0x5f,
    +	0x5e, 0x11, 0x4b, 0x0a, 0xf6, 0x2f, 0x16, 0x60, 0x3c, 0x89, 0x8a, 0x5e, 0x81, 0x1e, 0x72, 0x87,
    +	0x54, 0x45, 0x7f, 0x33, 0x79, 0x82, 0x58, 0x46, 0xc2, 0x07, 0x80, 0xfe, 0xc7, 0xac, 0x16, 0xba,
    +	0x0c, 0xfd, 0x94, 0x21, 0xb8, 0xa4, 0x02, 0x36, 0x3e, 0x9c, 0xc7, 0x54, 0x28, 0xce, 0x8a, 0x77,
    +	0x4e, 0x14, 0x61, 0x59, 0x9d, 0x19, 0xa4, 0x55, 0x9b, 0x15, 0xfa, 0xd6, 0x8a, 0xda, 0x89, 0x04,
    +	0xd6, 0xe6, 0xcb, 0x1c, 0x49, 0x50, 0xe3, 0x06, 0x69, 0xb2, 0x10, 0xc7, 0x44, 0xd0, 0x6b, 0xd0,
    +	0x1b, 0xd6, 0x09, 0x69, 0x0a, 0x8b, 0x83, 0x4c, 0x29, 0x67, 0x85, 0x22, 0x08, 0x4a, 0x4c, 0x2a,
    +	0xc2, 0x0a, 0x30, 0xaf, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x2d, 0xf8, 0x1c, 0x6f, 0x83, 0x1c, 0x82,
    +	0x62, 0x60, 0x01, 0x7a, 0xc2, 0x26, 0xa9, 0xb6, 0xb3, 0xf6, 0x8f, 0xfb, 0x53, 0x69, 0x92, 0x6a,
    +	0xbc, 0x66, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, 0xdf, 0x0f, 0x30, 0x1a, 0xa3, 0x2d, 0x47, 0xa4, 0x81,
    +	0x9e, 0x31, 0xa2, 0xdc, 0x9c, 0x48, 0x44, 0xb9, 0x19, 0x64, 0xd8, 0x9a, 0x0c, 0xfa, 0xd3, 0x50,
    +	0x6c, 0x38, 0x77, 0x84, 0x90, 0xf1, 0xa9, 0xf6, 0xdd, 0xa0, 0xf4, 0x67, 0x56, 0x9c, 0x3b, 0xfc,
    +	0x1d, 0xfe, 0x94, 0xdc, 0x63, 0x2b, 0xce, 0x9d, 0x8e, 0x16, 0xe9, 0xb4, 0x11, 0xd6, 0x96, 0xeb,
    +	0x09, 0xe3, 0xb4, 0xae, 0xda, 0x72, 0xbd, 0x64, 0x5b, 0xae, 0xd7, 0x45, 0x5b, 0xae, 0x87, 0xee,
    +	0x42, 0xbf, 0xb0, 0x1d, 0x15, 0xe1, 0xf7, 0xce, 0x77, 0xd1, 0x9e, 0x30, 0x3d, 0xe5, 0x6d, 0x9e,
    +	0x97, 0x72, 0x06, 0x51, 0xda, 0xb1, 0x5d, 0xd9, 0x20, 0xfa, 0x3b, 0x16, 0x8c, 0x8a, 0xdf, 0x98,
    +	0xbc, 0xdd, 0x22, 0x61, 0x24, 0xf8, 0xf0, 0x0f, 0x75, 0xdf, 0x07, 0x51, 0x91, 0x77, 0xe5, 0x43,
    +	0xf2, 0xca, 0x34, 0x81, 0x1d, 0x7b, 0x94, 0xe8, 0x05, 0xfa, 0x45, 0x0b, 0x8e, 0x34, 0x9c, 0x3b,
    +	0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x0d, 0xc6, 0x2b, 0xdd, 0x4d, 0x7f, 0xaa, 0x3a,
    +	0xef, 0xa4, 0x54, 0xb8, 0x1e, 0xc9, 0x42, 0xe9, 0xd8, 0xd5, 0xcc, 0x7e, 0x4d, 0xaf, 0xc3, 0x80,
    +	0x5c, 0x6f, 0x0f, 0xd2, 0x30, 0x9e, 0xb5, 0x23, 0xd6, 0xda, 0x03, 0x6d, 0xe7, 0xd3, 0x30, 0xac,
    +	0xaf, 0xb1, 0x07, 0xda, 0xd6, 0xdb, 0x30, 0x99, 0xb1, 0x96, 0x1e, 0x68, 0x93, 0xb7, 0xe1, 0x44,
    +	0xee, 0xfa, 0x78, 0xa0, 0x8e, 0x0d, 0x5f, 0xb5, 0xf4, 0x73, 0xf0, 0x10, 0xb4, 0x33, 0xf3, 0xa6,
    +	0x76, 0xe6, 0x74, 0xfb, 0x9d, 0x93, 0xa3, 0xa2, 0x79, 0x4b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0xeb,
    +	0xd0, 0x57, 0xa7, 0x25, 0xd2, 0x02, 0xd9, 0xee, 0xbc, 0x23, 0x63, 0xbe, 0x98, 0x95, 0x87, 0x58,
    +	0x50, 0xb0, 0xbf, 0x64, 0x41, 0x86, 0x6b, 0x06, 0xe5, 0x93, 0x5a, 0x6e, 0x8d, 0x0d, 0x49, 0x31,
    +	0xe6, 0x93, 0x54, 0x10, 0x98, 0x53, 0x50, 0xdc, 0x70, 0x6b, 0xc2, 0xb3, 0x58, 0x81, 0x2f, 0x51,
    +	0xf0, 0x86, 0x5b, 0x43, 0x4b, 0x80, 0xc2, 0x56, 0xb3, 0x59, 0x67, 0x66, 0x4b, 0x4e, 0xfd, 0x52,
    +	0xe0, 0xb7, 0x9a, 0xdc, 0xdc, 0xb8, 0xc8, 0x85, 0x44, 0x95, 0x14, 0x14, 0x67, 0xd4, 0xb0, 0x7f,
    +	0xd5, 0x82, 0x9e, 0x43, 0x98, 0x26, 0x6c, 0x4e, 0xd3, 0x33, 0xb9, 0xa4, 0x45, 0xd6, 0x86, 0x19,
    +	0xec, 0xdc, 0x5e, 0xbc, 0x13, 0x11, 0x2f, 0x64, 0x0c, 0x47, 0xe6, 0xac, 0xed, 0x5a, 0x30, 0x79,
    +	0xd5, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x24, 0x58, 0xf6, 0x36, 0x0e, 0x64, 0xdb, 0x5f,
    +	0xe8, 0x68, 0xdb, 0x7f, 0x11, 0xfa, 0xdc, 0xa6, 0x16, 0xf6, 0xfd, 0x0c, 0x9d, 0xdd, 0xe5, 0xb2,
    +	0x88, 0xf8, 0x8e, 0x8c, 0xc6, 0x59, 0x29, 0x16, 0xf8, 0x74, 0x59, 0x72, 0xa3, 0xba, 0x9e, 0xfc,
    +	0x65, 0x49, 0xdf, 0x3a, 0xc9, 0x70, 0x66, 0x86, 0xf9, 0xf7, 0x26, 0x18, 0x4d, 0x08, 0x0f, 0x46,
    +	0x0c, 0xfd, 0x2e, 0xff, 0x52, 0xb1, 0x36, 0x1f, 0xcf, 0x7e, 0x83, 0xa4, 0x06, 0x46, 0xf3, 0xcd,
    +	0xe3, 0x05, 0x58, 0x12, 0xb2, 0x2f, 0x42, 0x66, 0xf8, 0x99, 0xce, 0xf2, 0x25, 0xfb, 0x63, 0x30,
    +	0xc1, 0x6a, 0x1e, 0x50, 0x76, 0x63, 0x27, 0xa4, 0xe2, 0x19, 0x11, 0x7c, 0xed, 0xff, 0x6c, 0x01,
    +	0x5a, 0xf1, 0x6b, 0xee, 0xfa, 0x8e, 0x20, 0xce, 0xbf, 0xff, 0x6d, 0x28, 0xf1, 0xc7, 0x71, 0x32,
    +	0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xb8, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, 0x1d,
    +	0x77, 0xa2, 0x87, 0xde, 0x48, 0x04, 0x1d, 0xfc, 0x70, 0x2a, 0xe8, 0xe0, 0xe3, 0x99, 0x76, 0x31,
    +	0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0xe7, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcb, 0x8c,
    +	0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0xbe, 0x4b, 0x62, 0xff, 0xc6, 0x82, 0x38,
    +	0xdc, 0xd5, 0x21, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3, 0xb8,
    +	0xd1, 0x15, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, 0xb3,
    +	0xf1, 0xf5, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0xfb, 0x13, 0xa8, 0x72, 0x1b,
    +	0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x98, 0x0d, 0xcd, 0xb4,
    +	0xf4, 0x5c, 0xbd, 0x9a, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, 0xea,
    +	0xeb, 0xe0, 0x82, 0xfd, 0x15, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4b, 0x5c, 0x40, 0x54, 0x7f, 0x72,
    +	0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x07, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x5d, 0xa2,
    +	0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0xdd, 0xdf, 0x2d, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, 0x6b,
    +	0x5c, 0xc5, 0xfe, 0x69, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x80, 0xde, 0xe6, 0xa6, 0x13, 0x92,
    +	0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0xee, 0xef, 0x96, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, 0x76,
    +	0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0xbf, 0xb4, 0xa0, 0x67, 0x95, 0xde, 0x5e,
    +	0x0f, 0xfe, 0x08, 0x78, 0xd5, 0x38, 0x02, 0x4e, 0xe6, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, 0x89,
    +	0xdd, 0x7f, 0x3a, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, 0x3e,
    +	0x67, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x02, 0xfa, 0x85, 0x9f,
    +	0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x64, 0x11, 0x8c, 0xfc, 0x48, 0xe8, 0xd7,
    +	0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x46, 0xa5, 0xba, 0x49, 0x6a,
    +	0xad, 0xba, 0xeb, 0x6d, 0x2c, 0x6f, 0x78, 0xbe, 0x2a, 0x5e, 0xbc, 0x43, 0xaa, 0x2d, 0xa6, 0x1b,
    +	0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbb, 0xb7, 0x5b, 0x9a, 0xc1, 0x07, 0xa2, 0x8d, 0x0f,
    +	0xd8, 0x17, 0xf4, 0x7b, 0x16, 0x9c, 0xe7, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, 0x59,
    +	0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xa2, 0x18, 0xd0, 0xf3, 0xe5, 0x83, 0xb5, 0x85,
    +	0x0f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x44, 0x84, 0x89, 0x14, 0x77, 0xc0, 0x0b, 0xc6, 0x92,
    +	0x78, 0x38, 0xb1, 0x24, 0x26, 0x0c, 0xe4, 0xfb, 0x73, 0xfc, 0x87, 0x30, 0x41, 0x0f, 0xe7, 0xcb,
    +	0xc4, 0x09, 0xa2, 0x5b, 0xc4, 0xe1, 0x56, 0x89, 0xc5, 0x03, 0x9f, 0xfe, 0x4a, 0x3c, 0x7e, 0x35,
    +	0x49, 0x0c, 0xa7, 0xe9, 0xbf, 0x97, 0xee, 0x1c, 0x0f, 0xc6, 0x53, 0x91, 0x3e, 0xdf, 0x84, 0x41,
    +	0xe5, 0x24, 0x25, 0x0e, 0x9d, 0xf6, 0x01, 0x73, 0x93, 0x14, 0xb8, 0xd0, 0x33, 0x76, 0xd0, 0x8b,
    +	0xc9, 0xd9, 0xff, 0xa4, 0x60, 0x34, 0xc8, 0x27, 0x71, 0x15, 0x06, 0x9c, 0x90, 0x05, 0xf1, 0xae,
    +	0xb5, 0x93, 0x4b, 0xa7, 0x9a, 0x61, 0x8e, 0x6a, 0xb3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x65, 0x6e,
    +	0xfb, 0xb9, 0x4d, 0xda, 0x09, 0xa5, 0x53, 0xd4, 0x40, 0x5a, 0x87, 0x6e, 0x13, 0x2c, 0xea, 0xa3,
    +	0x4f, 0x70, 0xe3, 0xdc, 0x2b, 0x9e, 0x7f, 0xdb, 0xbb, 0xe4, 0xfb, 0x32, 0x24, 0x50, 0x77, 0x04,
    +	0x27, 0xa4, 0x49, 0xae, 0xaa, 0x8e, 0x4d, 0x6a, 0xdd, 0x85, 0xce, 0xfe, 0x0c, 0xb0, 0xbc, 0x24,
    +	0x66, 0x4c, 0x82, 0x10, 0x11, 0x18, 0x13, 0x31, 0x48, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xe7, 0xb7,
    +	0x59, 0x3b, 0xd6, 0xe3, 0x5c, 0x31, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x93, 0x1f, 0xc2, 0x4b, 0xc4,
    +	0x89, 0x5a, 0x01, 0x09, 0xd1, 0x47, 0x61, 0x2a, 0xfd, 0x32, 0x16, 0xea, 0x10, 0x8b, 0x71, 0xcf,
    +	0x27, 0xf7, 0x76, 0x4b, 0x53, 0x95, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xfe, 0x59, 0x0b, 0x98, 0x27,
    +	0xf8, 0x21, 0x70, 0x3e, 0x1f, 0x31, 0x39, 0x9f, 0xa9, 0xbc, 0xe9, 0xcc, 0x61, 0x7a, 0x9e, 0xe7,
    +	0x6b, 0xb8, 0x1c, 0xf8, 0x77, 0x76, 0x84, 0xed, 0x56, 0xe7, 0x67, 0x9c, 0xfd, 0x45, 0x0b, 0x58,
    +	0x12, 0x1f, 0xcc, 0x5f, 0xed, 0x52, 0xc1, 0xd1, 0xd9, 0x2c, 0xe1, 0xa3, 0x30, 0xb0, 0x2e, 0x86,
    +	0x3f, 0x43, 0xe8, 0x64, 0x74, 0xd8, 0xa4, 0x2d, 0x27, 0x4d, 0x78, 0x74, 0x8a, 0x7f, 0x58, 0x51,
    +	0xb3, 0xff, 0xb1, 0x05, 0xd3, 0xf9, 0xd5, 0xd0, 0x75, 0x38, 0x1e, 0x90, 0x6a, 0x2b, 0x08, 0xe9,
    +	0x96, 0x10, 0x0f, 0x20, 0xe1, 0x14, 0xc5, 0xa7, 0xfa, 0xa1, 0xbd, 0xdd, 0xd2, 0x71, 0x9c, 0x8d,
    +	0x82, 0xf3, 0xea, 0xa2, 0x97, 0x60, 0xb4, 0x15, 0x72, 0xce, 0x8f, 0x31, 0x5d, 0xa1, 0x88, 0x14,
    +	0xcd, 0xfc, 0x86, 0xae, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0xbf, 0x8b, 0x2f, 0x47, 0x15, 0x2c, 0xba,
    +	0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x68, 0xa7, 0x5b, 0x9f, 0x5d, 0x97,
    +	0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xca, 0x82, 0xe3, 0x3a, 0xa2, 0xe6, 0x0e,
    +	0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0x3b, 0x27,
    +	0x57, 0xe8, 0x35, 0x51, 0xbe, 0x2f, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, 0x86,
    +	0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, 0xfe,
    +	0x33, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x6d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x73, 0xf1, 0x4e,
    +	0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0x4f, 0x75, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, 0x95,
    +	0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc1, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, 0x26,
    +	0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xcb, 0x45, 0x7e, 0x68, 0xb0,
    +	0xb7, 0xc7, 0x13, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, 0xca,
    +	0xbc, 0x18, 0x4b, 0x38, 0x3a, 0x07, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, 0x42,
    +	0xac, 0xa0, 0xe8, 0x59, 0x80, 0x66, 0xe0, 0x6f, 0xbb, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, 0xe7,
    +	0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x65, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, 0xf7,
    +	0xca, 0x6e, 0xec, 0xba, 0x0e, 0xc4, 0x26, 0x2e, 0x9a, 0x85, 0xbe, 0xc8, 0x61, 0xd6, 0x66, 0xbd,
    +	0xf9, 0x46, 0xf4, 0x6b, 0x14, 0x43, 0xcf, 0x2c, 0x47, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x29, 0xdd,
    +	0xeb, 0xf9, 0x4d, 0x24, 0xbc, 0x57, 0xba, 0xbb, 0xb5, 0x34, 0xe7, 0x7a, 0xe1, 0x15, 0x63, 0xd0,
    +	0x42, 0x2f, 0x01, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xae, 0x6c, 0x44, 0x15, 0x23, 0xb3, 0xe0,
    +	0xaf, 0xfa, 0xd1, 0xf5, 0x90, 0x2c, 0x2a, 0x0c, 0xac, 0x61, 0xdb, 0xdf, 0x3f, 0x04, 0x10, 0x3f,
    +	0x34, 0xd0, 0x5d, 0x18, 0xa8, 0x3a, 0x4d, 0xa7, 0xca, 0xd3, 0xa6, 0x16, 0xf3, 0xbc, 0x9e, 0xe3,
    +	0x1a, 0x33, 0xf3, 0x02, 0x9d, 0x2b, 0x6f, 0x64, 0xc8, 0xf0, 0x01, 0x59, 0xdc, 0x51, 0x61, 0xa3,
    +	0xda, 0x43, 0x9f, 0xb3, 0x60, 0x48, 0x44, 0x3a, 0x62, 0x33, 0x54, 0xc8, 0xd7, 0xb7, 0x69, 0xed,
    +	0xcf, 0xc6, 0x35, 0x78, 0x17, 0x9e, 0x93, 0x2b, 0x54, 0x83, 0x74, 0xec, 0x85, 0xde, 0x30, 0xfa,
    +	0xa0, 0x7c, 0xdb, 0x16, 0x8d, 0xa1, 0x54, 0x6f, 0xdb, 0x41, 0x76, 0xd5, 0xe8, 0xcf, 0xda, 0xeb,
    +	0xc6, 0xb3, 0xb6, 0x27, 0xdf, 0x7f, 0xd8, 0xe0, 0xb7, 0x3b, 0xbd, 0x68, 0x51, 0x59, 0x8f, 0x25,
    +	0xd2, 0x9b, 0xef, 0xf4, 0xaa, 0x3d, 0xec, 0x3a, 0xc4, 0x11, 0xf9, 0x34, 0x8c, 0xd5, 0x4c, 0xae,
    +	0x45, 0xac, 0xc4, 0xc7, 0xf3, 0xe8, 0x26, 0x98, 0x9c, 0x98, 0x4f, 0x49, 0x00, 0x70, 0x92, 0x30,
    +	0x2a, 0xf3, 0xd0, 0x32, 0xcb, 0xde, 0xba, 0x2f, 0x3c, 0xa8, 0xec, 0xdc, 0xb9, 0xdc, 0x09, 0x23,
    +	0xd2, 0xa0, 0x98, 0x31, 0x93, 0xb0, 0x2a, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x7d, 0xcc, 0xeb,
    +	0x31, 0x9c, 0x1a, 0xc8, 0x57, 0x6b, 0x98, 0x91, 0x50, 0xe3, 0x0d, 0xc9, 0xfe, 0x86, 0x58, 0x50,
    +	0x40, 0x97, 0xa5, 0x4f, 0x71, 0xb8, 0xec, 0x5d, 0x0f, 0x09, 0xf3, 0x29, 0x1e, 0x9c, 0x7b, 0x34,
    +	0x76, 0x17, 0xe6, 0xe5, 0x99, 0xf9, 0x67, 0x8d, 0x9a, 0x94, 0xed, 0x13, 0xff, 0x65, 0x5a, 0x5b,
    +	0x11, 0xb7, 0x2d, 0xb3, 0x7b, 0x66, 0xea, 0xdb, 0x78, 0x38, 0x6f, 0x98, 0x24, 0x70, 0x92, 0x26,
    +	0x65, 0xa1, 0xf9, 0xae, 0x17, 0x3e, 0x58, 0x9d, 0xce, 0x0e, 0x2e, 0x39, 0x60, 0xb7, 0x11, 0x2f,
    +	0xc1, 0xa2, 0x3e, 0x72, 0x61, 0x2c, 0x30, 0xd8, 0x0b, 0x19, 0x6e, 0xed, 0x6c, 0x77, 0x4c, 0x8c,
    +	0x16, 0xc8, 0xdf, 0x24, 0x83, 0x93, 0x74, 0xd1, 0xeb, 0x1a, 0xa3, 0x34, 0xd2, 0xfe, 0xe5, 0xdf,
    +	0x89, 0x35, 0x9a, 0xde, 0x82, 0x11, 0xe3, 0xb0, 0x79, 0xa0, 0x2a, 0x48, 0x0f, 0xc6, 0x93, 0x27,
    +	0xcb, 0x03, 0xd5, 0x3c, 0xfe, 0x49, 0x0f, 0x8c, 0x9a, 0x3b, 0x01, 0x9d, 0x87, 0x41, 0x41, 0x44,
    +	0x65, 0xb4, 0x52, 0x9b, 0x7b, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12, 0x99, 0xb1, 0xea, 0x9a, 0xaf,
    +	0x40, 0x9c, 0xc8, 0x4c, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xb0, 0xb7, 0x7c, 0x3f, 0x52, 0xf7, 0xa8,
    +	0xda, 0x2e, 0x73, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x3f, 0xb7, 0x48, 0xe0, 0x91, 0xba, 0x99, 0xd2,
    +	0x41, 0xdd, 0x9f, 0x57, 0x74, 0x20, 0x36, 0x71, 0x29, 0x17, 0xe0, 0x87, 0x6c, 0xff, 0x89, 0x67,
    +	0x72, 0xec, 0x7b, 0x51, 0xe1, 0x51, 0x24, 0x24, 0x1c, 0x7d, 0x0c, 0x8e, 0xab, 0xf0, 0x89, 0x62,
    +	0x75, 0xc9, 0x16, 0xfb, 0x0c, 0xa9, 0xd6, 0xf1, 0xf9, 0x6c, 0x34, 0x9c, 0x57, 0x1f, 0xbd, 0x0a,
    +	0xa3, 0xe2, 0x29, 0x25, 0x29, 0xf6, 0x9b, 0x86, 0x84, 0x57, 0x0c, 0x28, 0x4e, 0x60, 0xcb, 0xa4,
    +	0x14, 0xec, 0x8d, 0x21, 0x29, 0x0c, 0xa4, 0x93, 0x52, 0xe8, 0x70, 0x9c, 0xaa, 0x81, 0x66, 0x61,
    +	0x8c, 0xb3, 0x8e, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0x78, 0x76, 0xaa, 0x4d, 0x75, 0xcd, 0x04, 0xe3,
    +	0x24, 0x3e, 0xba, 0x08, 0xc3, 0x4e, 0x50, 0xdd, 0x74, 0x23, 0x52, 0xa5, 0x3b, 0x83, 0xd9, 0xf2,
    +	0x69, 0x96, 0x98, 0xb3, 0x1a, 0x0c, 0x1b, 0x98, 0xf6, 0x5d, 0x98, 0xcc, 0x08, 0x2f, 0x43, 0x17,
    +	0x8e, 0xd3, 0x74, 0xe5, 0x37, 0x25, 0xdc, 0x1d, 0x66, 0xcb, 0xcb, 0xf2, 0x6b, 0x34, 0x2c, 0xba,
    +	0x3a, 0x59, 0x18, 0x1a, 0x2d, 0xf9, 0xb6, 0x5a, 0x9d, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0xaf,
    +	0x0a, 0x30, 0x96, 0xa1, 0xa0, 0x63, 0x09, 0xa0, 0x13, 0x2f, 0xad, 0x38, 0xdf, 0xb3, 0x99, 0xe3,
    +	0xa4, 0x70, 0x80, 0x1c, 0x27, 0xc5, 0x4e, 0x39, 0x4e, 0x7a, 0xde, 0x49, 0x8e, 0x13, 0x73, 0xc4,
    +	0x7a, 0xbb, 0x1a, 0xb1, 0x8c, 0xbc, 0x28, 0x7d, 0x07, 0xcc, 0x8b, 0x62, 0x0c, 0x7a, 0x7f, 0x17,
    +	0x83, 0xfe, 0xa3, 0x05, 0x18, 0x4f, 0xea, 0xf6, 0x0e, 0x41, 0x3e, 0xfe, 0xba, 0x21, 0x1f, 0x3f,
    +	0xd7, 0x8d, 0x27, 0x7e, 0xae, 0xac, 0x1c, 0x27, 0x64, 0xe5, 0x4f, 0x76, 0x45, 0xad, 0xbd, 0xdc,
    +	0xfc, 0xef, 0x17, 0xe0, 0x68, 0xa6, 0xca, 0xf3, 0x10, 0xc6, 0xe6, 0x9a, 0x31, 0x36, 0xcf, 0x74,
    +	0x1d, 0xa5, 0x20, 0x77, 0x80, 0x6e, 0x26, 0x06, 0xe8, 0x7c, 0xf7, 0x24, 0xdb, 0x8f, 0xd2, 0x37,
    +	0x8a, 0x70, 0x3a, 0xb3, 0x5e, 0x2c, 0x5e, 0x5e, 0x32, 0xc4, 0xcb, 0xcf, 0x26, 0xc4, 0xcb, 0x76,
    +	0xfb, 0xda, 0xf7, 0x47, 0xde, 0x2c, 0xbc, 0xf5, 0x59, 0xcc, 0x91, 0x7b, 0x94, 0x35, 0x1b, 0xde,
    +	0xfa, 0x8a, 0x10, 0x36, 0xe9, 0xbe, 0x97, 0x64, 0xcc, 0xbf, 0x6b, 0xc1, 0x89, 0xcc, 0xb9, 0x39,
    +	0x04, 0x49, 0xdf, 0xaa, 0x29, 0xe9, 0x7b, 0xa2, 0xeb, 0xd5, 0x9a, 0x23, 0xfa, 0xfb, 0x7c, 0x5f,
    +	0xce, 0xb7, 0x30, 0x01, 0xc4, 0x35, 0x18, 0x72, 0xaa, 0x55, 0x12, 0x86, 0x2b, 0x7e, 0x4d, 0xa5,
    +	0x43, 0x78, 0x86, 0x3d, 0x0f, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2, 0x74, 0x92, 0x44, 0x0c, 0xc6, 0x3a,
    +	0x05, 0xf4, 0x09, 0x18, 0x08, 0x65, 0x26, 0xcb, 0x9e, 0x7b, 0xcf, 0x64, 0xc9, 0x98, 0x5c, 0x25,
    +	0x60, 0x51, 0x24, 0xd1, 0x77, 0xea, 0xd1, 0x9f, 0xda, 0x88, 0x16, 0x79, 0x27, 0xef, 0x21, 0x06,
    +	0xd4, 0xb3, 0x00, 0xdb, 0xea, 0x25, 0x93, 0x14, 0x9e, 0x68, 0x6f, 0x1c, 0x0d, 0x0b, 0xbd, 0x06,
    +	0xe3, 0x21, 0x0f, 0x7c, 0x1a, 0x1b, 0xa9, 0xf0, 0xb5, 0xc8, 0x62, 0xc7, 0x55, 0x12, 0x30, 0x9c,
    +	0xc2, 0x46, 0x4b, 0xb2, 0x55, 0x66, 0x8e, 0xc4, 0x97, 0xe7, 0xd9, 0xb8, 0x45, 0x61, 0x92, 0x74,
    +	0x24, 0x39, 0x09, 0x6c, 0xf8, 0xb5, 0x9a, 0xe8, 0x13, 0x00, 0x74, 0x11, 0x09, 0x21, 0x4a, 0x7f,
    +	0xfe, 0x11, 0x4a, 0xcf, 0x96, 0x5a, 0xa6, 0x27, 0x03, 0x73, 0xb3, 0x5f, 0x50, 0x44, 0xb0, 0x46,
    +	0x10, 0x39, 0x30, 0x12, 0xff, 0x8b, 0x73, 0xb4, 0x9f, 0xcb, 0x6d, 0x21, 0x49, 0x9c, 0x29, 0x18,
    +	0x16, 0x74, 0x12, 0xd8, 0xa4, 0x88, 0x3e, 0x0e, 0x27, 0xb6, 0x73, 0x2d, 0x7f, 0x38, 0x27, 0xc8,
    +	0x92, 0xae, 0xe7, 0xdb, 0xfb, 0xe4, 0xd7, 0xb7, 0xff, 0x2d, 0xc0, 0x43, 0x6d, 0x4e, 0x7a, 0x34,
    +	0x6b, 0x6a, 0xed, 0x9f, 0x4a, 0x4a, 0x36, 0xa6, 0x33, 0x2b, 0x1b, 0xa2, 0x8e, 0xc4, 0x86, 0x2a,
    +	0xbc, 0xe3, 0x0d, 0xf5, 0x43, 0x96, 0x26, 0x73, 0xe2, 0x36, 0xdd, 0x1f, 0x39, 0xe0, 0x0d, 0x76,
    +	0x1f, 0x85, 0x50, 0xeb, 0x19, 0x92, 0x9c, 0x67, 0xbb, 0xee, 0x4e, 0xf7, 0xa2, 0x9d, 0xaf, 0x66,
    +	0x07, 0x7c, 0xe7, 0x42, 0x9e, 0x4b, 0x07, 0xfd, 0xfe, 0xc3, 0x0a, 0xfe, 0xfe, 0x75, 0x0b, 0x4e,
    +	0xa4, 0x8a, 0x79, 0x1f, 0x48, 0x28, 0xa2, 0xdd, 0xad, 0xbe, 0xe3, 0xce, 0x4b, 0x82, 0xfc, 0x1b,
    +	0x2e, 0x8b, 0x6f, 0x38, 0x91, 0x8b, 0x97, 0xec, 0xfa, 0x17, 0xfe, 0xb8, 0x34, 0xc9, 0x1a, 0x30,
    +	0x11, 0x71, 0x7e, 0xd7, 0x51, 0x13, 0xce, 0x54, 0x5b, 0x41, 0x10, 0x2f, 0xd6, 0x8c, 0xcd, 0xc9,
    +	0xdf, 0x7a, 0x8f, 0xee, 0xed, 0x96, 0xce, 0xcc, 0x77, 0xc0, 0xc5, 0x1d, 0xa9, 0x21, 0x0f, 0x50,
    +	0x23, 0x65, 0x5f, 0xc7, 0x0e, 0x80, 0x1c, 0x39, 0x4c, 0xda, 0x1a, 0x8f, 0x5b, 0xca, 0x66, 0x58,
    +	0xe9, 0x65, 0x50, 0x3e, 0x5c, 0xe9, 0xc9, 0xb7, 0x26, 0x2e, 0xfd, 0xf4, 0x55, 0x38, 0xdd, 0x7e,
    +	0x31, 0x1d, 0x28, 0x94, 0xc3, 0x1f, 0x58, 0x70, 0xaa, 0x6d, 0xbc, 0xb0, 0x6f, 0xc3, 0xc7, 0x82,
    +	0xfd, 0x59, 0x0b, 0x1e, 0xce, 0xac, 0x91, 0x74, 0xc2, 0xab, 0xd2, 0x42, 0xcd, 0x1c, 0x35, 0x8e,
    +	0x9c, 0x23, 0x01, 0x38, 0xc6, 0x31, 0x2c, 0x36, 0x0b, 0x1d, 0x2d, 0x36, 0x7f, 0xcb, 0x82, 0xd4,
    +	0x55, 0x7f, 0x08, 0x9c, 0xe7, 0xb2, 0xc9, 0x79, 0x3e, 0xda, 0xcd, 0x68, 0xe6, 0x30, 0x9d, 0x7f,
    +	0x31, 0x06, 0xc7, 0x72, 0x3c, 0xb1, 0xb7, 0x61, 0x62, 0xa3, 0x4a, 0xcc, 0xd0, 0x1b, 0xed, 0x42,
    +	0xd2, 0xb5, 0x8d, 0xd3, 0x31, 0x77, 0x74, 0x6f, 0xb7, 0x34, 0x91, 0x42, 0xc1, 0xe9, 0x26, 0xd0,
    +	0x67, 0x2d, 0x38, 0xe2, 0xdc, 0x0e, 0x17, 0xe9, 0x0b, 0xc2, 0xad, 0xce, 0xd5, 0xfd, 0xea, 0x16,
    +	0x65, 0xcc, 0xe4, 0xb6, 0x7a, 0x3e, 0x53, 0x18, 0x7d, 0xb3, 0x92, 0xc2, 0x37, 0x9a, 0x9f, 0xda,
    +	0xdb, 0x2d, 0x1d, 0xc9, 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x32, 0x7e, 0x39, 0xd1, 0x66, 0xbb,
    +	0xe0, 0x30, 0x59, 0x2e, 0xf3, 0x9c, 0x25, 0x96, 0x10, 0xac, 0xe8, 0xa0, 0x4f, 0xc1, 0xe0, 0x86,
    +	0x8c, 0x03, 0x91, 0xc1, 0x72, 0xc7, 0x03, 0xd9, 0x3e, 0x3a, 0x06, 0x37, 0x81, 0x51, 0x48, 0x38,
    +	0x26, 0x8a, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0x8a, 0x10, 0x75, 0xd9, 0x96, 0xb8, 0xa6, 0xad, 0x33,
    +	0x0f, 0xc1, 0xb4, 0xba, 0x54, 0xc1, 0xb4, 0x22, 0xba, 0x0c, 0xc5, 0xe0, 0x56, 0x4d, 0x68, 0x52,
    +	0x32, 0x37, 0x29, 0x9e, 0x5b, 0xc8, 0xe9, 0x15, 0xa3, 0x84, 0xe7, 0x16, 0x30, 0x25, 0x81, 0xca,
    +	0xd0, 0xcb, 0xdc, 0x97, 0x05, 0x6b, 0x9b, 0xf9, 0x94, 0x6f, 0x13, 0x06, 0x80, 0x7b, 0x24, 0x32,
    +	0x04, 0xcc, 0x09, 0xa1, 0x35, 0xe8, 0xab, 0xba, 0x5e, 0x8d, 0x04, 0x82, 0x97, 0xfd, 0x60, 0xa6,
    +	0xce, 0x84, 0x61, 0xe4, 0xd0, 0xe4, 0x2a, 0x04, 0x86, 0x81, 0x05, 0x2d, 0x46, 0x95, 0x34, 0x37,
    +	0xd7, 0xe5, 0x8d, 0x95, 0x4d, 0x95, 0x34, 0x37, 0x97, 0x2a, 0x6d, 0xa9, 0x32, 0x0c, 0x2c, 0x68,
    +	0xa1, 0x97, 0xa0, 0xb0, 0x5e, 0x15, 0xae, 0xc9, 0x99, 0xca, 0x13, 0x33, 0x8a, 0xd6, 0x5c, 0xdf,
    +	0xde, 0x6e, 0xa9, 0xb0, 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xb4, 0x0a, 0xfd, 0xeb, 0x3c, 0xee, 0x8e,
    +	0xd0, 0x8f, 0x3c, 0x9e, 0x1d, 0x12, 0x28, 0x15, 0x9a, 0x87, 0x7b, 0x97, 0x0a, 0x00, 0x96, 0x44,
    +	0x58, 0x02, 0x2a, 0x15, 0x3f, 0x48, 0x84, 0x2f, 0x9d, 0x39, 0x58, 0xcc, 0x27, 0xfe, 0xd4, 0x88,
    +	0xa3, 0x10, 0x61, 0x8d, 0x22, 0x5d, 0xd5, 0xce, 0xdd, 0x56, 0xc0, 0x72, 0x5b, 0x08, 0xd5, 0x48,
    +	0xe6, 0xaa, 0x9e, 0x95, 0x48, 0xed, 0x56, 0xb5, 0x42, 0xc2, 0x31, 0x51, 0xb4, 0x05, 0x23, 0xdb,
    +	0x61, 0x73, 0x93, 0xc8, 0x2d, 0xcd, 0xc2, 0xde, 0xe5, 0x70, 0xb3, 0x37, 0x04, 0xa2, 0x1b, 0x44,
    +	0x2d, 0xa7, 0x9e, 0x3a, 0x85, 0xd8, 0xb3, 0xe6, 0x86, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0xf0, 0xbf,
    +	0xdd, 0xf2, 0x6f, 0xed, 0x44, 0x44, 0x44, 0x1d, 0xcd, 0x1c, 0xfe, 0x37, 0x38, 0x4a, 0x7a, 0xf8,
    +	0x05, 0x00, 0x4b, 0x22, 0xe8, 0x86, 0x18, 0x1e, 0x76, 0x7a, 0x8e, 0xe7, 0x87, 0x34, 0x9f, 0x95,
    +	0x48, 0x39, 0x83, 0xc2, 0x4e, 0xcb, 0x98, 0x14, 0x3b, 0x25, 0x9b, 0x9b, 0x7e, 0xe4, 0x7b, 0x89,
    +	0x13, 0x7a, 0x22, 0xff, 0x94, 0x2c, 0x67, 0xe0, 0xa7, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16,
    +	0xaa, 0xc1, 0x68, 0xd3, 0x0f, 0xa2, 0xdb, 0x7e, 0x20, 0xd7, 0x17, 0x6a, 0x23, 0x28, 0x35, 0x30,
    +	0x45, 0x8b, 0xcc, 0x30, 0xc7, 0x84, 0xe0, 0x04, 0x4d, 0xf4, 0x51, 0xe8, 0x0f, 0xab, 0x4e, 0x9d,
    +	0x2c, 0x5f, 0x9b, 0x9a, 0xcc, 0xbf, 0x7e, 0x2a, 0x1c, 0x25, 0x67, 0x75, 0xf1, 0xb0, 0x49, 0x1c,
    +	0x05, 0x4b, 0x72, 0x68, 0x09, 0x7a, 0x59, 0x62, 0x67, 0x16, 0x22, 0x37, 0x27, 0x32, 0x7b, 0xca,
    +	0xad, 0x86, 0x9f, 0x4d, 0xac, 0x18, 0xf3, 0xea, 0x74, 0x0f, 0x08, 0x49, 0x81, 0x1f, 0x4e, 0x1d,
    +	0xcd, 0xdf, 0x03, 0x42, 0xc0, 0x70, 0xad, 0xd2, 0x6e, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27,
    +	0x33, 0x3d, 0x4d, 0x8f, 0xb5, 0x31, 0x99, 0xcc, 0x3d, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a,
    +	0xc2, 0xfe, 0x8d, 0x81, 0x34, 0xcf, 0xc2, 0x24, 0x4c, 0xff, 0xbf, 0x95, 0xb2, 0x99, 0xf8, 0x50,
    +	0xb7, 0x02, 0xef, 0xfb, 0xf8, 0x70, 0xfd, 0xac, 0x05, 0xc7, 0x9a, 0x99, 0x1f, 0x22, 0x18, 0x80,
    +	0xee, 0xe4, 0xe6, 0xfc, 0xd3, 0x55, 0x38, 0xe5, 0x6c, 0x38, 0xce, 0x69, 0x29, 0x29, 0x1c, 0x28,
    +	0xbe, 0x63, 0xe1, 0xc0, 0x0a, 0x0c, 0x54, 0xf9, 0x4b, 0x4e, 0xa6, 0x01, 0xe8, 0x2a, 0x18, 0x28,
    +	0x63, 0x25, 0xc4, 0x13, 0x70, 0x1d, 0x2b, 0x12, 0xe8, 0x87, 0x2d, 0x38, 0x95, 0xec, 0x3a, 0x26,
    +	0x0c, 0x2c, 0x0c, 0x26, 0xb9, 0x58, 0x6b, 0x49, 0x7c, 0x7f, 0x8a, 0xff, 0x37, 0x90, 0xf7, 0x3b,
    +	0x21, 0xe0, 0xf6, 0x8d, 0xa1, 0x85, 0x0c, 0xb9, 0x5a, 0x9f, 0xa9, 0x51, 0xec, 0x42, 0xb6, 0xf6,
    +	0x3c, 0x0c, 0x37, 0xfc, 0x96, 0x17, 0x09, 0xbb, 0x47, 0x61, 0x3c, 0xc5, 0x8c, 0x86, 0x56, 0xb4,
    +	0x72, 0x6c, 0x60, 0x25, 0x24, 0x72, 0x03, 0xf7, 0x2c, 0x91, 0x7b, 0x0b, 0x86, 0x3d, 0xcd, 0x25,
    +	0xa0, 0xdd, 0x0b, 0x56, 0x48, 0x17, 0x35, 0x6c, 0xde, 0x4b, 0xbd, 0x04, 0x1b, 0xd4, 0xda, 0x4b,
    +	0xcb, 0xe0, 0x9d, 0x49, 0xcb, 0x0e, 0xf5, 0x49, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc5, 0xc0, 0xa5,
    +	0x72, 0xaf, 0x98, 0x52, 0xb9, 0xb3, 0x49, 0xa9, 0x5c, 0x4a, 0x55, 0x65, 0x08, 0xe4, 0xba, 0xcf,
    +	0x28, 0xd9, 0x75, 0x80, 0xe7, 0xef, 0xb5, 0xe0, 0x38, 0xd3, 0x7d, 0xd0, 0x06, 0xde, 0xb1, 0xbe,
    +	0x83, 0x99, 0xa4, 0x5e, 0xcd, 0x26, 0x87, 0xf3, 0xda, 0xb1, 0xeb, 0x70, 0xa6, 0xd3, 0xbd, 0xcb,
    +	0x2c, 0x7c, 0x6b, 0xca, 0x38, 0x22, 0xb6, 0xf0, 0xad, 0x2d, 0x2f, 0x60, 0x06, 0xe9, 0x36, 0x7c,
    +	0xa1, 0xfd, 0xdf, 0x2d, 0x28, 0x96, 0xfd, 0xda, 0x21, 0xbc, 0xe8, 0x3f, 0x62, 0xbc, 0xe8, 0x1f,
    +	0xca, 0xbe, 0xf1, 0x6b, 0xb9, 0xca, 0xbe, 0xc5, 0x84, 0xb2, 0xef, 0x54, 0x1e, 0x81, 0xf6, 0xaa,
    +	0xbd, 0x9f, 0x2e, 0xc2, 0x50, 0xd9, 0xaf, 0xa9, 0x7d, 0xf6, 0xaf, 0xee, 0xc5, 0x91, 0x27, 0x37,
    +	0xfb, 0x94, 0x46, 0x99, 0x59, 0xf4, 0xca, 0xb8, 0x13, 0xdf, 0x66, 0xfe, 0x3c, 0x37, 0x89, 0xbb,
    +	0xb1, 0x19, 0x91, 0x5a, 0xf2, 0x73, 0x0e, 0xcf, 0x9f, 0xe7, 0x9b, 0x45, 0x18, 0x4b, 0xb4, 0x8e,
    +	0xea, 0x30, 0x52, 0xd7, 0x55, 0x49, 0x62, 0x9d, 0xde, 0x93, 0x16, 0x4a, 0xf8, 0x43, 0x68, 0x45,
    +	0xd8, 0x24, 0x8e, 0x66, 0x00, 0x3c, 0xdd, 0x2a, 0x5c, 0x05, 0x2a, 0xd6, 0x2c, 0xc2, 0x35, 0x0c,
    +	0xf4, 0x02, 0x0c, 0x45, 0x7e, 0xd3, 0xaf, 0xfb, 0x1b, 0x3b, 0x57, 0x88, 0x8c, 0x6c, 0xa9, 0x8c,
    +	0x86, 0xd7, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x1d, 0x98, 0x50, 0x44, 0x2a, 0xf7, 0x41, 0xbd, 0xc6,
    +	0xc4, 0x26, 0xab, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x09, 0x46, 0x99, 0xf5, 0x32, 0xab, 0x7f,
    +	0x85, 0xec, 0xc8, 0x88, 0xc7, 0x8c, 0xc3, 0x5e, 0x31, 0x20, 0x38, 0x81, 0x89, 0xe6, 0x61, 0xa2,
    +	0xe1, 0x86, 0x89, 0xea, 0x7d, 0xac, 0x3a, 0xeb, 0xc0, 0x4a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0xcf,
    +	0x89, 0x39, 0xf6, 0x22, 0xf7, 0xfd, 0xed, 0xf8, 0xee, 0xde, 0x8e, 0xdf, 0xb0, 0x60, 0x9c, 0xb6,
    +	0xce, 0x4c, 0x32, 0x25, 0x23, 0xa5, 0x72, 0x62, 0x58, 0x6d, 0x72, 0x62, 0x9c, 0xa5, 0xc7, 0x76,
    +	0xcd, 0x6f, 0x45, 0x42, 0x3a, 0xaa, 0x9d, 0xcb, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20,
    +	0xfc, 0xde, 0x75, 0x3c, 0x12, 0x04, 0x58, 0x40, 0x65, 0xca, 0x8c, 0x9e, 0xec, 0x94, 0x19, 0x3c,
    +	0xf2, 0xb9, 0xb0, 0x82, 0x13, 0x2c, 0xad, 0x16, 0xf9, 0x5c, 0x9a, 0xc7, 0xc5, 0x38, 0xf6, 0x57,
    +	0x8b, 0x30, 0x5c, 0xf6, 0x6b, 0xb1, 0x61, 0xc7, 0xf3, 0x86, 0x61, 0xc7, 0x99, 0x84, 0x61, 0xc7,
    +	0xb8, 0x8e, 0xfb, 0xbe, 0x19, 0xc7, 0xb7, 0xca, 0x8c, 0xe3, 0x37, 0x2d, 0x36, 0x6b, 0x0b, 0xab,
    +	0x15, 0x6e, 0xe1, 0x8b, 0x2e, 0xc0, 0x10, 0x3b, 0xe1, 0x58, 0xa0, 0x05, 0x69, 0xed, 0xc0, 0x52,
    +	0x58, 0xae, 0xc6, 0xc5, 0x58, 0xc7, 0x41, 0xe7, 0x60, 0x20, 0x24, 0x4e, 0x50, 0xdd, 0x54, 0xc7,
    +	0xbb, 0x30, 0x4d, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x1b, 0x71, 0xd0, 0xed, 0x62, 0xbe, 0xb9, 0xb0,
    +	0xde, 0x1f, 0xbe, 0x45, 0xf2, 0x23, 0x6d, 0xdb, 0x37, 0x01, 0xa5, 0xf1, 0xbb, 0xf0, 0xbf, 0x2a,
    +	0x99, 0x61, 0x61, 0x07, 0x53, 0x21, 0x61, 0xff, 0xda, 0x82, 0xd1, 0xb2, 0x5f, 0xa3, 0x5b, 0xf7,
    +	0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x02, 0xbd, 0x65, 0xbf, 0xd6,
    +	0x21, 0x74, 0xed, 0x3f, 0xb0, 0xa0, 0xbf, 0xec, 0xd7, 0x0e, 0x41, 0xf1, 0xf2, 0x8a, 0xa9, 0x78,
    +	0x39, 0x9e, 0xb3, 0x6e, 0x72, 0x74, 0x2d, 0x7f, 0xaf, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x0d, 0x39,
    +	0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x3b, 0x39, 0xad,
    +	0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x06, 0x9a, 0x01, 0xd9, 0x76, 0x7d, 0xc1, 0x5f, 0x6b,
    +	0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, 0xf7,
    +	0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x9b, 0x30, 0xc8, 0xfe,
    +	0xb3, 0x63, 0xa7, 0xf7, 0xc0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x16,
    +	0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, 0x58,
    +	0xe8, 0x29, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xd5, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, 0xeb,
    +	0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x89, 0x44, 0x6a, 0xba, 0x22,
    +	0xe7, 0x05, 0xaf, 0xaa, 0x52, 0xac, 0x61, 0xa0, 0x4d, 0x38, 0xe9, 0x7a, 0x2c, 0x85, 0x14, 0xa9,
    +	0x6c, 0xb9, 0xcd, 0xb5, 0xab, 0x95, 0x1b, 0x24, 0x70, 0xd7, 0x77, 0xe6, 0x9c, 0xea, 0x16, 0xf1,
    +	0x64, 0x42, 0xfc, 0x47, 0x45, 0x17, 0x4f, 0x2e, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0xd9, 0xcf, 0xb1,
    +	0xf5, 0x7e, 0xad, 0x82, 0x9e, 0x34, 0x8e, 0x8e, 0x63, 0xfa, 0xd1, 0xb1, 0xbf, 0x5b, 0xea, 0xbb,
    +	0x56, 0xd1, 0x62, 0xff, 0x5c, 0x84, 0xa3, 0x65, 0xbf, 0x56, 0xf6, 0x83, 0x68, 0xc9, 0x0f, 0x6e,
    +	0x3b, 0x41, 0x4d, 0x2e, 0xaf, 0x92, 0x8c, 0x7e, 0x44, 0xcf, 0xcf, 0x5e, 0x7e, 0xba, 0x18, 0x91,
    +	0x8d, 0x9e, 0x63, 0x1c, 0xdb, 0x01, 0x9d, 0x4d, 0xab, 0x8c, 0x77, 0x50, 0x49, 0xd8, 0x2e, 0x39,
    +	0x11, 0x41, 0xd7, 0x60, 0xa4, 0xaa, 0x5f, 0xa3, 0xa2, 0xfa, 0x13, 0xf2, 0x22, 0x33, 0xee, 0xd8,
    +	0xcc, 0x7b, 0xd7, 0xac, 0x6f, 0x7f, 0xdd, 0x12, 0xad, 0x70, 0x49, 0x04, 0xb7, 0x69, 0xed, 0x7c,
    +	0x9e, 0xce, 0xc3, 0x44, 0xa0, 0x57, 0xd1, 0x6c, 0xc3, 0x8e, 0xf2, 0xac, 0x36, 0x09, 0x20, 0x4e,
    +	0xe3, 0xa3, 0x8f, 0xc3, 0x09, 0xa3, 0x50, 0xaa, 0xc9, 0xb5, 0xdc, 0xd2, 0x4c, 0x56, 0x83, 0xf3,
    +	0x90, 0x70, 0x7e, 0x7d, 0xfb, 0xbb, 0xe1, 0x58, 0xf2, 0xbb, 0x84, 0xf4, 0xe4, 0x1e, 0xbf, 0xae,
    +	0x70, 0xb0, 0xaf, 0xb3, 0x5f, 0x80, 0x09, 0xfa, 0xac, 0x56, 0x2c, 0x22, 0x9b, 0xbf, 0xce, 0x01,
    +	0xa6, 0xfe, 0x5d, 0x3f, 0xbb, 0xe2, 0x12, 0x99, 0xd5, 0xd0, 0x27, 0x61, 0x34, 0x24, 0x2c, 0xaa,
    +	0x9a, 0x94, 0xda, 0xb5, 0xf1, 0x14, 0xaf, 0x2c, 0xea, 0x98, 0xfc, 0x65, 0x62, 0x96, 0xe1, 0x04,
    +	0x35, 0xd4, 0x80, 0xd1, 0xdb, 0xae, 0x57, 0xf3, 0x6f, 0x87, 0x92, 0xfe, 0x40, 0xbe, 0x0a, 0xe0,
    +	0x26, 0xc7, 0x4c, 0xf4, 0xd1, 0x68, 0xee, 0xa6, 0x41, 0x0c, 0x27, 0x88, 0xd3, 0x63, 0x24, 0x68,
    +	0x79, 0xb3, 0xe1, 0xf5, 0x90, 0x04, 0x22, 0xe6, 0x1b, 0x3b, 0x46, 0xb0, 0x2c, 0xc4, 0x31, 0x9c,
    +	0x1e, 0x23, 0xec, 0x0f, 0x73, 0x35, 0x67, 0xe7, 0x94, 0x38, 0x46, 0xb0, 0x2a, 0xc5, 0x1a, 0x06,
    +	0x3d, 0x66, 0xd9, 0xbf, 0x55, 0xdf, 0xc3, 0xbe, 0x1f, 0xc9, 0x83, 0x99, 0xa5, 0xa1, 0xd4, 0xca,
    +	0xb1, 0x81, 0x95, 0x13, 0x61, 0xae, 0xe7, 0xa0, 0x11, 0xe6, 0x50, 0xd4, 0xc6, 0xbb, 0x9e, 0x47,
    +	0x3a, 0xbe, 0xd8, 0xce, 0xbb, 0x7e, 0xff, 0x9e, 0x3c, 0xef, 0xe9, 0x3d, 0xbf, 0x2e, 0x06, 0xa8,
    +	0x97, 0x87, 0xd0, 0x63, 0x4a, 0xca, 0x0a, 0x1f, 0x1d, 0x09, 0x43, 0x8b, 0xd0, 0x1f, 0xee, 0x84,
    +	0xd5, 0xa8, 0x1e, 0xb6, 0x4b, 0x35, 0x5a, 0x61, 0x28, 0x5a, 0xa6, 0x6b, 0x5e, 0x05, 0xcb, 0xba,
    +	0xa8, 0x0a, 0x93, 0x82, 0xe2, 0xfc, 0xa6, 0xe3, 0xa9, 0x04, 0x88, 0xdc, 0x1a, 0xf1, 0xc2, 0xde,
    +	0x6e, 0x69, 0x52, 0xb4, 0xac, 0x83, 0xf7, 0x77, 0x4b, 0x74, 0x4b, 0x66, 0x40, 0x70, 0x16, 0x35,
    +	0xbe, 0xe4, 0xab, 0x55, 0xbf, 0xd1, 0x2c, 0x07, 0xfe, 0xba, 0x5b, 0x27, 0xed, 0x14, 0xbd, 0x15,
    +	0x03, 0x53, 0x2c, 0x79, 0xa3, 0x0c, 0x27, 0xa8, 0xa1, 0x5b, 0x30, 0xe6, 0x34, 0x9b, 0xb3, 0x41,
    +	0xc3, 0x0f, 0x64, 0x03, 0x43, 0xf9, 0x1a, 0x83, 0x59, 0x13, 0x95, 0xe7, 0x3f, 0x4c, 0x14, 0xe2,
    +	0x24, 0x41, 0xfb, 0xbb, 0x18, 0xbf, 0x5d, 0x71, 0x37, 0x3c, 0xe6, 0x93, 0x86, 0x1a, 0x30, 0xd2,
    +	0x64, 0x27, 0xb2, 0x48, 0x1b, 0x26, 0x76, 0xf1, 0xf3, 0x5d, 0xca, 0x0c, 0x6f, 0xb3, 0xc4, 0xa7,
    +	0x86, 0xed, 0x68, 0x59, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0xbf, 0x3f, 0xc1, 0x38, 0xb6, 0x0a, 0x17,
    +	0x04, 0xf6, 0x0b, 0x0f, 0x41, 0xf1, 0xf4, 0x9f, 0xce, 0x17, 0xb9, 0xc7, 0x53, 0x2f, 0xbc, 0x0c,
    +	0xb1, 0xac, 0x8b, 0x3e, 0x01, 0xa3, 0xf4, 0x25, 0xad, 0xb8, 0xa6, 0x70, 0xea, 0x48, 0x7e, 0xe8,
    +	0x29, 0x85, 0xa5, 0xa7, 0x14, 0xd4, 0x2b, 0xe3, 0x04, 0x31, 0xf4, 0x06, 0x33, 0xa7, 0x94, 0xa4,
    +	0x0b, 0xdd, 0x90, 0xd6, 0x2d, 0x27, 0x25, 0x59, 0x8d, 0x08, 0x6a, 0xc1, 0x64, 0x3a, 0x71, 0x72,
    +	0x38, 0x65, 0xe7, 0x3f, 0x49, 0xd2, 0xb9, 0x8f, 0xe3, 0xdc, 0x6f, 0x69, 0x58, 0x88, 0xb3, 0xe8,
    +	0xa3, 0xab, 0xc9, 0xb4, 0xb6, 0x45, 0x43, 0x58, 0x9f, 0x4a, 0x6d, 0x3b, 0xd2, 0x36, 0xa3, 0xed,
    +	0x06, 0x9c, 0xd2, 0x32, 0x83, 0x5e, 0x0a, 0x1c, 0x66, 0xce, 0xe3, 0xb2, 0x8b, 0x42, 0xe3, 0x25,
    +	0x1f, 0xde, 0xdb, 0x2d, 0x9d, 0x5a, 0x6b, 0x87, 0x88, 0xdb, 0xd3, 0x41, 0xd7, 0xe0, 0x28, 0x0f,
    +	0x9c, 0xb2, 0x40, 0x9c, 0x5a, 0xdd, 0xf5, 0x14, 0xb3, 0xca, 0x8f, 0x95, 0x13, 0x7b, 0xbb, 0xa5,
    +	0xa3, 0xb3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x05, 0x06, 0x6b, 0x9e, 0x3c, 0x00, 0xfb, 0x8c,
    +	0xe4, 0xab, 0x83, 0x0b, 0xab, 0x15, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, 0x57, 0x40, 0x1b, 0x5c, 0x5b,
    +	0xa4, 0x44, 0x7c, 0xfd, 0xa9, 0x78, 0x9a, 0x49, 0x29, 0xb8, 0x11, 0x89, 0x80, 0xab, 0x49, 0x95,
    +	0xa7, 0x9b, 0x11, 0xa4, 0xc0, 0x20, 0x8c, 0x5e, 0x07, 0x24, 0x92, 0xfc, 0xcc, 0x56, 0x59, 0x4e,
    +	0x3a, 0xcd, 0x84, 0x53, 0xbd, 0xdc, 0x2b, 0x29, 0x0c, 0x9c, 0x51, 0x0b, 0x5d, 0xa6, 0x27, 0x97,
    +	0x5e, 0x2a, 0x4e, 0x46, 0x95, 0xe2, 0x7b, 0x81, 0x34, 0x03, 0xc2, 0xac, 0x0e, 0x4d, 0x8a, 0x38,
    +	0x51, 0x0f, 0xd5, 0xe0, 0xa4, 0xd3, 0x8a, 0x7c, 0xa6, 0x88, 0x33, 0x51, 0xd7, 0xfc, 0x2d, 0xe2,
    +	0x31, 0x1d, 0xf8, 0x00, 0x8b, 0xd3, 0x79, 0x72, 0xb6, 0x0d, 0x1e, 0x6e, 0x4b, 0x85, 0xbe, 0x62,
    +	0xe8, 0x58, 0x68, 0x3a, 0x32, 0xc3, 0xa9, 0x9a, 0x2b, 0x8e, 0x25, 0x06, 0x7a, 0x01, 0x86, 0x36,
    +	0xfd, 0x30, 0x5a, 0x25, 0xd1, 0x6d, 0x3f, 0xd8, 0x12, 0x59, 0x05, 0xe2, 0x4c, 0x2e, 0x31, 0x08,
    +	0xeb, 0x78, 0xe8, 0x09, 0xe8, 0x67, 0x16, 0x5a, 0xcb, 0x0b, 0xec, 0x1a, 0x1c, 0x88, 0xcf, 0x98,
    +	0xcb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0x2e, 0xcf, 0x33, 0x43, 0x97, 0x04, 0xea, 0x72, 0x79,
    +	0x1e, 0x4b, 0x38, 0x5d, 0xae, 0xe1, 0xa6, 0x13, 0x90, 0x72, 0xe0, 0x57, 0x49, 0xa8, 0xe5, 0x0f,
    +	0x7a, 0x88, 0xe7, 0x4c, 0xa0, 0xcb, 0xb5, 0x92, 0x85, 0x80, 0xb3, 0xeb, 0x21, 0x92, 0xce, 0x8a,
    +	0x3b, 0x9a, 0xaf, 0xa1, 0x4c, 0x73, 0x6a, 0x5d, 0x26, 0xc6, 0xf5, 0x60, 0x5c, 0xe5, 0xe3, 0xe5,
    +	0x59, 0x12, 0xc2, 0xa9, 0x31, 0xb6, 0xb6, 0xbb, 0x4f, 0xb1, 0xa0, 0x74, 0xbe, 0xcb, 0x09, 0x4a,
    +	0x38, 0x45, 0xdb, 0x08, 0x04, 0x3b, 0xde, 0x31, 0x10, 0xec, 0x79, 0x18, 0x0c, 0x5b, 0xb7, 0x6a,
    +	0x7e, 0xc3, 0x71, 0x3d, 0x66, 0xe8, 0xa2, 0xbd, 0x97, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x04,
    +	0x03, 0x8e, 0x54, 0xe8, 0xa2, 0xfc, 0x18, 0x77, 0x4a, 0x8d, 0xcb, 0xc3, 0x3e, 0x49, 0x15, 0xae,
    +	0xaa, 0x8b, 0x5e, 0x86, 0x11, 0x11, 0x47, 0x43, 0xa4, 0xb0, 0x9f, 0x34, 0xbd, 0x86, 0x2b, 0x3a,
    +	0x10, 0x9b, 0xb8, 0xe8, 0x3a, 0x0c, 0x45, 0x7e, 0x9d, 0xb9, 0xbe, 0x52, 0x06, 0xf6, 0x58, 0x7e,
    +	0x28, 0xda, 0x35, 0x85, 0xa6, 0xab, 0x1a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x6b, 0x7c, 0xbd, 0xb3,
    +	0x6c, 0x41, 0x24, 0x14, 0x39, 0xd0, 0x4f, 0xe5, 0x59, 0x29, 0x32, 0x34, 0x73, 0x3b, 0x88, 0x9a,
    +	0x58, 0x27, 0x83, 0x2e, 0xc1, 0x44, 0x33, 0x70, 0x7d, 0xb6, 0x26, 0x94, 0x82, 0x7a, 0xca, 0xcc,
    +	0x0d, 0x5a, 0x4e, 0x22, 0xe0, 0x74, 0x1d, 0x16, 0x06, 0x45, 0x14, 0x4e, 0x9d, 0xe0, 0xf9, 0xcd,
    +	0xb8, 0xf8, 0x81, 0x97, 0x61, 0x05, 0x45, 0x2b, 0xec, 0x24, 0xe6, 0x92, 0xb3, 0xa9, 0xe9, 0x7c,
    +	0xe7, 0x7a, 0x5d, 0xc2, 0xc6, 0xd9, 0x72, 0xf5, 0x17, 0xc7, 0x14, 0x50, 0x4d, 0x4b, 0x2b, 0x4e,
    +	0x1f, 0x37, 0xe1, 0xd4, 0xc9, 0x36, 0x66, 0xb2, 0x89, 0x97, 0x6c, 0xcc, 0x10, 0x18, 0xc5, 0x21,
    +	0x4e, 0xd0, 0x44, 0xaf, 0xc1, 0xb8, 0x88, 0x11, 0x10, 0x0f, 0xd3, 0xa9, 0xd8, 0x95, 0x08, 0x27,
    +	0x60, 0x38, 0x85, 0xcd, 0xf3, 0x8b, 0x39, 0xb7, 0xea, 0x44, 0x1c, 0x7d, 0x57, 0x5d, 0x6f, 0x2b,
    +	0x9c, 0x3a, 0xcd, 0xce, 0x07, 0x91, 0x5f, 0x2c, 0x09, 0xc5, 0x19, 0x35, 0xd0, 0x1a, 0x8c, 0x37,
    +	0x03, 0x42, 0x1a, 0xec, 0x09, 0x23, 0xee, 0xb3, 0x12, 0x8f, 0x02, 0x44, 0x7b, 0x52, 0x4e, 0xc0,
    +	0xf6, 0x33, 0xca, 0x70, 0x8a, 0x02, 0xba, 0x0d, 0x03, 0xfe, 0x36, 0x09, 0x36, 0x89, 0x53, 0x9b,
    +	0x3a, 0xd3, 0xc6, 0xc1, 0x4d, 0x5c, 0x6e, 0xd7, 0x04, 0x6e, 0xc2, 0xfe, 0x47, 0x16, 0x77, 0xb6,
    +	0xff, 0x91, 0x8d, 0xa1, 0xbf, 0x65, 0xc1, 0x09, 0xa9, 0x51, 0xab, 0x34, 0xe9, 0xa8, 0xcf, 0xfb,
    +	0x5e, 0x18, 0x05, 0x3c, 0x6e, 0xcd, 0xc3, 0xf9, 0xb1, 0x5c, 0xd6, 0x72, 0x2a, 0x29, 0xe1, 0xfd,
    +	0x89, 0x3c, 0x8c, 0x10, 0xe7, 0xb7, 0x48, 0x1f, 0xdd, 0x21, 0x89, 0xe4, 0x61, 0x34, 0x1b, 0x2e,
    +	0xbd, 0xb1, 0xb0, 0x3a, 0xf5, 0x08, 0x0f, 0xba, 0x43, 0x37, 0x43, 0x25, 0x09, 0xc4, 0x69, 0x7c,
    +	0x74, 0x01, 0x0a, 0x7e, 0x38, 0xf5, 0x68, 0x9b, 0x4c, 0xf4, 0x7e, 0xed, 0x5a, 0x85, 0xdb, 0x81,
    +	0x5e, 0xab, 0xe0, 0x82, 0x1f, 0xca, 0x1c, 0x5f, 0xf4, 0xa5, 0x19, 0x4e, 0x3d, 0xc6, 0x45, 0xbd,
    +	0x32, 0xc7, 0x17, 0x2b, 0xc4, 0x31, 0x1c, 0x6d, 0xc2, 0x58, 0x68, 0xbc, 0xe8, 0xc3, 0xa9, 0xb3,
    +	0x6c, 0xa4, 0x1e, 0xcb, 0x9b, 0x34, 0x03, 0x5b, 0x4b, 0xbe, 0x63, 0x52, 0xc1, 0x49, 0xb2, 0x7c,
    +	0x77, 0x69, 0x32, 0x85, 0x70, 0xea, 0xf1, 0x0e, 0xbb, 0x4b, 0x43, 0xd6, 0x77, 0x97, 0x4e, 0x03,
    +	0x27, 0x68, 0x4e, 0x7f, 0x07, 0x4c, 0xa4, 0xd8, 0xa5, 0x83, 0xf8, 0x3c, 0x4c, 0x6f, 0xc1, 0x88,
    +	0xb1, 0x24, 0x1f, 0xa8, 0x49, 0xcc, 0xef, 0x0e, 0xc2, 0xa0, 0x32, 0x55, 0x40, 0xe7, 0x4d, 0x2b,
    +	0x98, 0x13, 0x49, 0x2b, 0x98, 0x81, 0xb2, 0x5f, 0x33, 0x0c, 0x5f, 0xd6, 0x32, 0x62, 0xc9, 0xe6,
    +	0x1d, 0x80, 0xdd, 0x3b, 0x66, 0x69, 0xea, 0x97, 0x62, 0xd7, 0xe6, 0x34, 0x3d, 0x6d, 0x35, 0x3a,
    +	0x97, 0x60, 0xc2, 0xf3, 0x19, 0x8f, 0x4e, 0x6a, 0x92, 0x01, 0x63, 0x7c, 0xd6, 0xa0, 0x1e, 0xeb,
    +	0x2c, 0x81, 0x80, 0xd3, 0x75, 0x68, 0x83, 0x9c, 0x51, 0x4a, 0xaa, 0x90, 0x38, 0x1f, 0x85, 0x05,
    +	0x94, 0xbe, 0x0d, 0xf9, 0xaf, 0x70, 0x6a, 0x3c, 0xff, 0x6d, 0xc8, 0x2b, 0x25, 0x99, 0xb1, 0x50,
    +	0x32, 0x63, 0x4c, 0x63, 0xd2, 0xf4, 0x6b, 0xcb, 0x65, 0xc1, 0xe6, 0x6b, 0x51, 0xde, 0x6b, 0xcb,
    +	0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x43, 0x26, 0x6f, 0x9b, 0x2e, 0x97, 0xb5,
    +	0x1c, 0xa3, 0xac, 0x02, 0x16, 0x15, 0x99, 0x44, 0x9c, 0xbe, 0x8d, 0x98, 0x44, 0xbc, 0xff, 0x1e,
    +	0x25, 0xe2, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x3b, 0x70, 0xd4, 0x78, 0x8f, 0x2a, 0x4f, 0x35, 0xc8,
    +	0x57, 0x96, 0x27, 0x90, 0xe7, 0x4e, 0x89, 0x4e, 0x1f, 0x5d, 0xce, 0xa2, 0x84, 0xb3, 0x1b, 0x40,
    +	0x75, 0x98, 0xa8, 0xa6, 0x5a, 0x1d, 0xe8, 0xbe, 0x55, 0xb5, 0x2e, 0xd2, 0x2d, 0xa6, 0x09, 0xa3,
    +	0x97, 0x61, 0xe0, 0x6d, 0x9f, 0x1b, 0xb6, 0x89, 0xa7, 0x89, 0x8c, 0x92, 0x32, 0xf0, 0xc6, 0xb5,
    +	0x0a, 0x2b, 0xdf, 0xdf, 0x2d, 0x0d, 0x95, 0xfd, 0x9a, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x03, 0x16,
    +	0x4c, 0xa7, 0x1f, 0xbc, 0xaa, 0xd3, 0x23, 0xdd, 0x77, 0xda, 0x16, 0x8d, 0x4e, 0x2f, 0xe6, 0x92,
    +	0xc3, 0x6d, 0x9a, 0x42, 0x1f, 0xa6, 0xfb, 0x29, 0x74, 0xef, 0x12, 0x91, 0xa0, 0xfd, 0xe1, 0x78,
    +	0x3f, 0xd1, 0xd2, 0xfd, 0xdd, 0xd2, 0x18, 0x3f, 0x19, 0xdd, 0xbb, 0x2a, 0x1e, 0x3d, 0xaf, 0x80,
    +	0xbe, 0x1b, 0x8e, 0x06, 0x69, 0xd9, 0x30, 0x91, 0x4c, 0xf8, 0x93, 0xdd, 0x9c, 0xb2, 0xc9, 0x09,
    +	0xc7, 0x59, 0x04, 0x71, 0x76, 0x3b, 0xf6, 0xaf, 0x59, 0x4c, 0x27, 0x20, 0xba, 0x45, 0xc2, 0x56,
    +	0x3d, 0x3a, 0x04, 0x63, 0xb2, 0x45, 0x43, 0xdf, 0x7e, 0xcf, 0xd6, 0x60, 0xff, 0xd2, 0x62, 0xd6,
    +	0x60, 0x87, 0xe8, 0xd7, 0xf6, 0x06, 0x0c, 0x44, 0xa2, 0x35, 0xd1, 0xf5, 0x3c, 0xcb, 0x15, 0xd9,
    +	0x29, 0x66, 0x11, 0xa7, 0x1e, 0x39, 0xb2, 0x14, 0x2b, 0x32, 0xf6, 0x3f, 0xe3, 0x33, 0x20, 0x21,
    +	0x87, 0xa0, 0xd6, 0x5c, 0x30, 0xd5, 0x9a, 0xa5, 0x0e, 0x5f, 0x90, 0xa3, 0xde, 0xfc, 0xa7, 0x66,
    +	0xbf, 0x99, 0x70, 0xef, 0xdd, 0x6e, 0x86, 0x68, 0x7f, 0xde, 0x02, 0x88, 0x13, 0x80, 0x74, 0x91,
    +	0x90, 0xf9, 0x22, 0x7d, 0xd6, 0xf8, 0x91, 0x5f, 0xf5, 0xeb, 0x42, 0xf5, 0x72, 0x32, 0xd6, 0xac,
    +	0xf2, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0x23, 0xf2, 0x16, 0x63, 0x5d, 0xbf, 0x11,
    +	0x8d, 0xf7, 0x4b, 0x16, 0x1c, 0xc9, 0x72, 0x92, 0xa0, 0x8f, 0x64, 0x2e, 0xe6, 0x54, 0x26, 0xa2,
    +	0x6a, 0x36, 0x6f, 0x88, 0x72, 0xac, 0x30, 0xba, 0xce, 0x74, 0x7d, 0xb0, 0xe4, 0x14, 0xd7, 0x60,
    +	0xa4, 0x1c, 0x10, 0x8d, 0xbf, 0x78, 0x35, 0xce, 0x9b, 0x33, 0x38, 0xf7, 0xf4, 0x81, 0x23, 0x0f,
    +	0xd9, 0x5f, 0x2e, 0xc0, 0x11, 0x6e, 0xe8, 0x34, 0xbb, 0xed, 0xbb, 0xb5, 0xb2, 0x5f, 0x13, 0xae,
    +	0xad, 0x6f, 0xc2, 0x70, 0x53, 0x93, 0x4d, 0xb7, 0x0b, 0xb4, 0xae, 0xcb, 0xb0, 0x63, 0x69, 0x9a,
    +	0x5e, 0x8a, 0x0d, 0x5a, 0xa8, 0x06, 0xc3, 0x64, 0xdb, 0xad, 0x2a, 0x6b, 0x99, 0xc2, 0x81, 0x2f,
    +	0x69, 0xd5, 0xca, 0xa2, 0x46, 0x07, 0x1b, 0x54, 0xbb, 0x36, 0x4f, 0xd6, 0x58, 0xb4, 0x9e, 0x0e,
    +	0x16, 0x32, 0x3f, 0x6e, 0xc1, 0xf1, 0x9c, 0xb0, 0xec, 0xb4, 0xb9, 0xdb, 0xcc, 0xa4, 0x4c, 0x2c,
    +	0x5b, 0xd5, 0x1c, 0x37, 0x34, 0xc3, 0x02, 0x8a, 0x3e, 0x0a, 0xd0, 0x8c, 0x53, 0x52, 0x76, 0x88,
    +	0x5f, 0x6d, 0x44, 0xb2, 0xd5, 0x82, 0x92, 0xaa, 0xcc, 0x95, 0x1a, 0x2d, 0xfb, 0x4b, 0x3d, 0xd0,
    +	0xcb, 0x0c, 0x93, 0x50, 0x19, 0xfa, 0x37, 0x79, 0xcc, 0xbc, 0xb6, 0xf3, 0x46, 0x71, 0x65, 0x10,
    +	0xbe, 0x78, 0xde, 0xb4, 0x52, 0x2c, 0xc9, 0xa0, 0x15, 0x98, 0xe4, 0xe9, 0x36, 0xeb, 0x0b, 0xa4,
    +	0xee, 0xec, 0x48, 0xb1, 0x6f, 0x81, 0x7d, 0xaa, 0x12, 0x7f, 0x2f, 0xa7, 0x51, 0x70, 0x56, 0x3d,
    +	0xf4, 0x2a, 0x8c, 0xd2, 0x67, 0xb8, 0xdf, 0x8a, 0x24, 0x25, 0x9e, 0xdf, 0x52, 0xbd, 0x4c, 0xd6,
    +	0x0c, 0x28, 0x4e, 0x60, 0xa3, 0x97, 0x61, 0xa4, 0x99, 0x12, 0x70, 0xf7, 0xc6, 0x92, 0x20, 0x53,
    +	0xa8, 0x6d, 0xe2, 0x32, 0x3f, 0x89, 0x16, 0xf3, 0x0a, 0x59, 0xdb, 0x0c, 0x48, 0xb8, 0xe9, 0xd7,
    +	0x6b, 0x8c, 0x03, 0xee, 0xd5, 0xfc, 0x24, 0x12, 0x70, 0x9c, 0xaa, 0x41, 0xa9, 0xac, 0x3b, 0x6e,
    +	0xbd, 0x15, 0x90, 0x98, 0x4a, 0x9f, 0x49, 0x65, 0x29, 0x01, 0xc7, 0xa9, 0x1a, 0x9d, 0x25, 0xf7,
    +	0xfd, 0xf7, 0x47, 0x72, 0x6f, 0xff, 0x4c, 0x01, 0x8c, 0xa9, 0x7d, 0x0f, 0xe7, 0xdd, 0x7c, 0x05,
    +	0x7a, 0x36, 0x82, 0x66, 0x55, 0x18, 0xe1, 0x65, 0x7e, 0x59, 0x9c, 0xfd, 0x9f, 0x7f, 0x19, 0xfd,
    +	0x8f, 0x59, 0x2d, 0xba, 0xc7, 0x8f, 0x96, 0x03, 0x9f, 0x5e, 0x72, 0x32, 0xac, 0xa6, 0x72, 0x47,
    +	0xea, 0x97, 0x81, 0x35, 0xda, 0x04, 0xa0, 0x16, 0x3e, 0x15, 0x9c, 0x82, 0x61, 0xaf, 0x56, 0x11,
    +	0xe1, 0x73, 0x24, 0x15, 0x74, 0x01, 0x86, 0x44, 0x2a, 0x44, 0xe6, 0x35, 0xc3, 0x37, 0x13, 0xb3,
    +	0xaf, 0x5b, 0x88, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x60, 0x01, 0x26, 0x33, 0xdc, 0x1e, 0xf9, 0x35,
    +	0xb2, 0xe1, 0x86, 0x51, 0xb0, 0x93, 0xbc, 0x9c, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x9e, 0x55, 0xfc,
    +	0xa2, 0x4a, 0x5e, 0x4e, 0xc2, 0xad, 0x48, 0x40, 0x0f, 0x98, 0xaa, 0xff, 0x0c, 0xf4, 0xb4, 0x42,
    +	0x22, 0x63, 0xdd, 0xab, 0x6b, 0x9b, 0x29, 0xec, 0x19, 0x84, 0x3e, 0x01, 0x37, 0x94, 0x16, 0x5a,
    +	0x7b, 0x02, 0x72, 0x3d, 0x34, 0x87, 0xd1, 0xce, 0x45, 0xc4, 0x73, 0xbc, 0x48, 0x3c, 0x14, 0xe3,
    +	0x18, 0xc8, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xc8, 0x75, 0x84, 0xa6, 0x5d, 0x6f,
    +	0xf8, 0x9e, 0x1b, 0xf9, 0xca, 0x70, 0x91, 0xc7, 0x3d, 0x26, 0xcd, 0xcd, 0x15, 0x51, 0x8e, 0x15,
    +	0x06, 0x3a, 0x0b, 0xbd, 0x4c, 0x28, 0x9e, 0x4c, 0x83, 0x86, 0xe7, 0x16, 0x78, 0x44, 0x49, 0x0e,
    +	0xd6, 0x6e, 0xf5, 0x62, 0xdb, 0x5b, 0xfd, 0x11, 0xca, 0xc1, 0xf8, 0xf5, 0xe4, 0x85, 0x42, 0xbb,
    +	0xeb, 0xfb, 0x75, 0xcc, 0x80, 0xe8, 0x31, 0x31, 0x5e, 0x09, 0x4b, 0x3d, 0xec, 0xd4, 0xfc, 0x50,
    +	0x1b, 0xb4, 0x27, 0xa0, 0x7f, 0x8b, 0xec, 0x04, 0xae, 0xb7, 0x91, 0xb4, 0xe0, 0xbc, 0xc2, 0x8b,
    +	0xb1, 0x84, 0x9b, 0x59, 0xbf, 0xfb, 0xef, 0x47, 0xd6, 0x6f, 0x7d, 0x05, 0x0c, 0x74, 0x64, 0x4f,
    +	0x7e, 0xa8, 0x08, 0x63, 0x78, 0x6e, 0xe1, 0xfd, 0x89, 0xb8, 0x9e, 0x9e, 0x88, 0xfb, 0x91, 0x1c,
    +	0xfb, 0x60, 0xb3, 0xf1, 0xcb, 0x16, 0x8c, 0xb1, 0x84, 0x8c, 0x22, 0x8a, 0x89, 0xeb, 0x7b, 0x87,
    +	0xf0, 0x14, 0x78, 0x04, 0x7a, 0x03, 0xda, 0xa8, 0x98, 0x41, 0xb5, 0xc7, 0x59, 0x4f, 0x30, 0x87,
    +	0xa1, 0x93, 0xd0, 0xc3, 0xba, 0x40, 0x27, 0x6f, 0x98, 0x1f, 0xc1, 0x0b, 0x4e, 0xe4, 0x60, 0x56,
    +	0xca, 0xe2, 0x29, 0x62, 0xd2, 0xac, 0xbb, 0xbc, 0xd3, 0xb1, 0xc9, 0xc2, 0xbb, 0x23, 0x44, 0x4a,
    +	0x66, 0xd7, 0xde, 0x59, 0x3c, 0xc5, 0x6c, 0x92, 0xed, 0x9f, 0xd9, 0x7f, 0x5e, 0x80, 0xd3, 0x99,
    +	0xf5, 0xba, 0x8e, 0xa7, 0xd8, 0xbe, 0xf6, 0x83, 0x4c, 0xdf, 0x56, 0x3c, 0x44, 0xfb, 0xf8, 0x9e,
    +	0x6e, 0xb9, 0xff, 0xde, 0x2e, 0xc2, 0x1c, 0x66, 0x0e, 0xd9, 0xbb, 0x24, 0xcc, 0x61, 0x66, 0xdf,
    +	0x72, 0xc4, 0x04, 0x7f, 0x53, 0xc8, 0xf9, 0x16, 0x26, 0x30, 0x38, 0x47, 0xcf, 0x19, 0x06, 0x0c,
    +	0xe5, 0x23, 0x9c, 0x9f, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x9a, 0x85, 0xb1, 0x86, 0xeb, 0xd1, 0xc3,
    +	0x67, 0xc7, 0x64, 0xc5, 0x95, 0x2e, 0x63, 0xc5, 0x04, 0xe3, 0x24, 0x3e, 0x72, 0xb5, 0x10, 0x88,
    +	0xfc, 0xeb, 0x5e, 0x3e, 0xd0, 0xae, 0x9b, 0x31, 0xcd, 0x39, 0xd4, 0x28, 0x66, 0x84, 0x43, 0x5c,
    +	0xd1, 0xe4, 0x44, 0xc5, 0xee, 0xe5, 0x44, 0xc3, 0xd9, 0x32, 0xa2, 0xe9, 0x97, 0x61, 0xe4, 0x9e,
    +	0x75, 0x23, 0xf6, 0x37, 0x8a, 0xf0, 0x50, 0x9b, 0x6d, 0xcf, 0xcf, 0x7a, 0x63, 0x0e, 0xb4, 0xb3,
    +	0x3e, 0x35, 0x0f, 0x65, 0x38, 0xb2, 0xde, 0xaa, 0xd7, 0x77, 0x98, 0x23, 0x18, 0xa9, 0x49, 0x0c,
    +	0xc1, 0x53, 0x4a, 0xe1, 0xc8, 0x91, 0xa5, 0x0c, 0x1c, 0x9c, 0x59, 0x93, 0x3e, 0xb1, 0xe8, 0x4d,
    +	0xb2, 0xa3, 0x48, 0x25, 0x9e, 0x58, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x82, 0x09, 0x67, 0xdb,
    +	0x71, 0x79, 0xfa, 0x0b, 0x49, 0x80, 0xbf, 0xb1, 0x94, 0x2c, 0x7a, 0x36, 0x89, 0x80, 0xd3, 0x75,
    +	0xd0, 0xeb, 0x80, 0xfc, 0x5b, 0xcc, 0xb9, 0xa4, 0x76, 0x89, 0x78, 0x42, 0xeb, 0xce, 0xe6, 0xae,
    +	0x18, 0x1f, 0x09, 0xd7, 0x52, 0x18, 0x38, 0xa3, 0x56, 0x22, 0x18, 0x5f, 0x5f, 0x7e, 0x30, 0xbe,
    +	0xf6, 0xe7, 0x62, 0xc7, 0xcc, 0x81, 0x6f, 0xc1, 0xc8, 0x41, 0x2d, 0xa6, 0x9f, 0x80, 0xfe, 0x40,
    +	0xe4, 0x64, 0x4f, 0x78, 0x5d, 0xcb, 0x8c, 0xd5, 0x12, 0x6e, 0xff, 0x17, 0x0b, 0x94, 0x2c, 0xd9,
    +	0x8c, 0xbb, 0xfd, 0x32, 0x33, 0xff, 0xe6, 0x52, 0x70, 0x2d, 0xd4, 0xd6, 0x51, 0xcd, 0xfc, 0x3b,
    +	0x06, 0x62, 0x13, 0x97, 0x2f, 0xb7, 0x30, 0x8e, 0xf0, 0x60, 0x3c, 0x20, 0x44, 0x58, 0x50, 0x85,
    +	0x81, 0x3e, 0x06, 0xfd, 0x35, 0x77, 0xdb, 0x0d, 0x85, 0x1c, 0xed, 0xc0, 0x7a, 0xbb, 0xf8, 0xfb,
    +	0x16, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x62, 0x81, 0xd2, 0x4b, 0x5e, 0x26, 0x4e, 0x3d, 0xda,
    +	0x44, 0xaf, 0x01, 0x48, 0x0a, 0x4a, 0xf6, 0x26, 0xad, 0xa5, 0x00, 0x2b, 0xc8, 0xbe, 0xf1, 0x0f,
    +	0x6b, 0x75, 0xd0, 0xab, 0xd0, 0xb7, 0xc9, 0x68, 0x89, 0x6f, 0x3b, 0xab, 0x54, 0x5d, 0xac, 0x74,
    +	0x7f, 0xb7, 0x74, 0xc4, 0x6c, 0x53, 0xde, 0x62, 0xbc, 0x96, 0xfd, 0x43, 0x85, 0x78, 0x4e, 0xdf,
    +	0x68, 0xf9, 0x91, 0x73, 0x08, 0x9c, 0xc8, 0x25, 0x83, 0x13, 0x79, 0x2c, 0x7b, 0xa1, 0x6a, 0x5d,
    +	0xca, 0xe5, 0x40, 0xae, 0x25, 0x38, 0x90, 0xc7, 0x3b, 0x93, 0x6a, 0xcf, 0x79, 0xfc, 0x73, 0x0b,
    +	0x26, 0x0c, 0xfc, 0x43, 0xb8, 0x00, 0x97, 0xcc, 0x0b, 0xf0, 0xe1, 0x8e, 0xdf, 0x90, 0x73, 0xf1,
    +	0x7d, 0x7f, 0x31, 0xd1, 0x77, 0x76, 0xe1, 0xbd, 0x0d, 0x3d, 0x9b, 0x4e, 0x50, 0x13, 0xef, 0xfa,
    +	0xf3, 0x5d, 0x8d, 0xf5, 0xcc, 0x65, 0x27, 0x10, 0x96, 0x16, 0x4f, 0xcb, 0x51, 0xa7, 0x45, 0x1d,
    +	0xad, 0x2c, 0x58, 0x53, 0xe8, 0x22, 0xf4, 0x85, 0x55, 0xbf, 0xa9, 0xfc, 0xe4, 0x58, 0x3a, 0xed,
    +	0x0a, 0x2b, 0xd9, 0xdf, 0x2d, 0x21, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0x7a, 0x13, 0x46, 0xd8,
    +	0x2f, 0x65, 0xf6, 0x58, 0xcc, 0x97, 0xc0, 0x54, 0x74, 0x44, 0x6e, 0x13, 0x6c, 0x14, 0x61, 0x93,
    +	0xd4, 0xf4, 0x06, 0x0c, 0xaa, 0xcf, 0x7a, 0xa0, 0xda, 0xfa, 0xff, 0x58, 0x84, 0xc9, 0x8c, 0x35,
    +	0x87, 0x42, 0x63, 0x26, 0x2e, 0x74, 0xb9, 0x54, 0xdf, 0xe1, 0x5c, 0x84, 0xec, 0x01, 0x58, 0x13,
    +	0x6b, 0xab, 0xeb, 0x46, 0xaf, 0x87, 0x24, 0xd9, 0x28, 0x2d, 0xea, 0xdc, 0x28, 0x6d, 0xec, 0xd0,
    +	0x86, 0x9a, 0x36, 0xa4, 0x7a, 0xfa, 0x40, 0xe7, 0xf4, 0x37, 0x7b, 0xe0, 0x48, 0x56, 0x0c, 0x69,
    +	0xf4, 0x19, 0x10, 0xe9, 0xff, 0xc5, 0xb4, 0x3e, 0xdf, 0x6e, 0x84, 0xf5, 0x9a, 0x33, 0xcc, 0x17,
    +	0x4c, 0x84, 0x6e, 0x9d, 0x91, 0xc7, 0x11, 0x2f, 0xec, 0x38, 0xcc, 0xa2, 0x4d, 0x16, 0x52, 0x49,
    +	0xdc, 0x9e, 0xf2, 0xf8, 0xf8, 0x50, 0xd7, 0x1d, 0x10, 0xf7, 0x6f, 0x98, 0x30, 0xa9, 0x92, 0xc5,
    +	0x9d, 0x4d, 0xaa, 0x64, 0xcb, 0x68, 0x19, 0xfa, 0xaa, 0xdc, 0x56, 0xa7, 0xd8, 0xf9, 0x08, 0xe3,
    +	0x86, 0x3a, 0xea, 0x00, 0x16, 0x06, 0x3a, 0x82, 0xc0, 0xb4, 0x0b, 0x43, 0xda, 0xc0, 0x3c, 0xd0,
    +	0xc5, 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xe8, 0x02, 0xfa, 0x31, 0xed, 0xee, 0x17, 0xe7,
    +	0xc1, 0x07, 0x0d, 0xde, 0xe9, 0x64, 0xc2, 0x05, 0x2f, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x31, 0x63,
    +	0x9e, 0xe7, 0xa6, 0x4e, 0x32, 0x2f, 0xfc, 0xf6, 0x71, 0xce, 0xed, 0x1f, 0xb7, 0x20, 0xe1, 0x24,
    +	0xa5, 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x0c, 0xf4, 0x04, 0x7e, 0x9d, 0x24, 0x53, 0xd3, 0x63,
    +	0xbf, 0x4e, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x16, 0x62, 0x0d, 0xeb, 0x0f, 0x74, 0xf1, 0xf4, 0x7e,
    +	0x04, 0x7a, 0xeb, 0x64, 0x9b, 0xd4, 0x93, 0x19, 0x44, 0xaf, 0xd2, 0x42, 0xcc, 0x61, 0xf6, 0x2f,
    +	0xf7, 0xc0, 0xa9, 0xb6, 0x91, 0xd7, 0x28, 0x83, 0xb9, 0xe1, 0x44, 0xe4, 0xb6, 0xb3, 0x93, 0xcc,
    +	0x9c, 0x77, 0x89, 0x17, 0x63, 0x09, 0x67, 0xce, 0xc8, 0x3c, 0x93, 0x4c, 0x42, 0x38, 0x2c, 0x12,
    +	0xc8, 0x08, 0xa8, 0x29, 0x6c, 0x2c, 0xde, 0x0f, 0x61, 0xe3, 0xb3, 0x00, 0x61, 0x58, 0xe7, 0x06,
    +	0x97, 0x35, 0xe1, 0xe5, 0x1c, 0x67, 0x1c, 0xaa, 0x5c, 0x15, 0x10, 0xac, 0x61, 0xa1, 0x05, 0x18,
    +	0x6f, 0x06, 0x7e, 0xc4, 0x65, 0xed, 0x0b, 0xdc, 0x26, 0xb9, 0xd7, 0x0c, 0x7a, 0x55, 0x4e, 0xc0,
    +	0x71, 0xaa, 0x06, 0x7a, 0x01, 0x86, 0x44, 0x20, 0xac, 0xb2, 0xef, 0xd7, 0x85, 0x78, 0x4f, 0x99,
    +	0xe9, 0x56, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0x80, 0xdf, 0x9f, 0x59, 0x8d, 0x0b, 0xf1,
    +	0x35, 0xbc, 0x44, 0xd0, 0xfc, 0x81, 0xae, 0x82, 0xe6, 0xc7, 0x02, 0xcf, 0xc1, 0xae, 0xf5, 0xc9,
    +	0xd0, 0x51, 0x44, 0xf8, 0x95, 0x1e, 0x98, 0x14, 0x0b, 0xe7, 0x41, 0x2f, 0x97, 0xeb, 0xe9, 0xe5,
    +	0x72, 0x3f, 0x44, 0xa2, 0xef, 0xaf, 0x99, 0xc3, 0x5e, 0x33, 0x3f, 0x6c, 0x81, 0xc9, 0x43, 0xa2,
    +	0xff, 0x2f, 0x37, 0xf5, 0xe8, 0x0b, 0xb9, 0x3c, 0x69, 0x1c, 0x51, 0xfb, 0x9d, 0x25, 0x21, 0xb5,
    +	0xff, 0x93, 0x05, 0x0f, 0x77, 0xa4, 0x88, 0x16, 0x61, 0x90, 0x31, 0xba, 0xda, 0xbb, 0xf8, 0x71,
    +	0xe5, 0xb3, 0x20, 0x01, 0x39, 0x7c, 0x77, 0x5c, 0x13, 0x2d, 0xa6, 0x72, 0xbc, 0x3e, 0x91, 0x91,
    +	0xe3, 0xf5, 0xa8, 0x31, 0x3c, 0xf7, 0x98, 0xe4, 0xf5, 0x0b, 0xf4, 0xc6, 0x31, 0x7d, 0x12, 0x3f,
    +	0x64, 0x88, 0x73, 0xed, 0x84, 0x38, 0x17, 0x99, 0xd8, 0xda, 0x1d, 0xf2, 0x1a, 0x8c, 0xb3, 0x08,
    +	0x99, 0xcc, 0x83, 0x46, 0x38, 0x33, 0x16, 0x62, 0x2b, 0xf9, 0xab, 0x09, 0x18, 0x4e, 0x61, 0xdb,
    +	0x7f, 0x5a, 0x84, 0x3e, 0xbe, 0xfd, 0x0e, 0xe1, 0xe1, 0xfb, 0x14, 0x0c, 0xba, 0x8d, 0x46, 0x8b,
    +	0xa7, 0xed, 0xec, 0x8d, 0x6d, 0xae, 0x97, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x49, 0x68, 0x12, 0xda,
    +	0x04, 0xe1, 0xe6, 0x1d, 0x9f, 0x59, 0x70, 0x22, 0x87, 0x73, 0x71, 0xea, 0x9e, 0x8d, 0x75, 0x0e,
    +	0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x06, 0x2d, 0x13, 0x99, 0x1a, 0x9e, 0x6c, 0x43, 0xad,
    +	0xa2, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, 0x33, 0xc6, 0x4d, 0x3f, 0x9d,
    +	0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0x7e, 0x11, 0x06, 0x15, 0xf1, 0x4e, 0x72, 0xc5, 0x61,
    +	0x9d, 0x61, 0xfb, 0x08, 0x8c, 0x25, 0xfa, 0x76, 0x20, 0xb1, 0xe4, 0xaf, 0x58, 0x30, 0xc6, 0x3b,
    +	0xb3, 0xe8, 0x6d, 0x8b, 0xdb, 0xe0, 0x2e, 0x1c, 0xa9, 0x67, 0x9c, 0xca, 0x62, 0xfa, 0xbb, 0x3f,
    +	0xc5, 0x95, 0x18, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0xe7, 0xe8, 0x8e, 0xa3, 0xa7, 0xae, 0x53,
    +	0x17, 0xd1, 0x36, 0x86, 0xf9, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, 0x3f, 0xb4, 0x60, 0x82, 0xf7,
    +	0xfc, 0x0a, 0xd9, 0x51, 0x67, 0xd3, 0xb7, 0xb2, 0xef, 0x22, 0x61, 0x74, 0x21, 0x27, 0x61, 0xb4,
    +	0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, 0x65, 0x0b, 0xc4, 0x0a, 0x39, 0x04, 0x49, 0xcb, 0x77, 0x98,
    +	0x92, 0x96, 0xe9, 0xfc, 0x4d, 0x90, 0x23, 0x62, 0xf9, 0x6b, 0x0b, 0xc6, 0x39, 0x42, 0x6c, 0x05,
    +	0xf1, 0x2d, 0x9d, 0x87, 0x39, 0xf3, 0x8b, 0x32, 0xcd, 0x5a, 0xaf, 0x90, 0x9d, 0x35, 0xbf, 0xec,
    +	0x44, 0x9b, 0xd9, 0x1f, 0x65, 0x4c, 0x56, 0x4f, 0xdb, 0xc9, 0xaa, 0xc9, 0x0d, 0x64, 0x24, 0x26,
    +	0xec, 0x20, 0x00, 0x3e, 0x68, 0x62, 0x42, 0xfb, 0xcf, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0x71, 0xa3,
    +	0xec, 0x10, 0x2b, 0xd5, 0x2e, 0xba, 0xf8, 0x68, 0x52, 0x10, 0xac, 0x61, 0xdd, 0x97, 0xe1, 0x49,
    +	0x98, 0xb2, 0x14, 0x3b, 0x9b, 0xb2, 0x1c, 0x60, 0x44, 0xbf, 0xdc, 0x0f, 0x49, 0x9f, 0x49, 0x74,
    +	0x03, 0x86, 0xab, 0x4e, 0xd3, 0xb9, 0xe5, 0xd6, 0xdd, 0xc8, 0x25, 0x61, 0x3b, 0x3b, 0xb7, 0x79,
    +	0x0d, 0x4f, 0x18, 0x1f, 0x68, 0x25, 0xd8, 0xa0, 0x83, 0x66, 0x00, 0x9a, 0x81, 0xbb, 0xed, 0xd6,
    +	0xc9, 0x06, 0x13, 0x08, 0xb1, 0xf8, 0x3e, 0xdc, 0xe8, 0x4e, 0x96, 0x62, 0x0d, 0x23, 0x23, 0xf4,
    +	0x46, 0xf1, 0x01, 0x87, 0xde, 0x80, 0x43, 0x0b, 0xbd, 0xd1, 0x73, 0xa0, 0xd0, 0x1b, 0x03, 0x07,
    +	0x0e, 0xbd, 0xd1, 0xdb, 0x55, 0xe8, 0x0d, 0x0c, 0xc7, 0x24, 0xef, 0x49, 0xff, 0x2f, 0xb9, 0x75,
    +	0x22, 0x1e, 0x1c, 0x3c, 0x28, 0xd1, 0xf4, 0xde, 0x6e, 0xe9, 0x18, 0xce, 0xc4, 0xc0, 0x39, 0x35,
    +	0xd1, 0x47, 0x61, 0xca, 0xa9, 0xd7, 0xfd, 0xdb, 0x6a, 0x52, 0x17, 0xc3, 0xaa, 0x53, 0xe7, 0xca,
    +	0xa5, 0x7e, 0x46, 0xf5, 0xe4, 0xde, 0x6e, 0x69, 0x6a, 0x36, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0xaf,
    +	0xc0, 0x60, 0x33, 0xf0, 0xab, 0x2b, 0x9a, 0x63, 0xf7, 0x69, 0x3a, 0x80, 0x65, 0x59, 0xb8, 0xbf,
    +	0x5b, 0x1a, 0x51, 0x7f, 0xd8, 0x85, 0x1f, 0x57, 0xc8, 0x88, 0x6a, 0x31, 0xf4, 0xa0, 0xa3, 0x5a,
    +	0x0c, 0xdf, 0xef, 0xa8, 0x16, 0x5b, 0x30, 0x59, 0x21, 0x81, 0xeb, 0xd4, 0xdd, 0xbb, 0x94, 0x27,
    +	0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x06, 0x89, 0x53, 0xbf, 0xab, 0xe0, 0xdb, 0x9a, 0x5c, 0x46, 0x9e,
    +	0xf2, 0x31, 0x21, 0xfb, 0x7f, 0x5b, 0xd0, 0x2f, 0xfc, 0x30, 0x0f, 0x81, 0x33, 0x9d, 0x35, 0x54,
    +	0x32, 0xa5, 0xec, 0x49, 0x61, 0x9d, 0xc9, 0x55, 0xc6, 0x2c, 0x27, 0x94, 0x31, 0x0f, 0xb7, 0x23,
    +	0xd2, 0x5e, 0x0d, 0xf3, 0x77, 0x8b, 0xf4, 0x85, 0x60, 0x44, 0x04, 0x78, 0xf0, 0x43, 0xb0, 0x0a,
    +	0xfd, 0xa1, 0xf0, 0x48, 0x2f, 0xe4, 0xfb, 0xf2, 0x24, 0x27, 0x31, 0xb6, 0x81, 0x14, 0x3e, 0xe8,
    +	0x92, 0x48, 0xa6, 0xab, 0x7b, 0xf1, 0x01, 0xba, 0xba, 0x77, 0x8a, 0x99, 0xd0, 0x73, 0x3f, 0x62,
    +	0x26, 0xd8, 0x5f, 0x63, 0xb7, 0xb3, 0x5e, 0x7e, 0x08, 0x8c, 0xdb, 0x25, 0xf3, 0x1e, 0xb7, 0xdb,
    +	0xac, 0x2c, 0xd1, 0xa9, 0x1c, 0x06, 0xee, 0x97, 0x2c, 0x38, 0x95, 0xf1, 0x55, 0x1a, 0x37, 0xf7,
    +	0x34, 0x0c, 0x38, 0xad, 0x9a, 0xab, 0xf6, 0xb2, 0xa6, 0x2d, 0x9e, 0x15, 0xe5, 0x58, 0x61, 0xa0,
    +	0x79, 0x98, 0x20, 0x77, 0x9a, 0x2e, 0x57, 0xc3, 0xeb, 0xa6, 0xe3, 0x45, 0xee, 0xbc, 0xbb, 0x98,
    +	0x04, 0xe2, 0x34, 0xbe, 0x0a, 0x89, 0x56, 0xcc, 0x0d, 0x89, 0xf6, 0x0b, 0x16, 0x0c, 0x29, 0x9f,
    +	0xec, 0x07, 0x3e, 0xda, 0xaf, 0x99, 0xa3, 0xfd, 0x50, 0x9b, 0xd1, 0xce, 0x19, 0xe6, 0x3f, 0x28,
    +	0xa8, 0xfe, 0x96, 0xfd, 0x20, 0xea, 0x82, 0x4b, 0xbc, 0x77, 0xb7, 0x97, 0x0b, 0x30, 0xe4, 0x34,
    +	0x9b, 0x12, 0x20, 0xed, 0x17, 0x59, 0x2a, 0x85, 0xb8, 0x18, 0xeb, 0x38, 0xca, 0x0b, 0xa7, 0x98,
    +	0xeb, 0x85, 0x53, 0x03, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xb4, 0x4c, 0x98, 0x5b, 0xe7, 0x9f, 0x37,
    +	0xad, 0xc8, 0xad, 0xcf, 0xb8, 0x5e, 0x14, 0x46, 0xc1, 0xcc, 0xb2, 0x17, 0x5d, 0x0b, 0xf8, 0x33,
    +	0x55, 0x0b, 0x2a, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x8c, 0x3f, 0xc2, 0xda, 0xe8, 0x35, 0x0d, 0x61,
    +	0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x8b, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0x80, 0x7a, 0x7f,
    +	0x3e, 0xac, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf4, 0xb0, 0x7d, 0xed, 0x0f, 0x7b, 0xda, 0xb0, 0xee,
    +	0xcf, 0x1a, 0xc7, 0xf6, 0x43, 0x1f, 0x4f, 0x19, 0x37, 0x3d, 0xd3, 0xe1, 0xd6, 0x38, 0x80, 0x39,
    +	0x13, 0xcb, 0xab, 0xc6, 0xb2, 0x4e, 0x2d, 0x97, 0xc5, 0xbe, 0xd0, 0xf2, 0xaa, 0x09, 0x00, 0x8e,
    +	0x71, 0x28, 0xc3, 0xa6, 0xfe, 0x84, 0x53, 0x28, 0x0e, 0xbf, 0xad, 0xb0, 0x43, 0xac, 0x61, 0xa0,
    +	0xf3, 0x42, 0x68, 0xc1, 0x75, 0x0f, 0x0f, 0x25, 0x84, 0x16, 0x72, 0xb8, 0x34, 0x49, 0xd3, 0x05,
    +	0x18, 0x22, 0x77, 0x22, 0x12, 0x78, 0x4e, 0x9d, 0xb6, 0xd0, 0x1b, 0x47, 0x8c, 0x5d, 0x8c, 0x8b,
    +	0xb1, 0x8e, 0x83, 0xd6, 0x60, 0x2c, 0xe4, 0xb2, 0x3c, 0x95, 0xf4, 0x81, 0xcb, 0x44, 0x9f, 0x54,
    +	0xde, 0xf0, 0x26, 0x78, 0x9f, 0x15, 0xf1, 0xd3, 0x49, 0xc6, 0x08, 0x49, 0x92, 0x40, 0xaf, 0xc2,
    +	0x68, 0xdd, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x6c, 0x7c, 0x06, 0xcc, 0xec, 0xfc, 0x57,
    +	0x0d, 0x28, 0x4e, 0x60, 0x53, 0x06, 0x51, 0x2f, 0x11, 0x89, 0x4a, 0x1c, 0x6f, 0x83, 0x84, 0x53,
    +	0x83, 0xec, 0xab, 0x18, 0x83, 0x78, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0x17, 0x61, 0x58, 0x7e,
    +	0xbe, 0x16, 0x52, 0x27, 0x76, 0x68, 0xd2, 0x60, 0xd8, 0xc0, 0x44, 0x21, 0x1c, 0x95, 0xff, 0xd7,
    +	0x02, 0x67, 0x7d, 0xdd, 0xad, 0x8a, 0x38, 0x13, 0xdc, 0xf9, 0xfb, 0x23, 0xd2, 0xd3, 0x74, 0x31,
    +	0x0b, 0x69, 0x7f, 0xb7, 0x74, 0x52, 0x8c, 0x5a, 0x26, 0x1c, 0x67, 0xd3, 0x46, 0x2b, 0x30, 0xc9,
    +	0x6d, 0x60, 0xe6, 0x37, 0x49, 0x75, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x97, 0xd3,
    +	0x28, 0x38, 0xab, 0x1e, 0x7a, 0x0b, 0xa6, 0x9a, 0xad, 0x5b, 0x75, 0x37, 0xdc, 0x5c, 0xf5, 0x23,
    +	0x66, 0x42, 0x36, 0x5b, 0xab, 0x05, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x61, 0x90, 0xca,
    +	0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x0b, 0x47, 0x13, 0x0b, 0x41, 0xc4, 0x33, 0x19, 0xcd, 0x4f,
    +	0xf9, 0x54, 0xc9, 0xaa, 0x20, 0x42, 0x03, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x25, 0x00, 0xb7,
    +	0xb9, 0xe4, 0x34, 0xdc, 0x3a, 0x7d, 0x8e, 0x4e, 0xb2, 0x35, 0x42, 0x9f, 0x26, 0xb0, 0x5c, 0x96,
    +	0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc2, 0xa8, 0xf8, 0xb7, 0x23, 0xa6,
    +	0x74, 0x42, 0x65, 0x07, 0x1d, 0x95, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, 0x80,
    +	0x53, 0x32, 0x35, 0xa9, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xcf, 0xd2, 0x00, 0xf7, 0x29, 0x9a,
    +	0x6d, 0x87, 0x88, 0xdb, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xd1, 0x38, 0x12,
    +	0xe6, 0xd5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0x47, 0x5d, 0x2f, 0x6b, 0x55, 0x1f, 0x63, 0x84,
    +	0x3e, 0xcc, 0x9d, 0xe5, 0xdb, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0x96, 0x61, 0x32, 0xe2,
    +	0x05, 0x0b, 0x6e, 0xc8, 0xd3, 0xb8, 0xd0, 0x67, 0xdf, 0x71, 0xd6, 0xdc, 0x71, 0xba, 0x9a, 0xd7,
    +	0xd2, 0x60, 0x9c, 0x55, 0xe7, 0x9d, 0x19, 0x80, 0x7e, 0xdd, 0xa2, 0xb5, 0x35, 0x46, 0x1f, 0x7d,
    +	0x0a, 0x86, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xd9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, 0x41,
    +	0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x35, 0x23, 0xc8, 0xc5, 0xf9, 0xee, 0x98, 0xa2, 0xee,
    +	0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x15, 0x06, 0xaa, 0x75, 0x97, 0x78, 0xd1, 0x72, 0xb9,
    +	0x5d, 0x80, 0xd2, 0x79, 0x81, 0x23, 0xb6, 0xa2, 0xc8, 0xbe, 0xc4, 0xcb, 0xb0, 0xa2, 0x60, 0x5f,
    +	0x84, 0xa1, 0x4a, 0x9d, 0x90, 0x26, 0xf7, 0xe3, 0x42, 0x4f, 0xb0, 0x87, 0x09, 0x63, 0x2d, 0x2d,
    +	0xc6, 0x5a, 0xea, 0x6f, 0x0e, 0xc6, 0x54, 0x4a, 0xb8, 0xfd, 0xdb, 0x05, 0x28, 0x75, 0x48, 0x02,
    +	0x96, 0xd0, 0xb7, 0x59, 0x5d, 0xe9, 0xdb, 0x66, 0x61, 0x2c, 0xfe, 0xa7, 0x8b, 0xf2, 0x94, 0x31,
    +	0xf4, 0x0d, 0x13, 0x8c, 0x93, 0xf8, 0x5d, 0xfb, 0xb5, 0xe8, 0x2a, 0xbb, 0x9e, 0x8e, 0x9e, 0x59,
    +	0x86, 0xaa, 0xbe, 0xb7, 0xfb, 0xb7, 0x77, 0xae, 0xda, 0xd5, 0xfe, 0x5a, 0x01, 0x8e, 0xaa, 0x21,
    +	0x7c, 0xef, 0x0e, 0xdc, 0xf5, 0xf4, 0xc0, 0xdd, 0x07, 0xa5, 0xb5, 0x7d, 0x0d, 0xfa, 0x78, 0xd4,
    +	0xd4, 0x2e, 0x78, 0xfe, 0x47, 0xcc, 0xe0, 0xf4, 0x8a, 0xcd, 0x34, 0x02, 0xd4, 0xff, 0x80, 0x05,
    +	0x63, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0x7b, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0x33, 0xd0,
    +	0xb3, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xd9, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0x1f, 0x59, 0xd0,
    +	0xbb, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x9b, 0xef, 0x42, 0x2f, 0x40,
    +	0x1f, 0x59, 0x5f, 0x27, 0xd5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0xd1, 0xb7, 0xc8, 0x4a, 0x29, 0x13,
    +	0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x37, 0x61, 0x30, 0x72, 0x1b, 0x64, 0xb6, 0x56, 0x13,
    +	0x36, 0x01, 0xf7, 0x10, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0x8b, 0x05, 0x80, 0x38,
    +	0x14, 0x5c, 0xa7, 0x4f, 0x9c, 0x4b, 0x69, 0x8b, 0xcf, 0x66, 0x68, 0x8b, 0x51, 0x4c, 0x30, 0x43,
    +	0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd5, 0x30, 0xf5, 0x1c, 0x64, 0x98, 0xe6, 0x61, 0x22, 0x0e, 0x65,
    +	0x67, 0x46, 0xf2, 0x64, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x33, 0x2a, 0xa2,
    +	0x97, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x30, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, 0xab,
    +	0x0e, 0xff, 0x29, 0x0b, 0x8e, 0x24, 0xdb, 0x61, 0x7e, 0xf7, 0x9f, 0xb7, 0xe0, 0x68, 0x9c, 0x03,
    +	0x27, 0x6d, 0x82, 0xf0, 0x7c, 0xdb, 0x28, 0x65, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, 0x91,
    +	0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xab, 0x07, 0xa6, 0xf2, 0xc2, 0x9b, 0x31, 0x4f, 0x23, 0xe7, 0x4e,
    +	0x65, 0x8b, 0xdc, 0x16, 0xfe, 0x1c, 0xb1, 0xa7, 0x11, 0x2f, 0xc6, 0x12, 0x9e, 0x4c, 0x7b, 0x54,
    +	0xe8, 0x32, 0xed, 0xd1, 0x26, 0x4c, 0xdc, 0xde, 0x24, 0xde, 0x75, 0x2f, 0x74, 0x22, 0x37, 0x5c,
    +	0x77, 0x99, 0x02, 0x9d, 0xaf, 0x1b, 0x99, 0xba, 0x7f, 0xe2, 0x66, 0x12, 0x61, 0x7f, 0xb7, 0x74,
    +	0xca, 0x28, 0x88, 0xbb, 0xcc, 0x0f, 0x12, 0x9c, 0x26, 0x9a, 0xce, 0x1a, 0xd5, 0xf3, 0x80, 0xb3,
    +	0x46, 0x35, 0x5c, 0x61, 0x76, 0x23, 0xdd, 0x48, 0xd8, 0xb3, 0x75, 0x45, 0x95, 0x62, 0x0d, 0x03,
    +	0x7d, 0x02, 0x90, 0x9e, 0xf6, 0xcf, 0x88, 0x2e, 0xfb, 0xcc, 0xde, 0x6e, 0x09, 0xad, 0xa6, 0xa0,
    +	0xfb, 0xbb, 0xa5, 0x49, 0x5a, 0xba, 0xec, 0xd1, 0xe7, 0x6f, 0x1c, 0x92, 0x2f, 0x83, 0x10, 0xba,
    +	0x09, 0xe3, 0xb4, 0x94, 0xed, 0x28, 0x19, 0xba, 0x96, 0x3f, 0x59, 0x9f, 0xda, 0xdb, 0x2d, 0x8d,
    +	0xaf, 0x26, 0x60, 0x79, 0xa4, 0x53, 0x44, 0x32, 0x92, 0x47, 0x0d, 0x74, 0x9b, 0x3c, 0xca, 0xfe,
    +	0xbc, 0x05, 0x27, 0xe8, 0x05, 0x57, 0xbb, 0x9a, 0xa3, 0x45, 0x77, 0x9a, 0x2e, 0xd7, 0xd3, 0x88,
    +	0xab, 0x86, 0xc9, 0xea, 0xca, 0xcb, 0x5c, 0x4b, 0xa3, 0xa0, 0xf4, 0x84, 0xdf, 0x72, 0xbd, 0x5a,
    +	0xf2, 0x84, 0xbf, 0xe2, 0x7a, 0x35, 0xcc, 0x20, 0xea, 0xca, 0x2a, 0xe6, 0x46, 0xa9, 0xff, 0x0a,
    +	0xdd, 0xab, 0xb4, 0x2f, 0xdf, 0xd2, 0x6e, 0xa0, 0xa7, 0x74, 0x9d, 0xaa, 0x30, 0x9f, 0xcc, 0xd5,
    +	0xa7, 0x7e, 0xce, 0x02, 0xe1, 0xfd, 0xde, 0xc5, 0x9d, 0xfc, 0x26, 0x0c, 0x6f, 0xa7, 0x53, 0xa2,
    +	0x9e, 0xc9, 0x0f, 0x07, 0x20, 0x12, 0xa1, 0x2a, 0x16, 0xdd, 0x48, 0x7f, 0x6a, 0xd0, 0xb2, 0x6b,
    +	0x20, 0xa0, 0x0b, 0x84, 0x69, 0x35, 0x3a, 0xf7, 0xe6, 0x59, 0x80, 0x1a, 0xc3, 0x65, 0x79, 0xd2,
    +	0x0b, 0x26, 0xc7, 0xb5, 0xa0, 0x20, 0x58, 0xc3, 0xb2, 0x7f, 0xae, 0x08, 0x43, 0x32, 0x05, 0x67,
    +	0xcb, 0xeb, 0x46, 0xf6, 0x78, 0xa0, 0x9c, 0xfc, 0xe8, 0x2d, 0x98, 0x08, 0x48, 0xb5, 0x15, 0x84,
    +	0xee, 0x36, 0x91, 0x60, 0xb1, 0x49, 0x66, 0x78, 0x92, 0x84, 0x04, 0x70, 0x9f, 0x85, 0xc8, 0x4a,
    +	0x14, 0x32, 0xa5, 0x71, 0x9a, 0x10, 0x3a, 0x0f, 0x83, 0x4c, 0xf4, 0x5e, 0x8e, 0x05, 0xc2, 0x4a,
    +	0xf0, 0xb5, 0x22, 0x01, 0x38, 0xc6, 0x61, 0x8f, 0x83, 0xd6, 0x2d, 0x86, 0x9e, 0xf0, 0x04, 0xaf,
    +	0xf0, 0x62, 0x2c, 0xe1, 0xe8, 0xa3, 0x30, 0xce, 0xeb, 0x05, 0x7e, 0xd3, 0xd9, 0xe0, 0x2a, 0xc1,
    +	0x5e, 0x15, 0x5e, 0x67, 0x7c, 0x25, 0x01, 0xdb, 0xdf, 0x2d, 0x1d, 0x49, 0x96, 0xb1, 0x6e, 0xa7,
    +	0xa8, 0x30, 0xcb, 0x3f, 0xde, 0x08, 0xbd, 0x33, 0x52, 0x06, 0x83, 0x31, 0x08, 0xeb, 0x78, 0xf6,
    +	0x5f, 0x59, 0x30, 0xa1, 0x4d, 0x55, 0xd7, 0x79, 0x2a, 0x8c, 0x41, 0x2a, 0x74, 0x31, 0x48, 0x07,
    +	0x8b, 0xf6, 0x90, 0x39, 0xc3, 0x3d, 0xf7, 0x69, 0x86, 0xed, 0x4f, 0x01, 0x4a, 0xe7, 0x77, 0x45,
    +	0xaf, 0x73, 0x43, 0x7e, 0x37, 0x20, 0xb5, 0x76, 0x0a, 0x7f, 0x3d, 0x72, 0x8e, 0xf4, 0x5c, 0xe5,
    +	0xb5, 0xb0, 0xaa, 0x6f, 0xff, 0x60, 0x0f, 0x8c, 0x27, 0x63, 0x75, 0xa0, 0xcb, 0xd0, 0xc7, 0xb9,
    +	0x74, 0x41, 0xbe, 0x8d, 0x3d, 0x99, 0x16, 0xe1, 0x83, 0xf1, 0x2b, 0x82, 0xd1, 0x17, 0xf5, 0xd1,
    +	0x5b, 0x30, 0x54, 0xf3, 0x6f, 0x7b, 0xb7, 0x9d, 0xa0, 0x36, 0x5b, 0x5e, 0x16, 0x27, 0x44, 0xa6,
    +	0x00, 0x6a, 0x21, 0x46, 0xd3, 0xa3, 0x86, 0x30, 0xdb, 0x89, 0x18, 0x84, 0x75, 0x72, 0x68, 0x8d,
    +	0xa5, 0x2c, 0x5a, 0x77, 0x37, 0x56, 0x9c, 0x66, 0x3b, 0xaf, 0xae, 0x79, 0x89, 0xa4, 0x51, 0x1e,
    +	0x11, 0x79, 0x8d, 0x38, 0x00, 0xc7, 0x84, 0xd0, 0x67, 0x60, 0x32, 0xcc, 0x51, 0x89, 0xe5, 0xa5,
    +	0xfb, 0x6e, 0xa7, 0x25, 0xe2, 0xc2, 0x94, 0x2c, 0xe5, 0x59, 0x56, 0x33, 0xe8, 0x0e, 0x20, 0x21,
    +	0x7a, 0x5e, 0x0b, 0x5a, 0x61, 0x34, 0xd7, 0xf2, 0x6a, 0x75, 0x99, 0xd2, 0xe8, 0x83, 0xd9, 0x72,
    +	0x82, 0x24, 0xb6, 0xd6, 0x36, 0x8b, 0xdd, 0x9b, 0xc6, 0xc0, 0x19, 0x6d, 0xd8, 0x9f, 0xeb, 0x81,
    +	0x69, 0x99, 0x50, 0x39, 0xc3, 0x7b, 0xe5, 0xb3, 0x56, 0xc2, 0x7d, 0xe5, 0xa5, 0xfc, 0x83, 0xfe,
    +	0x81, 0x39, 0xb1, 0x7c, 0x21, 0xed, 0xc4, 0xf2, 0xca, 0x01, 0xbb, 0x71, 0xdf, 0x5c, 0x59, 0xde,
    +	0xb3, 0xfe, 0x27, 0x7b, 0x47, 0xc0, 0xb8, 0x9a, 0x11, 0xe6, 0x81, 0xd1, 0xcb, 0x52, 0x75, 0x94,
    +	0xf3, 0xfc, 0xbf, 0x2c, 0x70, 0x8c, 0xcb, 0x7e, 0x58, 0x86, 0x4f, 0x67, 0xe7, 0xac, 0xa2, 0x43,
    +	0x69, 0x92, 0x46, 0x33, 0xda, 0x59, 0x70, 0x03, 0xd1, 0xe3, 0x4c, 0x9a, 0x8b, 0x02, 0x27, 0x4d,
    +	0x53, 0x42, 0xb0, 0xa2, 0x83, 0xb6, 0x61, 0x62, 0x83, 0x45, 0x7c, 0xd2, 0x72, 0x1b, 0x8b, 0x73,
    +	0x21, 0x73, 0xdf, 0x5e, 0x9a, 0x5f, 0xcc, 0x4f, 0x84, 0xcc, 0x1f, 0x7f, 0x29, 0x14, 0x9c, 0x6e,
    +	0x82, 0x6e, 0x8d, 0x23, 0xce, 0xed, 0x70, 0xb1, 0xee, 0x84, 0x91, 0x5b, 0x9d, 0xab, 0xfb, 0xd5,
    +	0xad, 0x4a, 0xe4, 0x07, 0x32, 0x01, 0x62, 0xe6, 0xdb, 0x6b, 0xf6, 0x66, 0x25, 0x85, 0x6f, 0x34,
    +	0x3f, 0xb5, 0xb7, 0x5b, 0x3a, 0x92, 0x85, 0x85, 0x33, 0xdb, 0x42, 0xab, 0xd0, 0xbf, 0xe1, 0x46,
    +	0x98, 0x34, 0x7d, 0x71, 0x5a, 0x64, 0x1e, 0x85, 0x97, 0x38, 0x8a, 0xd1, 0x12, 0x8b, 0x48, 0x25,
    +	0x00, 0x58, 0x12, 0x41, 0xaf, 0xab, 0x4b, 0xa0, 0x2f, 0x5f, 0x00, 0x9b, 0xb6, 0xbd, 0xcb, 0xbc,
    +	0x06, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0xb6, 0x8b, 0xc5, 0xb3, 0xba, 0x64, 0xc8, 0xcf, 0xe6, 0xfa,
    +	0xe9, 0xd3, 0x78, 0x75, 0xa9, 0x82, 0x69, 0x45, 0xe6, 0xf6, 0x1a, 0x56, 0x43, 0x57, 0x24, 0x5c,
    +	0xca, 0xf4, 0x02, 0x5e, 0xae, 0xcc, 0x57, 0x96, 0x0d, 0x1a, 0x2c, 0xaa, 0x21, 0x2b, 0xc6, 0xbc,
    +	0x3a, 0xba, 0x01, 0x83, 0x1b, 0xfc, 0xe0, 0x5b, 0x0f, 0x45, 0x52, 0xf5, 0xcc, 0xcb, 0xe8, 0x92,
    +	0x44, 0x32, 0xe8, 0xb1, 0x2b, 0x43, 0x81, 0x70, 0x4c, 0x0a, 0x7d, 0xce, 0x82, 0xa3, 0xc9, 0xac,
    +	0xf4, 0xcc, 0x59, 0x4d, 0x98, 0xa9, 0x65, 0x3a, 0x00, 0x94, 0xb3, 0x2a, 0x18, 0x0d, 0x32, 0xf5,
    +	0x4b, 0x26, 0x1a, 0xce, 0x6e, 0x8e, 0x0e, 0x74, 0x70, 0xab, 0xd6, 0x2e, 0x47, 0x4f, 0x22, 0x30,
    +	0x11, 0x1f, 0x68, 0x3c, 0xb7, 0x80, 0x69, 0x45, 0xb4, 0x06, 0xb0, 0x5e, 0x27, 0x22, 0xe2, 0xa3,
    +	0x30, 0x8a, 0xca, 0xbc, 0xfd, 0x97, 0x14, 0x96, 0xa0, 0xc3, 0x5e, 0xa2, 0x71, 0x29, 0xd6, 0xe8,
    +	0xd0, 0xa5, 0x54, 0x75, 0xbd, 0x1a, 0x09, 0x98, 0x72, 0x2b, 0x67, 0x29, 0xcd, 0x33, 0x8c, 0xf4,
    +	0x52, 0xe2, 0xe5, 0x58, 0x50, 0x60, 0xb4, 0x48, 0x73, 0x73, 0x3d, 0x6c, 0x97, 0x72, 0x62, 0x9e,
    +	0x34, 0x37, 0x13, 0x0b, 0x8a, 0xd3, 0x62, 0xe5, 0x58, 0x50, 0xa0, 0x5b, 0x66, 0x9d, 0x6e, 0x20,
    +	0x12, 0x4c, 0x8d, 0xe5, 0x6f, 0x99, 0x25, 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4,
    +	0x49, 0x93, 0xdb, 0x19, 0x67, 0x34, 0x9f, 0xea, 0xc0, 0xed, 0x18, 0x74, 0xdb, 0xf3, 0x3b, 0x2f,
    +	0x41, 0x61, 0xbd, 0xca, 0x94, 0x62, 0x39, 0x3a, 0x83, 0xa5, 0x79, 0x83, 0x1a, 0x0b, 0xe1, 0xbe,
    +	0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xba, 0xf4, 0x9d, 0xbb, 0xad, 0x80, 0x2c, 0xb9, 0x75, 0x22, 0xd2,
    +	0x4f, 0x64, 0x2e, 0xfd, 0x59, 0x89, 0x94, 0x5e, 0xfa, 0x0a, 0x84, 0x63, 0x52, 0x94, 0x6e, 0xcc,
    +	0x83, 0x4d, 0xe6, 0xd3, 0x55, 0xac, 0x56, 0x9a, 0x6e, 0x26, 0x17, 0xb6, 0x05, 0x23, 0xdb, 0x61,
    +	0x73, 0x93, 0xc8, 0x53, 0x91, 0xa9, 0xeb, 0x72, 0x22, 0x55, 0xdc, 0x10, 0x88, 0x6e, 0x10, 0xb5,
    +	0x9c, 0x7a, 0xea, 0x20, 0x67, 0xa2, 0x95, 0x1b, 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0x0b, 0xe1, 0x6d,
    +	0x1e, 0x4e, 0x8e, 0x29, 0xee, 0x72, 0x16, 0x42, 0x46, 0xc4, 0x39, 0xbe, 0x10, 0x04, 0x00, 0x4b,
    +	0x22, 0x6a, 0xb0, 0xd9, 0x05, 0x74, 0xac, 0xc3, 0x60, 0xa7, 0xfa, 0x1b, 0x0f, 0x36, 0xbb, 0x70,
    +	0x62, 0x52, 0xec, 0xa2, 0x69, 0x66, 0x24, 0xf0, 0x67, 0x6a, 0xbb, 0x9c, 0x8b, 0xa6, 0x53, 0xc2,
    +	0x7f, 0x7e, 0xd1, 0x64, 0x61, 0xe1, 0xcc, 0xb6, 0xe8, 0xc7, 0x35, 0x65, 0x64, 0x40, 0x91, 0x22,
    +	0xe3, 0x89, 0x9c, 0xc0, 0x9a, 0xe9, 0xf0, 0x81, 0xfc, 0xe3, 0x14, 0x08, 0xc7, 0xa4, 0x50, 0x0d,
    +	0x46, 0x9b, 0x46, 0xc4, 0x59, 0x96, 0xea, 0x23, 0x87, 0x2f, 0xc8, 0x8a, 0x4d, 0xcb, 0x25, 0x44,
    +	0x26, 0x04, 0x27, 0x68, 0x32, 0xcb, 0x3d, 0xee, 0xea, 0xc7, 0x32, 0x81, 0xe4, 0x4c, 0x75, 0x86,
    +	0x37, 0x20, 0x9f, 0x6a, 0x01, 0xc0, 0x92, 0x08, 0x1d, 0x0d, 0xe1, 0xa0, 0xe6, 0x87, 0x2c, 0xa1,
    +	0x4e, 0x9e, 0x82, 0x3d, 0x4b, 0x4d, 0x24, 0xc3, 0xac, 0x0b, 0x10, 0x8e, 0x49, 0xd1, 0x93, 0x9c,
    +	0x5e, 0x78, 0x27, 0xf3, 0x4f, 0xf2, 0xe4, 0x75, 0xc7, 0x4e, 0x72, 0x7a, 0xd9, 0x15, 0xc5, 0x55,
    +	0xa7, 0xa2, 0x82, 0xb3, 0x64, 0x20, 0x39, 0xfd, 0x52, 0x61, 0xc5, 0xd3, 0xfd, 0x52, 0x20, 0x1c,
    +	0x93, 0x62, 0x57, 0x31, 0x0b, 0x4d, 0x77, 0xba, 0xcd, 0x55, 0x4c, 0x11, 0x32, 0xae, 0x62, 0x2d,
    +	0x74, 0x9d, 0xfd, 0x83, 0x05, 0x38, 0xdd, 0x7e, 0xdf, 0xc6, 0x3a, 0xb4, 0x72, 0x6c, 0xb3, 0x94,
    +	0xd0, 0xa1, 0x71, 0x89, 0x4e, 0x8c, 0xd5, 0x75, 0xc0, 0xe1, 0x4b, 0x30, 0xa1, 0xdc, 0x11, 0xeb,
    +	0x6e, 0x75, 0x47, 0x4b, 0xce, 0xa9, 0x42, 0xf3, 0x54, 0x92, 0x08, 0x38, 0x5d, 0x07, 0xcd, 0xc2,
    +	0x98, 0x51, 0xb8, 0xbc, 0x20, 0x9e, 0xff, 0x71, 0x1a, 0x0b, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf,
    +	0x5b, 0x70, 0x3c, 0x27, 0x0f, 0x7b, 0xd7, 0xf1, 0x74, 0xd7, 0x61, 0xac, 0x69, 0x56, 0xed, 0x10,
    +	0x02, 0xdc, 0xc8, 0xf6, 0xae, 0xfa, 0x9a, 0x00, 0xe0, 0x24, 0x51, 0xfb, 0x67, 0x0b, 0x70, 0xaa,
    +	0xad, 0x7d, 0x3d, 0xc2, 0x70, 0x6c, 0xa3, 0x11, 0x3a, 0xf3, 0x01, 0xa9, 0x11, 0x2f, 0x72, 0x9d,
    +	0x7a, 0xa5, 0x49, 0xaa, 0x9a, 0x16, 0x94, 0x19, 0xaa, 0x5f, 0x5a, 0xa9, 0xcc, 0xa6, 0x31, 0x70,
    +	0x4e, 0x4d, 0xb4, 0x04, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x9e, 0xb8, 0x69, 0x7a, 0x38, 0xa3, 0x06,
    +	0x7a, 0x11, 0x46, 0x94, 0xdd, 0xbe, 0x36, 0xe3, 0xec, 0x82, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8,
    +	0x02, 0xcf, 0x6f, 0x24, 0x32, 0x61, 0x09, 0x95, 0xe9, 0x98, 0x4c, 0x5e, 0x24, 0x8a, 0xb1, 0x8e,
    +	0x33, 0x77, 0xf1, 0x77, 0xbe, 0x79, 0xfa, 0x03, 0xbf, 0xff, 0xcd, 0xd3, 0x1f, 0xf8, 0xc3, 0x6f,
    +	0x9e, 0xfe, 0xc0, 0xf7, 0xec, 0x9d, 0xb6, 0x7e, 0x67, 0xef, 0xb4, 0xf5, 0xfb, 0x7b, 0xa7, 0xad,
    +	0x3f, 0xdc, 0x3b, 0x6d, 0xfd, 0xd7, 0xbd, 0xd3, 0xd6, 0x17, 0xff, 0xe4, 0xf4, 0x07, 0xde, 0x44,
    +	0x71, 0x84, 0xea, 0xf3, 0x74, 0x76, 0xce, 0x6f, 0x5f, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
    +	0x67, 0xd5, 0x38, 0x2d, 0xc3, 0x23, 0x01, 0x00,
     }
     
     func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
    @@ -8494,43 +8661,6 @@ func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func (m *ClaimSource) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ClaimSource) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ClaimSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.ResourceClaimTemplateName != nil {
    -		i -= len(*m.ResourceClaimTemplateName)
    -		copy(dAtA[i:], *m.ResourceClaimTemplateName)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName)))
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	if m.ResourceClaimName != nil {
    -		i -= len(*m.ResourceClaimName)
    -		copy(dAtA[i:], *m.ResourceClaimName)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName)))
    -		i--
    -		dAtA[i] = 0xa
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
     func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -9753,6 +9883,32 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if len(m.AllocatedResourcesStatus) > 0 {
    +		for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.AllocatedResourcesStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x72
    +		}
    +	}
    +	if m.User != nil {
    +		{
    +			size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x6a
    +	}
     	if len(m.VolumeMounts) > 0 {
     		for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -9872,6 +10028,41 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ContainerUser) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ContainerUser) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Linux != nil {
    +		{
    +			size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -11972,6 +12163,39 @@ func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ImageVolumeSource) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ImageVolumeSource) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ImageVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.PullPolicy)
    +	copy(dAtA[i:], m.PullPolicy)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullPolicy)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Reference)
    +	copy(dAtA[i:], m.Reference)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reference)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *KeyToPath) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -12428,6 +12652,42 @@ func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *LinuxContainerUser) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *LinuxContainerUser) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *LinuxContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.SupplementalGroups) > 0 {
    +		for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- {
    +			i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx]))
    +			i--
    +			dAtA[i] = 0x18
    +		}
    +	}
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.GID))
    +	i--
    +	dAtA[i] = 0x10
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.UID))
    +	i--
    +	dAtA[i] = 0x8
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *List) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -13260,6 +13520,39 @@ func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *NodeFeatures) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *NodeFeatures) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *NodeFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.SupplementalGroupsPolicy != nil {
    +		i--
    +		if *m.SupplementalGroupsPolicy {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x8
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *NodeList) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -13395,6 +13688,16 @@ func (m *NodeRuntimeHandlerFeatures) MarshalToSizedBuffer(dAtA []byte) (int, err
     	_ = i
     	var l int
     	_ = l
    +	if m.UserNamespaces != nil {
    +		i--
    +		if *m.UserNamespaces {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x10
    +	}
     	if m.RecursiveReadOnlyMounts != nil {
     		i--
     		if *m.RecursiveReadOnlyMounts {
    @@ -13639,6 +13942,18 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.Features != nil {
    +		{
    +			size, err := m.Features.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x6a
    +	}
     	if len(m.RuntimeHandlers) > 0 {
     		for iNdEx := len(m.RuntimeHandlers) - 1; iNdEx >= 0; iNdEx-- {
     			{
    @@ -15902,16 +16217,20 @@ func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    -	{
    -		size, err := m.Source.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	if m.ResourceClaimTemplateName != nil {
    +		i -= len(*m.ResourceClaimTemplateName)
    +		copy(dAtA[i:], *m.ResourceClaimTemplateName)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName)))
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if m.ResourceClaimName != nil {
    +		i -= len(*m.ResourceClaimName)
    +		copy(dAtA[i:], *m.ResourceClaimName)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName)))
    +		i--
    +		dAtA[i] = 0x1a
     	}
    -	i--
    -	dAtA[i] = 0x12
     	i -= len(m.Name)
     	copy(dAtA[i:], m.Name)
     	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    @@ -16003,6 +16322,13 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.SupplementalGroupsPolicy != nil {
    +		i -= len(*m.SupplementalGroupsPolicy)
    +		copy(dAtA[i:], *m.SupplementalGroupsPolicy)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SupplementalGroupsPolicy)))
    +		i--
    +		dAtA[i] = 0x62
    +	}
     	if m.AppArmorProfile != nil {
     		{
     			size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i])
    @@ -17878,6 +18204,11 @@ func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	i -= len(m.Request)
    +	copy(dAtA[i:], m.Request)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request)))
    +	i--
    +	dAtA[i] = 0x12
     	i -= len(m.Name)
     	copy(dAtA[i:], m.Name)
     	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    @@ -17929,6 +18260,39 @@ func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ResourceHealth) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceHealth) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Health)
    +	copy(dAtA[i:], m.Health)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Health)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.ResourceID)
    +	copy(dAtA[i:], m.ResourceID)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceID)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *ResourceQuota) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -18278,6 +18642,48 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *ResourceStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Resources) > 0 {
    +		for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -20738,6 +21144,20 @@ func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	_ = i
     	var l int
     	_ = l
    +	if m.Image != nil {
    +		{
    +			size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1
    +		i--
    +		dAtA[i] = 0xf2
    +	}
     	if m.Ephemeral != nil {
     		{
     			size, err := m.Ephemeral.MarshalToSizedBuffer(dAtA[:i])
    @@ -21596,23 +22016,6 @@ func (m *CinderVolumeSource) Size() (n int) {
     	return n
     }
     
    -func (m *ClaimSource) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if m.ResourceClaimName != nil {
    -		l = len(*m.ResourceClaimName)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.ResourceClaimTemplateName != nil {
    -		l = len(*m.ResourceClaimTemplateName)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
     func (m *ClientIPConfig) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -22097,6 +22500,29 @@ func (m *ContainerStatus) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if m.User != nil {
    +		l = m.User.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if len(m.AllocatedResourcesStatus) > 0 {
    +		for _, e := range m.AllocatedResourcesStatus {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ContainerUser) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Linux != nil {
    +		l = m.Linux.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -22881,6 +23307,19 @@ func (m *ISCSIVolumeSource) Size() (n int) {
     	return n
     }
     
    +func (m *ImageVolumeSource) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Reference)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.PullPolicy)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *KeyToPath) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -23040,6 +23479,22 @@ func (m *LimitRangeSpec) Size() (n int) {
     	return n
     }
     
    +func (m *LinuxContainerUser) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	n += 1 + sovGenerated(uint64(m.UID))
    +	n += 1 + sovGenerated(uint64(m.GID))
    +	if len(m.SupplementalGroups) > 0 {
    +		for _, e := range m.SupplementalGroups {
    +			n += 1 + sovGenerated(uint64(e))
    +		}
    +	}
    +	return n
    +}
    +
     func (m *List) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -23346,6 +23801,18 @@ func (m *NodeDaemonEndpoints) Size() (n int) {
     	return n
     }
     
    +func (m *NodeFeatures) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.SupplementalGroupsPolicy != nil {
    +		n += 2
    +	}
    +	return n
    +}
    +
     func (m *NodeList) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -23398,6 +23865,9 @@ func (m *NodeRuntimeHandlerFeatures) Size() (n int) {
     	if m.RecursiveReadOnlyMounts != nil {
     		n += 2
     	}
    +	if m.UserNamespaces != nil {
    +		n += 2
    +	}
     	return n
     }
     
    @@ -23558,6 +24028,10 @@ func (m *NodeStatus) Size() (n int) {
     			n += 1 + l + sovGenerated(uint64(l))
     		}
     	}
    +	if m.Features != nil {
    +		l = m.Features.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -24321,8 +24795,14 @@ func (m *PodResourceClaim) Size() (n int) {
     	_ = l
     	l = len(m.Name)
     	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Source.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    +	if m.ResourceClaimName != nil {
    +		l = len(*m.ResourceClaimName)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.ResourceClaimTemplateName != nil {
    +		l = len(*m.ResourceClaimTemplateName)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -24401,6 +24881,10 @@ func (m *PodSecurityContext) Size() (n int) {
     		l = m.AppArmorProfile.Size()
     		n += 1 + l + sovGenerated(uint64(l))
     	}
    +	if m.SupplementalGroupsPolicy != nil {
    +		l = len(*m.SupplementalGroupsPolicy)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -25040,6 +25524,8 @@ func (m *ResourceClaim) Size() (n int) {
     	_ = l
     	l = len(m.Name)
     	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Request)
    +	n += 1 + l + sovGenerated(uint64(l))
     	return n
     }
     
    @@ -25058,6 +25544,19 @@ func (m *ResourceFieldSelector) Size() (n int) {
     	return n
     }
     
    +func (m *ResourceHealth) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.ResourceID)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Health)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
     func (m *ResourceQuota) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -25178,6 +25677,23 @@ func (m *ResourceRequirements) Size() (n int) {
     	return n
     }
     
    +func (m *ResourceStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Resources) > 0 {
    +		for _, e := range m.Resources {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
     func (m *SELinuxOptions) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -26221,6 +26737,10 @@ func (m *VolumeSource) Size() (n int) {
     		l = m.Ephemeral.Size()
     		n += 2 + l + sovGenerated(uint64(l))
     	}
    +	if m.Image != nil {
    +		l = m.Image.Size()
    +		n += 2 + l + sovGenerated(uint64(l))
    +	}
     	return n
     }
     
    @@ -26516,17 +27036,6 @@ func (this *CinderVolumeSource) String() string {
     	}, "")
     	return s
     }
    -func (this *ClaimSource) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ClaimSource{`,
    -		`ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`,
    -		`ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
     func (this *ClientIPConfig) String() string {
     	if this == nil {
     		return "nil"
    @@ -26874,6 +27383,11 @@ func (this *ContainerStatus) String() string {
     		repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMountStatus", "VolumeMountStatus", 1), `&`, ``, 1) + ","
     	}
     	repeatedStringForVolumeMounts += "}"
    +	repeatedStringForAllocatedResourcesStatus := "[]ResourceStatus{"
    +	for _, f := range this.AllocatedResourcesStatus {
    +		repeatedStringForAllocatedResourcesStatus += strings.Replace(strings.Replace(f.String(), "ResourceStatus", "ResourceStatus", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForAllocatedResourcesStatus += "}"
     	keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources))
     	for k := range this.AllocatedResources {
     		keysForAllocatedResources = append(keysForAllocatedResources, string(k))
    @@ -26897,6 +27411,18 @@ func (this *ContainerStatus) String() string {
     		`AllocatedResources:` + mapStringForAllocatedResources + `,`,
     		`Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
     		`VolumeMounts:` + repeatedStringForVolumeMounts + `,`,
    +		`User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`,
    +		`AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ContainerUser) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ContainerUser{`,
    +		`Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerUser", "LinuxContainerUser", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -27480,6 +28006,17 @@ func (this *ISCSIVolumeSource) String() string {
     	}, "")
     	return s
     }
    +func (this *ImageVolumeSource) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ImageVolumeSource{`,
    +		`Reference:` + fmt.Sprintf("%v", this.Reference) + `,`,
    +		`PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *KeyToPath) String() string {
     	if this == nil {
     		return "nil"
    @@ -27623,6 +28160,18 @@ func (this *LimitRangeSpec) String() string {
     	}, "")
     	return s
     }
    +func (this *LinuxContainerUser) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&LinuxContainerUser{`,
    +		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
    +		`GID:` + fmt.Sprintf("%v", this.GID) + `,`,
    +		`SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *List) String() string {
     	if this == nil {
     		return "nil"
    @@ -27871,6 +28420,16 @@ func (this *NodeDaemonEndpoints) String() string {
     	}, "")
     	return s
     }
    +func (this *NodeFeatures) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&NodeFeatures{`,
    +		`SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *NodeList) String() string {
     	if this == nil {
     		return "nil"
    @@ -27914,6 +28473,7 @@ func (this *NodeRuntimeHandlerFeatures) String() string {
     	}
     	s := strings.Join([]string{`&NodeRuntimeHandlerFeatures{`,
     		`RecursiveReadOnlyMounts:` + valueToStringGenerated(this.RecursiveReadOnlyMounts) + `,`,
    +		`UserNamespaces:` + valueToStringGenerated(this.UserNamespaces) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -28049,6 +28609,7 @@ func (this *NodeStatus) String() string {
     		`VolumesAttached:` + repeatedStringForVolumesAttached + `,`,
     		`Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`,
     		`RuntimeHandlers:` + repeatedStringForRuntimeHandlers + `,`,
    +		`Features:` + strings.Replace(this.Features.String(), "NodeFeatures", "NodeFeatures", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -28577,7 +29138,8 @@ func (this *PodResourceClaim) String() string {
     	}
     	s := strings.Join([]string{`&PodResourceClaim{`,
     		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`Source:` + strings.Replace(strings.Replace(this.Source.String(), "ClaimSource", "ClaimSource", 1), `&`, ``, 1) + `,`,
    +		`ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`,
    +		`ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -28624,6 +29186,7 @@ func (this *PodSecurityContext) String() string {
     		`FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`,
     		`SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`,
     		`AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`,
    +		`SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -29125,6 +29688,7 @@ func (this *ResourceClaim) String() string {
     	}
     	s := strings.Join([]string{`&ResourceClaim{`,
     		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Request:` + fmt.Sprintf("%v", this.Request) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -29141,6 +29705,17 @@ func (this *ResourceFieldSelector) String() string {
     	}, "")
     	return s
     }
    +func (this *ResourceHealth) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceHealth{`,
    +		`ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`,
    +		`Health:` + fmt.Sprintf("%v", this.Health) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *ResourceQuota) String() string {
     	if this == nil {
     		return "nil"
    @@ -29259,6 +29834,22 @@ func (this *ResourceRequirements) String() string {
     	}, "")
     	return s
     }
    +func (this *ResourceStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForResources := "[]ResourceHealth{"
    +	for _, f := range this.Resources {
    +		repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceHealth", "ResourceHealth", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForResources += "}"
    +	s := strings.Join([]string{`&ResourceStatus{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Resources:` + repeatedStringForResources + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *SELinuxOptions) String() string {
     	if this == nil {
     		return "nil"
    @@ -29967,6 +30558,7 @@ func (this *VolumeSource) String() string {
     		`StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`,
     		`CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`,
     		`Ephemeral:` + strings.Replace(this.Ephemeral.String(), "EphemeralVolumeSource", "EphemeralVolumeSource", 1) + `,`,
    +		`Image:` + strings.Replace(this.Image.String(), "ImageVolumeSource", "ImageVolumeSource", 1) + `,`,
     		`}`,
     	}, "")
     	return s
    @@ -32985,122 +33577,6 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ClaimSource) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ClaimSource: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ClaimSource: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.ResourceClaimName = &s
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.ResourceClaimTemplateName = &s
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
     func (m *ClientIPConfig) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    @@ -37312,6 +37788,162 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 13:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.User == nil {
    +				m.User = &ContainerUser{}
    +			}
    +			if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 14:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{})
    +			if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ContainerUser) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ContainerUser: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ContainerUser: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Linux == nil {
    +				m.Linux = &LinuxContainerUser{}
    +			}
    +			if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -43256,11 +43888,369 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Path = string(dAtA[iNdEx:postIndex])
    +			m.Path = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := HostPathType(dAtA[iNdEx:postIndex])
    +			m.Type = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.TargetPortal = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.IQN = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
    +			}
    +			m.Lun = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.Lun |= int32(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ISCSIInterface = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.FSType = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 6:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.ReadOnly = bool(v != 0)
    +		case 7:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex]))
     			iNdEx = postIndex
    -		case 2:
    +		case 8:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.DiscoveryCHAPAuth = bool(v != 0)
    +		case 10:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.SecretRef == nil {
    +				m.SecretRef = &SecretReference{}
    +			}
    +			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 11:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.SessionCHAPAuth = bool(v != 0)
    +		case 12:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -43288,8 +44278,8 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := HostPathType(dAtA[iNdEx:postIndex])
    -			m.Type = &s
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.InitiatorName = &s
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -43312,7 +44302,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
    +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -43335,10 +44325,10 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    @@ -43590,7 +44580,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
     				return io.ErrUnexpectedEOF
     			}
     			if m.SecretRef == nil {
    -				m.SecretRef = &SecretReference{}
    +				m.SecretRef = &LocalObjectReference{}
     			}
     			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
    @@ -43670,7 +44660,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error {
    +func (m *ImageVolumeSource) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -43693,182 +44683,15 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ImageVolumeSource: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ImageVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.TargetPortal = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.IQN = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
    -			}
    -			m.Lun = 0
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				m.Lun |= int32(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.ISCSIInterface = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.FSType = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 6:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.ReadOnly = bool(v != 0)
    -		case 7:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -43896,87 +44719,11 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex]))
    +			m.Reference = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 8:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.DiscoveryCHAPAuth = bool(v != 0)
    -		case 10:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.SecretRef == nil {
    -				m.SecretRef = &LocalObjectReference{}
    -			}
    -			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 11:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.SessionCHAPAuth = bool(v != 0)
    -		case 12:
    +		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -44004,8 +44751,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.InitiatorName = &s
    +			m.PullPolicy = PullPolicy(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -45344,17 +46090,218 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group")
    +			return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, LimitRange{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Limits = append(m.Limits, LimitRangeItem{})
    +			if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *LinuxContainerUser) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: LinuxContainerUser: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: LinuxContainerUser: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
     			}
    -			var msglen int
    +			m.UID = 0
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -45364,30 +46311,16 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				m.UID |= int64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
     		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType)
     			}
    -			var msglen int
    +			m.GID = 0
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -45397,110 +46330,87 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				m.GID |= int64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, LimitRange{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    +		case 3:
    +			if wireType == 0 {
    +				var v int64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					v |= int64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
     				}
    -				if iNdEx >= l {
    +				m.SupplementalGroups = append(m.SupplementalGroups, v)
    +			} else if wireType == 2 {
    +				var packedLen int
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					packedLen |= int(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				if packedLen < 0 {
    +					return ErrInvalidLengthGenerated
    +				}
    +				postIndex := iNdEx + packedLen
    +				if postIndex < 0 {
    +					return ErrInvalidLengthGenerated
    +				}
    +				if postIndex > l {
     					return io.ErrUnexpectedEOF
     				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    +				var elementCount int
    +				var count int
    +				for _, integer := range dAtA[iNdEx:postIndex] {
    +					if integer < 128 {
    +						count++
    +					}
     				}
    +				elementCount = count
    +				if elementCount != 0 && len(m.SupplementalGroups) == 0 {
    +					m.SupplementalGroups = make([]int64, 0, elementCount)
    +				}
    +				for iNdEx < postIndex {
    +					var v int64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						v |= int64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					m.SupplementalGroups = append(m.SupplementalGroups, v)
    +				}
    +			} else {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType)
     			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Limits = append(m.Limits, LimitRangeItem{})
    -			if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -47950,17 +48860,100 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group")
    +			return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *NodeFeatures) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: NodeFeatures: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: NodeFeatures: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType)
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType)
     			}
    -			var msglen int
    +			var v int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -47970,25 +48963,13 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				v |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    +			b := bool(v != 0)
    +			m.SupplementalGroupsPolicy = &b
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -48377,6 +49358,27 @@ func (m *NodeRuntimeHandlerFeatures) Unmarshal(dAtA []byte) error {
     			}
     			b := bool(v != 0)
     			m.RecursiveReadOnlyMounts = &b
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaces", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.UserNamespaces = &b
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -49637,6 +50639,42 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 13:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Features == nil {
    +				m.Features = &NodeFeatures{}
    +			}
    +			if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -56370,11 +57408,11 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error {
     			}
     			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 2:
    +		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -56384,24 +57422,57 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.ResourceClaimName = &s
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType)
     			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.ResourceClaimTemplateName = &s
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -57018,6 +58089,39 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 12:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex])
    +			m.SupplementalGroupsPolicy = &s
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -62585,7 +63689,121 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
    +func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Request = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -62608,15 +63826,15 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -62644,7 +63862,72 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    +			m.ContainerName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Resource = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -62667,7 +63950,7 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error {
    +func (m *ResourceHealth) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -62690,15 +63973,15 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ResourceHealth: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ResourceHealth: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -62726,11 +64009,11 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.ContainerName = string(dAtA[iNdEx:postIndex])
    +			m.ResourceID = ResourceID(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -62758,40 +64041,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Resource = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.Health = ResourceHealthStatus(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -63977,6 +65227,122 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *ResourceStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = ResourceName(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Resources = append(m.Resources, ResourceHealth{})
    +			if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *SELinuxOptions) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    @@ -73029,6 +74395,42 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error {
     				return err
     			}
     			iNdEx = postIndex
    +		case 30:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Image == nil {
    +				m.Image = &ImageVolumeSource{}
    +			}
    +			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
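The generated Unmarshal methods in the hunks above (ResourceHealth, ResourceStatus, VolumeSource, and the rest) all repeat one protobuf wire-format pattern: decode a varint key, split it into a field number and wire type, and for wire type 2 slice out a length-delimited payload before advancing iNdEx. Below is a minimal, standalone Go sketch of that decode loop; it is illustrative only (readVarint and the hand-built sample message are assumptions for the example, not part of the vendored code).

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 varint starting at data[i] and returns the
// decoded value together with the index of the first byte after it.
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, i, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// Field 1, wire type 2 (length-delimited), 3-byte payload "cpu" -- the same
	// shape the ResourceStatus.Name case above handles.
	msg := []byte{0x0A, 0x03, 'c', 'p', 'u'}

	key, i, err := readVarint(msg, 0)
	if err != nil {
		panic(err)
	}
	fieldNum, wireType := int32(key>>3), int(key&0x7)

	length, i, err := readVarint(msg, i)
	if err != nil {
		panic(err)
	}
	payload := msg[i : i+int(length)]

	fmt.Println(fieldNum, wireType, string(payload)) // prints: 1 2 cpu
}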
    diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
    index f3b47c722d..68ac80ed0b 100644
    --- a/vendor/k8s.io/api/core/v1/generated.proto
    +++ b/vendor/k8s.io/api/core/v1/generated.proto
    @@ -126,20 +126,24 @@ message AzureDiskVolumeSource {
     
       // cachingMode is the Host Caching mode: None, Read Only, Read Write.
       // +optional
    +  // +default=ref(AzureDataDiskCachingReadWrite)
       optional string cachingMode = 3;
     
       // fsType is Filesystem type to mount.
       // Must be a filesystem type supported by the host operating system.
       // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
       // +optional
    +  // +default="ext4"
       optional string fsType = 4;
     
       // readOnly Defaults to false (read/write). ReadOnly here will force
       // the ReadOnly setting in VolumeMounts.
       // +optional
    +  // +default=false
       optional bool readOnly = 5;
     
       // kind expected values are Shared: multiple blob disks per storage account  Dedicated: single blob disk per storage account  Managed: azure managed data disk (only in managed availability set). defaults to shared
    +  // +default=ref(AzureSharedBlobDisk)
       optional string kind = 6;
     }
     
    @@ -182,7 +186,7 @@ message Binding {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // The target object that you want to bind to the standard object.
       optional ObjectReference target = 2;
    @@ -422,30 +426,6 @@ message CinderVolumeSource {
       optional LocalObjectReference secretRef = 4;
     }
     
    -// ClaimSource describes a reference to a ResourceClaim.
    -//
    -// Exactly one of these fields should be set.  Consumers of this type must
    -// treat an empty object as if it has an unknown value.
    -message ClaimSource {
    -  // ResourceClaimName is the name of a ResourceClaim object in the same
    -  // namespace as this pod.
    -  optional string resourceClaimName = 1;
    -
    -  // ResourceClaimTemplateName is the name of a ResourceClaimTemplate
    -  // object in the same namespace as this pod.
    -  //
    -  // The template will be used to create a new ResourceClaim, which will
    -  // be bound to this pod. When this pod is deleted, the ResourceClaim
    -  // will also be deleted. The pod name and resource name, along with a
    -  // generated component, will be used to form a unique name for the
    -  // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
    -  //
    -  // This field is immutable and no changes will be made to the
    -  // corresponding ResourceClaim by the control plane after creating the
    -  // ResourceClaim.
    -  optional string resourceClaimTemplateName = 2;
    -}
    -
     // ClientIPConfig represents the configurations of Client IP based session affinity.
     message ClientIPConfig {
       // timeoutSeconds specifies the seconds of ClientIP type session sticky time.
    @@ -475,7 +455,7 @@ message ClusterTrustBundleProjection {
       // interpreted as "match nothing".  If set but empty, interpreted as "match
       // everything".
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3;
     
       // If true, don't block pod startup if the referenced ClusterTrustBundle(s)
       // aren't available.  If using name, then the named ClusterTrustBundle is
    @@ -516,7 +496,7 @@ message ComponentStatus {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // List of component conditions observed
       // +optional
    @@ -533,7 +513,7 @@ message ComponentStatusList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ComponentStatus objects.
       repeated ComponentStatus items = 2;
    @@ -544,7 +524,7 @@ message ConfigMap {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Immutable, if set to true, ensures that data stored in the ConfigMap cannot
       // be updated (only object metadata can be modified).
    @@ -604,7 +584,7 @@ message ConfigMapKeySelector {
     message ConfigMapList {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of ConfigMaps.
       repeated ConfigMap items = 2;
    @@ -984,7 +964,7 @@ message ContainerState {
     message ContainerStateRunning {
       // Time at which the container was last (re-)started
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1;
     }
     
     // ContainerStateTerminated is a terminated state of a container.
    @@ -1006,11 +986,11 @@ message ContainerStateTerminated {
     
       // Time at which previous execution of the container started
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5;
     
       // Time at which the container last terminated
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6;
     
   // Container's ID in the format '<type>://<container_id>'
       // +optional
    @@ -1093,7 +1073,7 @@ message ContainerStatus {
       // and after successfully admitting desired pod resize.
       // +featureGate=InPlacePodVerticalScaling
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 10;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 10;
     
       // Resources represents the compute resource requests and limits that have been successfully
       // enacted on the running container after it has been started or has been successfully resized.
    @@ -1109,6 +1089,29 @@ message ContainerStatus {
       // +listMapKey=mountPath
       // +featureGate=RecursiveReadOnlyMounts
       repeated VolumeMountStatus volumeMounts = 12;
    +
    +  // User represents user identity information initially attached to the first process of the container
    +  // +featureGate=SupplementalGroupsPolicy
    +  // +optional
    +  optional ContainerUser user = 13;
    +
    +  // AllocatedResourcesStatus represents the status of various resources
    +  // allocated for this Pod.
    +  // +featureGate=ResourceHealthStatus
    +  // +optional
    +  // +patchMergeKey=name
    +  // +patchStrategy=merge
    +  // +listType=map
    +  // +listMapKey=name
    +  repeated ResourceStatus allocatedResourcesStatus = 14;
    +}
    +
    +// ContainerUser represents user identity information
    +message ContainerUser {
    +  // Linux holds user identity information initially attached to the first process of the containers in Linux.
    +  // Note that the actual running identity can be changed if the process has enough privilege to do so.
    +  // +optional
    +  optional LinuxContainerUser linux = 1;
     }
     
     // DaemonEndpoint contains information about a single Daemon endpoint.
    @@ -1188,7 +1191,7 @@ message EmptyDirVolumeSource {
       // The default is nil which means that the limit is undefined.
       // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2;
     }
     
     // EndpointAddress is a tuple that describes single IP address.
    @@ -1300,7 +1303,7 @@ message Endpoints {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // The set of all endpoints is the union of all subsets. Addresses are placed into
       // subsets according to the IPs they share. A single address with multiple ports,
    @@ -1319,7 +1322,7 @@ message EndpointsList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of endpoints.
       repeated Endpoints items = 2;
    @@ -1622,7 +1625,7 @@ message EphemeralVolumeSource {
     message Event {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // The object that this event is about.
       optional ObjectReference involvedObject = 2;
    @@ -1644,11 +1647,11 @@ message Event {
     
       // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6;
     
       // The time at which the most recent occurrence of this event was recorded.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7;
     
       // The number of times this event has occurred.
       // +optional
    @@ -1660,7 +1663,7 @@ message Event {
     
       // Time when this Event was first observed.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10;
     
       // Data about the Event series this event represents or nil if it's a singleton Event.
       // +optional
    @@ -1688,7 +1691,7 @@ message EventList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of events
       repeated Event items = 2;
    @@ -1701,7 +1704,7 @@ message EventSeries {
       optional int32 count = 1;
     
       // Time of the last occurrence observed
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
     }
     
     // EventSource contains information for an event.
    @@ -1954,7 +1957,7 @@ message HTTPGetAction {
       // Name or number of the port to access on the container.
       // Number must be in the range 1 to 65535.
       // Name must be an IANA_SVC_NAME.
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
     
       // Host name to connect to, defaults to the pod IP. You probably want to set
       // "Host" in httpHeaders instead.
    @@ -1997,6 +2000,7 @@ message HostAlias {
     // HostIP represents a single IP address allocated to the host.
     message HostIP {
       // IP is the IP address assigned to the host
    +  // +required
       optional string ip = 1;
     }
     
    @@ -2032,6 +2036,7 @@ message ISCSIPersistentVolumeSource {
       // iscsiInterface is the interface Name that uses an iSCSI transport.
       // Defaults to 'default' (tcp).
       // +optional
    +  // +default="default"
       optional string iscsiInterface = 4;
     
       // fsType is the filesystem type of the volume that you want to mount.
    @@ -2089,6 +2094,7 @@ message ISCSIVolumeSource {
       // iscsiInterface is the interface Name that uses an iSCSI transport.
       // Defaults to 'default' (tcp).
       // +optional
    +  // +default="default"
       optional string iscsiInterface = 4;
     
       // fsType is the filesystem type of the volume that you want to mount.
    @@ -2129,6 +2135,26 @@ message ISCSIVolumeSource {
       optional string initiatorName = 12;
     }
     
    +// ImageVolumeSource represents a image volume resource.
    +message ImageVolumeSource {
    +  // Required: Image or artifact reference to be used.
    +  // Behaves in the same way as pod.spec.containers[*].image.
    +  // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
    +  // More info: https://kubernetes.io/docs/concepts/containers/images
    +  // This field is optional to allow higher level config management to default or override
    +  // container images in workload controllers like Deployments and StatefulSets.
    +  // +optional
    +  optional string reference = 1;
    +
    +  // Policy for pulling OCI objects. Possible values are:
    +  // Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
    +  // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
    +  // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
    +  // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
    +  // +optional
    +  optional string pullPolicy = 2;
    +}
    +
     // Maps a string key to a path within a volume.
     message KeyToPath {
       // key is the key to project.
    @@ -2202,7 +2228,7 @@ message LimitRange {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the limits enforced.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -2217,23 +2243,23 @@ message LimitRangeItem {
     
       // Max usage constraints on this kind by resource name.
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
     
       // Min usage constraints on this kind by resource name.
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
     
       // Default resource requirement limit value by resource name if resource limit is omitted.
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
     
       // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
     
       // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
     }
     
     // LimitRangeList is a list of LimitRange items.
    @@ -2241,7 +2267,7 @@ message LimitRangeList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of LimitRange objects.
       // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    @@ -2255,15 +2281,29 @@ message LimitRangeSpec {
       repeated LimitRangeItem limits = 1;
     }
     
    +// LinuxContainerUser represents user identity information in Linux containers
    +message LinuxContainerUser {
    +  // UID is the primary uid initially attached to the first process in the container
    +  optional int64 uid = 1;
    +
    +  // GID is the primary gid initially attached to the first process in the container
    +  optional int64 gid = 2;
    +
    +  // SupplementalGroups are the supplemental groups initially attached to the first process in the container
    +  // +optional
    +  // +listType=atomic
    +  repeated int64 supplementalGroups = 3;
    +}
    +
     // List holds a list of objects, which may not be known by the server.
     message List {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of objects
    -  repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
    +  repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
     }
     
     // LoadBalancerIngress represents the status of a load-balancer ingress point:
    @@ -2377,7 +2417,7 @@ message Namespace {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the behavior of the Namespace.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -2399,7 +2439,7 @@ message NamespaceCondition {
       optional string status = 2;
     
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
     
       // +optional
       optional string reason = 5;
    @@ -2413,7 +2453,7 @@ message NamespaceList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of Namespace objects in the list.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
    @@ -2451,7 +2491,7 @@ message Node {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the behavior of a node.
       // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -2509,11 +2549,11 @@ message NodeCondition {
     
       // Last time we got an update on a given condition.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3;
     
       // Last time the condition transit from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
     
       // (brief) reason for the condition's last transition.
       // +optional
    @@ -2588,12 +2628,21 @@ message NodeDaemonEndpoints {
       optional DaemonEndpoint kubeletEndpoint = 1;
     }
     
    +// NodeFeatures describes the set of features implemented by the CRI implementation.
    +// The features contained in the NodeFeatures should depend only on the cri implementation
    +// independent of runtime handlers.
    +message NodeFeatures {
    +  // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
    +  // +optional
    +  optional bool supplementalGroupsPolicy = 1;
    +}
    +
     // NodeList is the whole list of all Nodes which have been registered with master.
     message NodeList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of nodes
       repeated Node items = 2;
    @@ -2618,12 +2667,17 @@ message NodeRuntimeHandler {
       optional NodeRuntimeHandlerFeatures features = 2;
     }
     
    -// NodeRuntimeHandlerFeatures is a set of runtime features.
    +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
     message NodeRuntimeHandlerFeatures {
       // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
       // +featureGate=RecursiveReadOnlyMounts
       // +optional
       optional bool recursiveReadOnlyMounts = 1;
    +
    +  // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
    +  // +featureGate=UserNamespacesSupport
    +  // +optional
    +  optional bool userNamespaces = 2;
     }
     
     // A node selector represents the union of the results of one or more label queries
    @@ -2713,14 +2767,14 @@ message NodeSpec {
     // NodeStatus is information about the current status of a node.
     message NodeStatus {
       // Capacity represents the total resources of a node.
    -  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
    +  // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
     
       // Allocatable represents the resources of a node that are available for scheduling.
       // Defaults to Capacity.
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
     
       // NodePhase is the recently observed lifecycle phase of the node.
       // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
    @@ -2784,9 +2838,15 @@ message NodeStatus {
     
       // The available runtime handlers.
       // +featureGate=RecursiveReadOnlyMounts
    +  // +featureGate=UserNamespacesSupport
       // +optional
       // +listType=atomic
       repeated NodeRuntimeHandler runtimeHandlers = 12;
    +
    +  // Features describes the set of features implemented by the CRI implementation.
    +  // +featureGate=SupplementalGroupsPolicy
    +  // +optional
    +  optional NodeFeatures features = 13;
     }
     
     // NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
    @@ -2816,7 +2876,7 @@ message NodeSystemInfo {
       // Kubelet Version reported by the node.
       optional string kubeletVersion = 7;
     
    -  // KubeProxy Version reported by the node.
    +  // Deprecated: KubeProxy Version reported by the node.
       optional string kubeProxyVersion = 8;
     
       // The Operating System reported by the node
    @@ -2904,7 +2964,7 @@ message PersistentVolume {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec defines a specification of a persistent volume owned by the cluster.
       // Provisioned by an administrator.
    @@ -2925,7 +2985,7 @@ message PersistentVolumeClaim {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec defines the desired characteristics of a volume requested by a pod author.
       // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    @@ -2947,11 +3007,11 @@ message PersistentVolumeClaimCondition {
     
       // lastProbeTime is the time we probed the condition.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
     
       // lastTransitionTime is the time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
     
       // reason is a unique, this should be a short, machine understandable string that gives the reason
       // for condition's last transition. If it reports "Resizing" that means the underlying
    @@ -2969,7 +3029,7 @@ message PersistentVolumeClaimList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of persistent volume claims.
       // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    @@ -2987,7 +3047,7 @@ message PersistentVolumeClaimSpec {
     
       // selector is a label query over volumes to consider for binding.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
     
       // resources represents the minimum resources the volume should have.
       // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
    @@ -3059,7 +3119,7 @@ message PersistentVolumeClaimSpec {
       // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
       // exists.
       // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
    -  // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
    +  // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional string volumeAttributesClassName = 9;
    @@ -3079,7 +3139,7 @@ message PersistentVolumeClaimStatus {
     
       // capacity represents the actual resources of the underlying volume.
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
     
       // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being
       // resized then the Condition will be set to 'Resizing'.
    @@ -3114,7 +3174,7 @@ message PersistentVolumeClaimStatus {
       // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
       // +featureGate=RecoverVolumeExpansionFailure
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 5;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 5;
     
       // allocatedResourceStatuses stores status of resource being resized for the given PVC.
       // Key names follow standard Kubernetes label syntax. Valid values are either:
    @@ -3158,14 +3218,14 @@ message PersistentVolumeClaimStatus {
     
       // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
       // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
    -  // This is an alpha field and requires enabling VolumeAttributesClass feature.
    +  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional string currentVolumeAttributesClassName = 8;
     
       // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
       // When this is unset, there is no ModifyVolume operation being attempted.
    -  // This is an alpha field and requires enabling VolumeAttributesClass feature.
    +  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional ModifyVolumeStatus modifyVolumeStatus = 9;
    @@ -3179,7 +3239,7 @@ message PersistentVolumeClaimTemplate {
       // validation.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // The specification for the PersistentVolumeClaim. The entire content is
       // copied unchanged into the PVC that gets created from this
    @@ -3208,7 +3268,7 @@ message PersistentVolumeList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of persistent volumes.
       // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
    @@ -3327,7 +3387,7 @@ message PersistentVolumeSpec {
       // capacity is the description of the persistent volume's resources and capacity.
       // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
     
       // persistentVolumeSource is the actual volume backing the persistent volume.
       optional PersistentVolumeSource persistentVolumeSource = 2;
    @@ -3382,7 +3442,7 @@ message PersistentVolumeSpec {
       // after a volume has been updated successfully to a new class.
       // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
       // PersistentVolumeClaims during the binding process.
    -  // This is an alpha field and requires enabling VolumeAttributesClass feature.
    +  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
       // +featureGate=VolumeAttributesClass
       // +optional
       optional string volumeAttributesClassName = 10;
    @@ -3406,10 +3466,8 @@ message PersistentVolumeStatus {
     
       // lastPhaseTransitionTime is the time the phase transitioned from one to another
       // and automatically resets to current time everytime a volume phase transitions.
    -  // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).
    -  // +featureGate=PersistentVolumeLastPhaseTransitionTime
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4;
     }
     
     // Represents a Photon Controller persistent disk resource.
    @@ -3429,7 +3487,7 @@ message Pod {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the pod.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -3482,7 +3540,7 @@ message PodAffinityTerm {
       // A label query over a set of resources, in this case pods.
       // If it's null, this PodAffinityTerm matches with no Pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
     
       // namespaces specifies a static list of namespace names that the term applies to.
       // The term is applied to the union of the namespaces listed in this field
    @@ -3505,7 +3563,7 @@ message PodAffinityTerm {
       // null selector and null or empty namespaces list means "this pod's namespace".
       // An empty selector ({}) matches all namespaces.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4;
     
       // MatchLabelKeys is a set of pod label keys to select which pods will
       // be taken into consideration. The keys are used to lookup values from the
    @@ -3515,7 +3573,8 @@ message PodAffinityTerm {
       // pod labels will be ignored. The default value is empty.
       // The same key is forbidden to exist in both matchLabelKeys and labelSelector.
       // Also, matchLabelKeys cannot be set when labelSelector isn't set.
    -  // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    +  // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
    +  //
       // +listType=atomic
       // +optional
       repeated string matchLabelKeys = 5;
    @@ -3528,7 +3587,8 @@ message PodAffinityTerm {
       // pod labels will be ignored. The default value is empty.
       // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
       // Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
    -  // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    +  // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
    +  //
       // +listType=atomic
       // +optional
       repeated string mismatchLabelKeys = 6;
    @@ -3607,11 +3667,11 @@ message PodCondition {
     
       // Last time we probed the condition.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
     
       // Last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
     
       // Unique, one-word, CamelCase reason for the condition's last transition.
       // +optional
    @@ -3693,6 +3753,7 @@ message PodExecOptions {
     // PodIP represents a single IP address allocated to the pod.
     message PodIP {
       // IP is the IP address assigned to the pod
    +  // +required
       optional string ip = 1;
     }
     
    @@ -3701,7 +3762,7 @@ message PodList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of pods.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
    @@ -3734,7 +3795,7 @@ message PodLogOptions {
       // If this value is in the future, no logs will be returned.
       // Only one of sinceSeconds or sinceTime may be specified.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
     
       // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
       // of log output. Defaults to false.
    @@ -3798,7 +3859,10 @@ message PodReadinessGate {
       optional string conditionType = 1;
     }
     
    -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
    +// PodResourceClaim references exactly one ResourceClaim, either directly
    +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
    +// for the pod.
    +//
     // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
     // Containers that need access to the ResourceClaim reference it with this name.
     message PodResourceClaim {
    @@ -3806,8 +3870,29 @@ message PodResourceClaim {
       // This must be a DNS_LABEL.
       optional string name = 1;
     
    -  // Source describes where to find the ResourceClaim.
    -  optional ClaimSource source = 2;
    +  // ResourceClaimName is the name of a ResourceClaim object in the same
    +  // namespace as this pod.
    +  //
    +  // Exactly one of ResourceClaimName and ResourceClaimTemplateName must
    +  // be set.
    +  optional string resourceClaimName = 3;
    +
    +  // ResourceClaimTemplateName is the name of a ResourceClaimTemplate
    +  // object in the same namespace as this pod.
    +  //
    +  // The template will be used to create a new ResourceClaim, which will
    +  // be bound to this pod. When this pod is deleted, the ResourceClaim
    +  // will also be deleted. The pod name and resource name, along with a
    +  // generated component, will be used to form a unique name for the
    +  // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
    +  //
    +  // This field is immutable and no changes will be made to the
    +  // corresponding ResourceClaim by the control plane after creating the
    +  // ResourceClaim.
    +  //
    +  // Exactly one of ResourceClaimName and ResourceClaimTemplateName must
    +  // be set.
    +  optional string resourceClaimTemplateName = 4;
     }
     
     // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
    @@ -3820,7 +3905,7 @@ message PodResourceClaimStatus {
       optional string name = 1;
     
       // ResourceClaimName is the name of the ResourceClaim that was
    -  // generated for the Pod in the namespace of the Pod. It this is
    +  // generated for the Pod in the namespace of the Pod. If this is
       // unset, then generating a ResourceClaim was not necessary. The
       // pod.spec.resourceClaims entry can be ignored in this case.
       //
    @@ -3882,17 +3967,29 @@ message PodSecurityContext {
       // +optional
       optional bool runAsNonRoot = 3;
     
    -  // A list of groups applied to the first process run in each container, in addition
    -  // to the container's primary GID, the fsGroup (if specified), and group memberships
    -  // defined in the container image for the uid of the container process. If unspecified,
    -  // no additional groups are added to any container. Note that group memberships
    -  // defined in the container image for the uid of the container process are still effective,
    -  // even if they are not included in this list.
    +  // A list of groups applied to the first process run in each container, in
    +  // addition to the container's primary GID and fsGroup (if specified).  If
    +  // the SupplementalGroupsPolicy feature is enabled, the
    +  // supplementalGroupsPolicy field determines whether these are in addition
    +  // to or instead of any group memberships defined in the container image.
    +  // If unspecified, no additional groups are added, though group memberships
    +  // defined in the container image may still be used, depending on the
    +  // supplementalGroupsPolicy field.
       // Note that this field cannot be set when spec.os.name is windows.
       // +optional
       // +listType=atomic
       repeated int64 supplementalGroups = 4;
     
    +  // Defines how supplemental groups of the first container processes are calculated.
    +  // Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
    +  // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
    +  // and the container runtime must implement support for this feature.
    +  // Note that this field cannot be set when spec.os.name is windows.
    +  // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
    +  // +featureGate=SupplementalGroupsPolicy
    +  // +optional
    +  optional string supplementalGroupsPolicy = 12;
    +
       // A special supplemental group that applies to all containers in a pod.
       // Some volume types allow the Kubelet to change the ownership of that volume
       // to be owned by the pod:
    @@ -3939,7 +4036,7 @@ message PodSecurityContext {
     message PodSignature {
       // Reference to controller whose pods should avoid this node.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1;
     }
     
     // PodSpec is a description of a pod.
    @@ -4048,9 +4145,11 @@ message PodSpec {
       // +optional
       optional bool automountServiceAccountToken = 21;
     
    -  // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
    -  // the scheduler simply schedules this pod onto that node, assuming that it fits resource
    -  // requirements.
    +  // NodeName indicates in which node this pod is scheduled.
    +  // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
    +  // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
    +  // This field should not be used to express a desire for the pod to be scheduled on a specific node.
    +  // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
       // +optional
       optional string nodeName = 10;
     
    @@ -4189,7 +4288,7 @@ message PodSpec {
       // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
       // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
       // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
     
       // TopologySpreadConstraints describes how a group of pods ought to spread across topology
       // domains. Scheduler will schedule pods in a way which abides by the constraints.
    @@ -4230,6 +4329,7 @@ message PodSpec {
       // - spec.securityContext.runAsUser
       // - spec.securityContext.runAsGroup
       // - spec.securityContext.supplementalGroups
    +  // - spec.securityContext.supplementalGroupsPolicy
       // - spec.containers[*].securityContext.appArmorProfile
       // - spec.containers[*].securityContext.seLinuxOptions
       // - spec.containers[*].securityContext.seccompProfile
    @@ -4375,7 +4475,7 @@ message PodStatus {
       // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
       // This is before the Kubelet pulled the container image(s) for the pod.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
     
       // The list has one entry per init container in the manifest. The most recent successful
       // init container will have ready = true, the most recently started container will have
    @@ -4423,7 +4523,7 @@ message PodStatusResult {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Most recently observed status of the pod.
       // This data may not be up to date.
    @@ -4439,7 +4539,7 @@ message PodTemplate {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Template defines the pods that will be created from this pod template.
       // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -4452,7 +4552,7 @@ message PodTemplateList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of pod templates
       repeated PodTemplate items = 2;
    @@ -4463,7 +4563,7 @@ message PodTemplateSpec {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the pod.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -4525,7 +4625,7 @@ message PreferAvoidPodsEntry {
     
       // Time at which this entry was added to the list.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2;
     
       // (brief) reason why this entry was added to the list.
       // +optional
    @@ -4614,7 +4714,8 @@ message ProbeHandler {
     
     // Represents a projected volume source
     message ProjectedVolumeSource {
    -  // sources is the list of volume projections
    +  // sources is the list of volume projections. Each entry in this list
    +  // handles one source.
       // +optional
       // +listType=atomic
       repeated VolumeProjection sources = 1;
    @@ -4685,18 +4786,21 @@ message RBDPersistentVolumeSource {
       // Default is rbd.
       // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
       // +optional
    +  // +default="rbd"
       optional string pool = 4;
     
       // user is the rados user name.
       // Default is admin.
       // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
       // +optional
    +  // +default="admin"
       optional string user = 5;
     
       // keyring is the path to key ring for RBDUser.
       // Default is /etc/ceph/keyring.
       // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
       // +optional
    +  // +default="/etc/ceph/keyring"
       optional string keyring = 6;
     
       // secretRef is name of the authentication secret for RBDUser. If provided
    @@ -4737,18 +4841,21 @@ message RBDVolumeSource {
       // Default is rbd.
       // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
       // +optional
    +  // +default="rbd"
       optional string pool = 4;
     
       // user is the rados user name.
       // Default is admin.
       // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
       // +optional
    +  // +default="admin"
       optional string user = 5;
     
       // keyring is the path to key ring for RBDUser.
       // Default is /etc/ceph/keyring.
       // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
       // +optional
    +  // +default="/etc/ceph/keyring"
       optional string keyring = 6;
     
       // secretRef is name of the authentication secret for RBDUser. If provided
    @@ -4770,7 +4877,7 @@ message RangeAllocation {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Range is string that identifies the range represented by 'data'.
       optional string range = 2;
    @@ -4785,7 +4892,7 @@ message ReplicationController {
       // be the same as the Pod(s) that the replication controller manages.
       // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the specification of the desired behavior of the replication controller.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -4811,7 +4918,7 @@ message ReplicationControllerCondition {
     
       // The last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -4827,7 +4934,7 @@ message ReplicationControllerList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of replication controllers.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    @@ -4904,6 +5011,13 @@ message ResourceClaim {
       // the Pod where this field is used. It makes that resource available
       // inside a container.
       optional string name = 1;
    +
    +  // Request is the name chosen for a request in the referenced claim.
    +  // If empty, everything from the claim is made available, otherwise
    +  // only the result of this request.
    +  //
    +  // +optional
    +  optional string request = 2;
     }
     
     // ResourceFieldSelector represents container resources (cpu, memory) and their output format
    @@ -4918,7 +5032,26 @@ message ResourceFieldSelector {
     
       // Specifies the output format of the exposed resources, defaults to "1"
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3;
    +}
    +
    +// ResourceHealth represents the health of a resource. It has the latest device health information.
    +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
    +message ResourceHealth {
    +  // ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
    +  optional string resourceID = 1;
    +
    +  // Health of the resource.
    +  // can be one of:
    +  //  - Healthy: operates as normal
    +  //  - Unhealthy: reported unhealthy. We consider this a temporary health issue
    +  //               since we do not have a mechanism today to distinguish
    +  //               temporary and permanent issues.
    +  //  - Unknown: The status cannot be determined.
    +  //             For example, Device Plugin got unregistered and hasn't been re-registered since.
    +  //
    +  // In future we may want to introduce the PermanentlyUnhealthy Status.
    +  optional string health = 2;
     }
     
     // ResourceQuota sets aggregate quota restrictions enforced per namespace
    @@ -4926,7 +5059,7 @@ message ResourceQuota {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the desired quota.
       // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -4944,7 +5077,7 @@ message ResourceQuotaList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of ResourceQuota objects.
       // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
    @@ -4956,7 +5089,7 @@ message ResourceQuotaSpec {
       // hard is the set of desired hard limits for each named resource.
       // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
       // +optional
    -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
    +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
     
       // A collection of filters that must match each object tracked by a quota.
       // If not specified, the quota matches all objects.
    @@ -4976,11 +5109,11 @@ message ResourceQuotaStatus {
       // Hard is the set of enforced hard limits for each named resource.
       // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
       // +optional
    -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
    +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
     
       // Used is the current observed total usage of the resource in the namespace.
       // +optional
    -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2;
    +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2;
     }
     
     // ResourceRequirements describes the compute resource requirements.
    @@ -4988,14 +5121,14 @@ message ResourceRequirements {
       // Limits describes the maximum amount of compute resources allowed.
       // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
       // +optional
    -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
    +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
     
       // Requests describes the minimum amount of compute resources required.
       // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
       // otherwise to an implementation-defined value. Requests cannot exceed Limits.
       // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
       // +optional
    -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
    +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
     
       // Claims lists the names of resources, defined in spec.resourceClaims,
       // that are used by this container.
    @@ -5012,6 +5145,20 @@ message ResourceRequirements {
       repeated ResourceClaim claims = 3;
     }
     
    +message ResourceStatus {
    +  // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
    +  // +required
    +  optional string name = 1;
    +
    +  // List of unique resource health statuses. Each element in the list contains a unique resource ID and resource health.
    +  // At a minimum, ResourceID must uniquely identify the Resource
    +  // allocated to the Pod on the Node for the lifetime of a Pod.
    +  // See the ResourceID type for its definition.
    +  // +listType=map
    +  // +listMapKey=resourceID
    +  repeated ResourceHealth resources = 2;
    +}
    +
     // SELinuxOptions are the labels to be applied to the container
     message SELinuxOptions {
       // User is a SELinux user label that applies to the container.
    @@ -5058,6 +5205,7 @@ message ScaleIOPersistentVolumeSource {
       // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
       // Default is ThinProvisioned.
       // +optional
    +  // +default="ThinProvisioned"
       optional string storageMode = 7;
     
       // volumeName is the name of a volume already created in the ScaleIO system
    @@ -5069,6 +5217,7 @@ message ScaleIOPersistentVolumeSource {
       // Ex. "ext4", "xfs", "ntfs".
       // Default is "xfs"
       // +optional
    +  // +default="xfs"
       optional string fsType = 9;
     
       // readOnly defaults to false (read/write). ReadOnly here will force
    @@ -5104,6 +5253,7 @@ message ScaleIOVolumeSource {
       // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
       // Default is ThinProvisioned.
       // +optional
    +  // +default="ThinProvisioned"
       optional string storageMode = 7;
     
       // volumeName is the name of a volume already created in the ScaleIO system
    @@ -5115,6 +5265,7 @@ message ScaleIOVolumeSource {
       // Ex. "ext4", "xfs", "ntfs".
       // Default is "xfs".
       // +optional
    +  // +default="xfs"
       optional string fsType = 9;
     
       // readOnly Defaults to false (read/write). ReadOnly here will force
    @@ -5179,7 +5330,7 @@ message Secret {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Immutable, if set to true, ensures that data stored in the Secret cannot
       // be updated (only object metadata can be modified).
    @@ -5242,7 +5393,7 @@ message SecretList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of secret objects.
       // More info: https://kubernetes.io/docs/concepts/configuration/secret
    @@ -5398,7 +5549,7 @@ message SecurityContext {
       optional bool allowPrivilegeEscalation = 7;
     
       // procMount denotes the type of proc mount to use for the containers.
    -  // The default is DefaultProcMount which uses the container runtime defaults for
    +  // The default value is Default which uses the container runtime defaults for
       // readonly paths and masked paths.
       // This requires the ProcMountType feature flag to be enabled.
       // Note that this field cannot be set when spec.os.name is windows.
    @@ -5433,7 +5584,7 @@ message Service {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the behavior of a service.
       // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -5456,7 +5607,7 @@ message ServiceAccount {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
       // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
    @@ -5489,7 +5640,7 @@ message ServiceAccountList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ServiceAccounts.
       // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
    @@ -5527,7 +5678,7 @@ message ServiceList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of services
       repeated Service items = 2;
    @@ -5579,7 +5730,7 @@ message ServicePort {
       // omitted or set equal to the 'port' field.
       // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4;
     
       // The port on each node on which this service is exposed when type is
       // NodePort or LoadBalancer.  Usually assigned by the system. If a value is
    @@ -5864,7 +6015,7 @@ message ServiceStatus {
       // +patchStrategy=merge
       // +listType=map
       // +listMapKey=type
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2;
     }
     
     // SessionAffinityConfig represents the configurations of session affinity.
    @@ -5958,7 +6109,7 @@ message TCPSocketAction {
       // Number or name of the port to access on the container.
       // Number must be in the range 1 to 65535.
       // Name must be an IANA_SVC_NAME.
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1;
     
       // Optional: Host name to connect to, defaults to the pod IP.
       // +optional
    @@ -5983,7 +6134,7 @@ message Taint {
       // TimeAdded represents the time at which the taint was added.
       // It is only written for NoExecute taints.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
     }
     
     // The pod this Toleration is attached to tolerates any taint that matches
    @@ -6107,7 +6258,7 @@ message TopologySpreadConstraint {
       // Pods that match this label selector are counted to determine the number of pods
       // in their corresponding topology domain.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4;
     
       // MinDomains indicates a minimum number of eligible domains.
       // When the number of eligible domains with matching topology keys is less than minDomains,
    @@ -6313,7 +6464,8 @@ message VolumeNodeAffinity {
       optional NodeSelector required = 1;
     }
     
    -// Projection that may be projected along with other supported volume types
    +// Projection that may be projected along with other supported volume types.
    +// Exactly one of these fields must be set.
     message VolumeProjection {
       // secret information about the secret data to project
       // +optional
    @@ -6355,14 +6507,14 @@ message VolumeResourceRequirements {
       // Limits describes the maximum amount of compute resources allowed.
       // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
       // +optional
    -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
    +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
     
       // Requests describes the minimum amount of compute resources required.
       // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
       // otherwise to an implementation-defined value. Requests cannot exceed Limits.
       // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
       // +optional
    -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
    +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
     }
     
     // Represents the source of a volume to mount.
    @@ -6530,6 +6682,24 @@ message VolumeSource {
       //
       // +optional
       optional EphemeralVolumeSource ephemeral = 29;
    +
    +  // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
    +  // The volume is resolved at pod startup depending on which PullPolicy value is provided:
    +  //
    +  // - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
    +  // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
    +  // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
    +  //
    +  // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
    +  // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
    +  // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
    +  // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
    +  // The volume will be mounted read-only (ro) with non-executable files (noexec).
    +  // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
    +  // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
    +  // +featureGate=ImageVolume
    +  // +optional
    +  optional ImageVolumeSource image = 30;
     }
     
     // Represents a vSphere volume resource.
    diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
    index 328df9a7b7..3a74138bae 100644
    --- a/vendor/k8s.io/api/core/v1/types.go
    +++ b/vendor/k8s.io/api/core/v1/types.go
    @@ -181,6 +181,23 @@ type VolumeSource struct {
     	//
     	// +optional
     	Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
    +	// image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
    +	// The volume is resolved at pod startup depending on which PullPolicy value is provided:
    +	//
    +	// - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
    +	// - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
    +	// - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
    +	//
    +	// The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
    +	// A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
    +	// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
    +	// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
    +	// The volume will be mounted read-only (ro) with non-executable files (noexec).
    +	// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
    +	// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
    +	// +featureGate=ImageVolume
    +	// +optional
    +	Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"`
     }
     
     // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
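A minimal sketch, not part of the vendored diff, of how the new image volume might be populated from client code. The ImageVolumeSource field names (Reference, PullPolicy) are assumptions here, since that message is not shown in this hunk; the image reference is hypothetical.

package example

import corev1 "k8s.io/api/core/v1"

// modelVolume mounts an OCI artifact read-only into containers that reference
// the volume by name.
var modelVolume = corev1.Volume{
	Name: "models",
	VolumeSource: corev1.VolumeSource{
		Image: &corev1.ImageVolumeSource{
			Reference:  "registry.example.com/models:v1", // hypothetical OCI reference
			PullPolicy: corev1.PullIfNotPresent,          // pull only if not already on disk
		},
	},
}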
    @@ -295,6 +312,7 @@ const (
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PersistentVolume (PV) is a storage resource provisioned by an administrator.
     // It is analogous to a node.
    @@ -371,7 +389,7 @@ type PersistentVolumeSpec struct {
     	// after a volume has been updated successfully to a new class.
     	// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
     	// PersistentVolumeClaims during the binding process.
    -	// This is an alpha field and requires enabling VolumeAttributesClass feature.
    +	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
    @@ -425,13 +443,12 @@ type PersistentVolumeStatus struct {
     	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
     	// lastPhaseTransitionTime is the time the phase transitioned from one to another
     	// and automatically resets to current time everytime a volume phase transitions.
    -	// This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).
    -	// +featureGate=PersistentVolumeLastPhaseTransitionTime
     	// +optional
     	LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"`
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PersistentVolumeList is a list of PersistentVolume items.
     type PersistentVolumeList struct {
    @@ -447,6 +464,7 @@ type PersistentVolumeList struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PersistentVolumeClaim is a user's request for and claim to a persistent volume
     type PersistentVolumeClaim struct {
    @@ -469,6 +487,7 @@ type PersistentVolumeClaim struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
     type PersistentVolumeClaimList struct {
    @@ -557,7 +576,7 @@ type PersistentVolumeClaimSpec struct {
     	// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
     	// exists.
     	// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
    -	// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
    +	// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
    @@ -581,15 +600,29 @@ type TypedObjectReference struct {
     	Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
     }
     
    -// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
    +// PersistentVolumeClaimConditionType defines the condition of PV claim.
    +// Valid values are:
    +//   - "Resizing", "FileSystemResizePending"
    +//
    +// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected:
    +//   - "ControllerResizeError", "NodeResizeError"
    +//
    +// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected:
    +//   - "ModifyVolumeError", "ModifyingVolume"
     type PersistentVolumeClaimConditionType string
     
    +// These are valid conditions of PVC
     const (
     	// PersistentVolumeClaimResizing - a user trigger resize of pvc has been started
     	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
     	// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
     	PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
     
    +	// PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller
    +	PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError"
    +	// PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node.
    +	PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError"
    +
     	// Applying the target VolumeAttributesClass encountered an error
     	PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
     	// Volume is being modified
    @@ -606,18 +639,19 @@ const (
     	// State set when resize controller starts resizing the volume in control-plane.
     	PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
     
    -	// State set when resize has failed in resize controller with a terminal error.
    +	// State set when resize has failed in resize controller with a terminal unrecoverable error.
     	// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
     	// unmodified, so as resize controller can resume the volume expansion.
    -	PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed"
    +	PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible"
     
     	// State set when resize controller has finished resizing the volume but further resizing of volume
     	// is needed on the node.
     	PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
     	// State set when kubelet starts resizing the volume.
     	PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
    -	// State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed
    -	PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
    +	// State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors
    +	// shouldn't set this status
    +	PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible"
     )
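A hedged example, not part of the vendored diff, of checking a claim for the renamed terminal resize statuses using the allocatedResourceStatuses map shown further below.

package example

import corev1 "k8s.io/api/core/v1"

// resizeInfeasible reports whether expansion of any resource on the claim hit
// a terminal, unrecoverable error (formerly the *ResizeFailed statuses).
func resizeInfeasible(pvc corev1.PersistentVolumeClaim) bool {
	for _, s := range pvc.Status.AllocatedResourceStatuses {
		if s == corev1.PersistentVolumeClaimControllerResizeInfeasible ||
			s == corev1.PersistentVolumeClaimNodeResizeInfeasible {
			return true
		}
	}
	return false
}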
     
     // +enum
    @@ -763,13 +797,13 @@ type PersistentVolumeClaimStatus struct {
     	AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
     	// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
     	// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
    -	// This is an alpha field and requires enabling VolumeAttributesClass feature.
    +	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
     	// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
     	// When this is unset, there is no ModifyVolume operation being attempted.
    -	// This is an alpha field and requires enabling VolumeAttributesClass feature.
    +	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
     	// +featureGate=VolumeAttributesClass
     	// +optional
     	ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
    @@ -943,16 +977,19 @@ type RBDVolumeSource struct {
     	// Default is rbd.
     	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
     	// +optional
    +	// +default="rbd"
     	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
     	// user is the rados user name.
     	// Default is admin.
     	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
     	// +optional
    +	// +default="admin"
     	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
     	// keyring is the path to key ring for RBDUser.
     	// Default is /etc/ceph/keyring.
     	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
     	// +optional
    +	// +default="/etc/ceph/keyring"
     	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
     	// secretRef is name of the authentication secret for RBDUser. If provided
     	// overrides keyring.
    @@ -988,16 +1025,19 @@ type RBDPersistentVolumeSource struct {
     	// Default is rbd.
     	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
     	// +optional
    +	// +default="rbd"
     	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
     	// user is the rados user name.
     	// Default is admin.
     	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
     	// +optional
    +	// +default="admin"
     	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
     	// keyring is the path to key ring for RBDUser.
     	// Default is /etc/ceph/keyring.
     	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
     	// +optional
    +	// +default="/etc/ceph/keyring"
     	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
     	// secretRef is name of the authentication secret for RBDUser. If provided
     	// overrides keyring.
    @@ -1426,6 +1466,7 @@ type ISCSIVolumeSource struct {
     	// iscsiInterface is the interface Name that uses an iSCSI transport.
     	// Defaults to 'default' (tcp).
     	// +optional
    +	// +default="default"
     	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
     	// fsType is the filesystem type of the volume that you want to mount.
     	// Tip: Ensure that the filesystem type is supported by the host operating system.
    @@ -1473,6 +1514,7 @@ type ISCSIPersistentVolumeSource struct {
     	// iscsiInterface is the interface Name that uses an iSCSI transport.
     	// Defaults to 'default' (tcp).
     	// +optional
    +	// +default="default"
     	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
     	// fsType is the filesystem type of the volume that you want to mount.
     	// Tip: Ensure that the filesystem type is supported by the host operating system.
    @@ -1613,17 +1655,21 @@ type AzureDiskVolumeSource struct {
     	DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
     	// cachingMode is the Host Caching mode: None, Read Only, Read Write.
     	// +optional
    +	// +default=ref(AzureDataDiskCachingReadWrite)
     	CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
     	// fsType is Filesystem type to mount.
     	// Must be a filesystem type supported by the host operating system.
     	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
     	// +optional
    +	// +default="ext4"
     	FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
     	// readOnly Defaults to false (read/write). ReadOnly here will force
     	// the ReadOnly setting in VolumeMounts.
     	// +optional
    +	// +default=false
     	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
     	// kind expected values are Shared: multiple blob disks per storage account  Dedicated: single blob disk per storage account  Managed: azure managed data disk (only in managed availability set). defaults to shared
    +	// +default=ref(AzureSharedBlobDisk)
     	Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
     }
     
    @@ -1662,6 +1708,7 @@ type ScaleIOVolumeSource struct {
     	// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
     	// Default is ThinProvisioned.
     	// +optional
    +	// +default="ThinProvisioned"
     	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
     	// volumeName is the name of a volume already created in the ScaleIO system
     	// that is associated with this volume source.
    @@ -1671,6 +1718,7 @@ type ScaleIOVolumeSource struct {
     	// Ex. "ext4", "xfs", "ntfs".
     	// Default is "xfs".
     	// +optional
    +	// +default="xfs"
     	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
     	// readOnly Defaults to false (read/write). ReadOnly here will force
     	// the ReadOnly setting in VolumeMounts.
    @@ -1699,6 +1747,7 @@ type ScaleIOPersistentVolumeSource struct {
     	// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
     	// Default is ThinProvisioned.
     	// +optional
    +	// +default="ThinProvisioned"
     	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
     	// volumeName is the name of a volume already created in the ScaleIO system
     	// that is associated with this volume source.
    @@ -1708,6 +1757,7 @@ type ScaleIOPersistentVolumeSource struct {
     	// Ex. "ext4", "xfs", "ntfs".
     	// Default is "xfs"
     	// +optional
    +	// +default="xfs"
     	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
     	// readOnly defaults to false (read/write). ReadOnly here will force
     	// the ReadOnly setting in VolumeMounts.
    @@ -1891,7 +1941,8 @@ type ClusterTrustBundleProjection struct {
     
     // Represents a projected volume source
     type ProjectedVolumeSource struct {
    -	// sources is the list of volume projections
    +	// sources is the list of volume projections. Each entry in this list
    +	// handles one source.
     	// +optional
     	// +listType=atomic
     	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
    @@ -1905,10 +1956,9 @@ type ProjectedVolumeSource struct {
     	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
     }
     
    -// Projection that may be projected along with other supported volume types
    +// Projection that may be projected along with other supported volume types.
    +// Exactly one of these fields must be set.
     type VolumeProjection struct {
    -	// all types below are the supported types for projection into the same volume
    -
     	// secret information about the secret data to project
     	// +optional
     	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
    @@ -2631,6 +2681,13 @@ type ResourceClaim struct {
     	// the Pod where this field is used. It makes that resource available
     	// inside a container.
     	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
    +
    +	// Request is the name chosen for a request in the referenced claim.
    +	// If empty, everything from the claim is made available, otherwise
    +	// only the result of this request.
    +	//
    +	// +optional
    +	Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"`
     }
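As a hedged illustration, not part of the vendored diff, the new Request field lets a container consume a single named request from a pod-level claim instead of the whole claim. The claim name "gpu" and request name "one-device" below are hypothetical.

package example

import corev1 "k8s.io/api/core/v1"

var gpuRequirements = corev1.ResourceRequirements{
	Claims: []corev1.ResourceClaim{{
		Name:    "gpu",        // must match an entry in pod.spec.resourceClaims
		Request: "one-device", // named request inside the referenced claim; empty selects everything
	}},
}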
     
     const (
    @@ -3030,6 +3087,93 @@ type ContainerStatus struct {
     	// +listMapKey=mountPath
     	// +featureGate=RecursiveReadOnlyMounts
     	VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"`
    +	// User represents user identity information initially attached to the first process of the container
    +	// +featureGate=SupplementalGroupsPolicy
    +	// +optional
    +	User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"`
    +	// AllocatedResourcesStatus represents the status of various resources
    +	// allocated for this Pod.
    +	// +featureGate=ResourceHealthStatus
    +	// +optional
    +	// +patchMergeKey=name
    +	// +patchStrategy=merge
    +	// +listType=map
    +	// +listMapKey=name
    +	AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
    +}
    +
    +type ResourceStatus struct {
    +	// Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
    +	// +required
    +	Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"`
    +	// List of unique resource health statuses. Each element in the list contains a unique resource ID and resource health.
    +	// At a minimum, ResourceID must uniquely identify the Resource
    +	// allocated to the Pod on the Node for the lifetime of a Pod.
    +	// See the ResourceID type for its definition.
    +	// +listType=map
    +	// +listMapKey=resourceID
    +	Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
    +}
    +
    +type ResourceHealthStatus string
    +
    +const (
    +	ResourceHealthStatusHealthy   ResourceHealthStatus = "Healthy"
    +	ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy"
    +	ResourceHealthStatusUnknown   ResourceHealthStatus = "Unknown"
    +)
    +
    +// ResourceID is calculated based on the source of this resource health information.
    +// For DevicePlugin:
    +//
    +//	deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
    +//
    +// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node.
    +// For DRA:
    +//
    +//	dra://<driver name>/<pool name>/<device name>: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
    +type ResourceID string
    +
    +// ResourceHealth represents the health of a resource. It has the latest device health information.
    +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
    +type ResourceHealth struct {
    +	// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
    +	ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"`
    +	// Health of the resource.
    +	// can be one of:
    +	//  - Healthy: operates as normal
    +	//  - Unhealthy: reported unhealthy. We consider this a temporary health issue
    +	//               since we do not have a mechanism today to distinguish
    +	//               temporary and permanent issues.
    +	//  - Unknown: The status cannot be determined.
    +	//             For example, Device Plugin got unregistered and hasn't been re-registered since.
    +	//
    +	// In future we may want to introduce the PermanentlyUnhealthy Status.
    +	Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"`
    +}
    +
    +// ContainerUser represents user identity information
    +type ContainerUser struct {
    +	// Linux holds user identity information initially attached to the first process of the containers in Linux.
    +	// Note that the actual running identity can be changed if the process has enough privilege to do so.
    +	// +optional
    +	Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"`
    +
    +	// Windows holds user identity information initially attached to the first process of the containers in Windows
    +	// This is just reserved for future use.
    +	// Windows *WindowsContainerUser
    +}
    +
    +// LinuxContainerUser represents user identity information in Linux containers
    +type LinuxContainerUser struct {
    +	// UID is the primary uid initially attached to the first process in the container
    +	UID int64 `json:"uid" protobuf:"varint,1,name=uid"`
    +	// GID is the primary gid initially attached to the first process in the container
    +	GID int64 `json:"gid" protobuf:"varint,2,name=gid"`
    +	// SupplementalGroups are the supplemental groups initially attached to the first process in the container
    +	// +optional
    +	// +listType=atomic
    +	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"`
     }
     
     // PodPhase is a label for the condition of a pod at the current time.
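A small, hedged example, not part of the vendored diff, of consuming the new AllocatedResourcesStatus field on ContainerStatus to find resources that are not reported Healthy.

package example

import corev1 "k8s.io/api/core/v1"

// unhealthyResourceIDs collects the IDs of allocated resources whose health is
// not Healthy for a single container status.
func unhealthyResourceIDs(cs corev1.ContainerStatus) []corev1.ResourceID {
	var ids []corev1.ResourceID
	for _, rs := range cs.AllocatedResourcesStatus {
		for _, rh := range rs.Resources {
			if rh.Health != corev1.ResourceHealthStatusHealthy {
				ids = append(ids, rh.ResourceID)
			}
		}
	}
	return ids
}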
    @@ -3426,7 +3570,8 @@ type PodAffinityTerm struct {
     	// pod labels will be ignored. The default value is empty.
     	// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
     	// Also, matchLabelKeys cannot be set when labelSelector isn't set.
    -	// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    +	// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
    +	//
     	// +listType=atomic
     	// +optional
     	MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"`
    @@ -3438,7 +3583,8 @@ type PodAffinityTerm struct {
     	// pod labels will be ignored. The default value is empty.
     	// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
     	// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
    -	// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    +	// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
    +	//
     	// +listType=atomic
     	// +optional
     	MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"`
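A hedged sketch, not part of the vendored diff, of a PodAffinityTerm using matchLabelKeys so that only pods from the same rollout (same pod-template-hash value as the incoming pod) are matched; the app label is hypothetical.

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var sameRevisionTerm = corev1.PodAffinityTerm{
	TopologyKey: "kubernetes.io/hostname",
	LabelSelector: &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
	},
	// The incoming pod's value for this key is merged into the selector, so
	// only pods of the same ReplicaSet revision count.
	MatchLabelKeys: []string{"pod-template-hash"},
}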
    @@ -3667,9 +3813,11 @@ type PodSpec struct {
     	// +optional
     	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
     
    -	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
    -	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
    -	// requirements.
    +	// NodeName indicates in which node this pod is scheduled.
    +	// If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
    +	// Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
    +	// This field should not be used to express a desire for the pod to be scheduled on a specific node.
    +	// https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
     	// +optional
     	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
     	// Host networking requested for this pod. Use the host's network namespace.
    @@ -3826,6 +3974,7 @@ type PodSpec struct {
     	// - spec.securityContext.runAsUser
     	// - spec.securityContext.runAsGroup
     	// - spec.securityContext.supplementalGroups
    +	// - spec.securityContext.supplementalGroupsPolicy
     	// - spec.containers[*].securityContext.appArmorProfile
     	// - spec.containers[*].securityContext.seLinuxOptions
     	// - spec.containers[*].securityContext.seccompProfile
    @@ -3883,7 +4032,10 @@ type PodSpec struct {
     	ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
     }
     
    -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
    +// PodResourceClaim references exactly one ResourceClaim, either directly
    +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
    +// for the pod.
    +//
     // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
     // Containers that need access to the ResourceClaim reference it with this name.
     type PodResourceClaim struct {
    @@ -3891,18 +4043,17 @@ type PodResourceClaim struct {
     	// This must be a DNS_LABEL.
     	Name string `json:"name" protobuf:"bytes,1,name=name"`
     
    -	// Source describes where to find the ResourceClaim.
    -	Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"`
    -}
    +	// Source is tombstoned since Kubernetes 1.31 where it got replaced with
    +	// the inlined fields below.
    +	//
    +	// Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"`
     
    -// ClaimSource describes a reference to a ResourceClaim.
    -//
    -// Exactly one of these fields should be set.  Consumers of this type must
    -// treat an empty object as if it has an unknown value.
    -type ClaimSource struct {
     	// ResourceClaimName is the name of a ResourceClaim object in the same
     	// namespace as this pod.
    -	ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,1,opt,name=resourceClaimName"`
    +	//
    +	// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
    +	// be set.
    +	ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"`
     
     	// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
     	// object in the same namespace as this pod.
    @@ -3916,7 +4067,10 @@ type ClaimSource struct {
     	// This field is immutable and no changes will be made to the
     	// corresponding ResourceClaim by the control plane after creating the
     	// ResourceClaim.
    -	ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"`
    +	//
    +	// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
    +	// be set.
    +	ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"`
     }
     
     // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
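A hedged sketch, not part of the vendored diff, of the post-1.31 shape: with ClaimSource removed, the claim or template name sits directly on PodResourceClaim. The names below are hypothetical.

package example

import corev1 "k8s.io/api/core/v1"

var gpuTemplate = "gpu-claim-template" // hypothetical ResourceClaimTemplate name

var podResourceClaims = []corev1.PodResourceClaim{{
	Name:                      "gpu", // referenced by containers via resources.claims[].name
	ResourceClaimTemplateName: &gpuTemplate,
}}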
    @@ -3929,7 +4083,7 @@ type PodResourceClaimStatus struct {
     	Name string `json:"name" protobuf:"bytes,1,name=name"`
     
     	// ResourceClaimName is the name of the ResourceClaim that was
    -	// generated for the Pod in the namespace of the Pod. It this is
    +	// generated for the Pod in the namespace of the Pod. If this is
     	// unset, then generating a ResourceClaim was not necessary. The
     	// pod.spec.resourceClaims entry can be ignored in this case.
     	//
    @@ -4137,6 +4291,23 @@ const (
     	FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
     )
     
    +// SupplementalGroupsPolicy defines how supplemental groups
    +// of the first container processes are calculated.
    +// +enum
    +type SupplementalGroupsPolicy string
    +
    +const (
    +	// SupplementalGroupsPolicyMerge means that the container's provided
    +	// SupplementalGroups and FsGroup (specified in SecurityContext) will be
    +	// merged with the primary user's groups as defined in the container image
    +	// (in /etc/group).
    +	SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge"
    +	// SupplementalGroupsPolicyStrict means that the container's provided
    +	// SupplementalGroups and FsGroup (specified in SecurityContext) will be
    +	// used instead of any groups defined in the container image.
    +	SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict"
    +)
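A hedged example, not part of the vendored diff, of opting a pod into the Strict policy through the new supplementalGroupsPolicy field added to PodSecurityContext further below; it requires the SupplementalGroupsPolicy feature gate and a runtime that supports it.

package example

import corev1 "k8s.io/api/core/v1"

var strict = corev1.SupplementalGroupsPolicyStrict

// Only the listed groups (plus fsGroup) are attached to the first container
// process; group memberships from /etc/group in the image are ignored.
var securityContext = corev1.PodSecurityContext{
	SupplementalGroups:       []int64{4000},
	SupplementalGroupsPolicy: &strict,
}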
    +
     // PodSecurityContext holds pod-level security attributes and common container settings.
     // Some fields are also present in container.securityContext.  Field values of
     // container.securityContext take precedence over field values of PodSecurityContext.
    @@ -4179,16 +4350,27 @@ type PodSecurityContext struct {
     	// PodSecurityContext, the value specified in SecurityContext takes precedence.
     	// +optional
     	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
    -	// A list of groups applied to the first process run in each container, in addition
    -	// to the container's primary GID, the fsGroup (if specified), and group memberships
    -	// defined in the container image for the uid of the container process. If unspecified,
    -	// no additional groups are added to any container. Note that group memberships
    -	// defined in the container image for the uid of the container process are still effective,
    -	// even if they are not included in this list.
    +	// A list of groups applied to the first process run in each container, in
    +	// addition to the container's primary GID and fsGroup (if specified).  If
    +	// the SupplementalGroupsPolicy feature is enabled, the
    +	// supplementalGroupsPolicy field determines whether these are in addition
    +	// to or instead of any group memberships defined in the container image.
    +	// If unspecified, no additional groups are added, though group memberships
    +	// defined in the container image may still be used, depending on the
    +	// supplementalGroupsPolicy field.
     	// Note that this field cannot be set when spec.os.name is windows.
     	// +optional
     	// +listType=atomic
     	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
    +	// Defines how supplemental groups of the first container processes are calculated.
    +	// Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
    +	// (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
    +	// and the container runtime must implement support for this feature.
    +	// Note that this field cannot be set when spec.os.name is windows.
    +	// TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
    +	// +featureGate=SupplementalGroupsPolicy
    +	// +optional
    +	SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"`
     	// A special supplemental group that applies to all containers in a pod.
     	// Some volume types allow the Kubelet to change the ownership of that volume
     	// to be owned by the pod:
    @@ -4340,13 +4522,15 @@ type PodDNSConfigOption struct {
     // PodIP represents a single IP address allocated to the pod.
     type PodIP struct {
     	// IP is the IP address assigned to the pod
    -	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
    +	// +required
    +	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
     }
     
     // HostIP represents a single IP address allocated to the host.
     type HostIP struct {
     	// IP is the IP address assigned to the host
    -	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
    +	// +required
    +	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
     }
     
     // EphemeralContainerCommon is a copy of all fields in Container to be inlined in
    @@ -4663,6 +4847,7 @@ type PodStatus struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
     type PodStatusResult struct {
    @@ -4683,6 +4868,7 @@ type PodStatusResult struct {
     // +genclient
     // +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Pod is a collection of containers that can run on a host. This resource is created
     // by clients and scheduled onto hosts.
    @@ -4708,6 +4894,7 @@ type Pod struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PodList is a list of Pods.
     type PodList struct {
    @@ -4737,6 +4924,7 @@ type PodTemplateSpec struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PodTemplate describes a template for creating copies of a predefined pod.
     type PodTemplate struct {
    @@ -4753,6 +4941,7 @@ type PodTemplate struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PodTemplateList is a list of PodTemplates.
     type PodTemplateList struct {
    @@ -4867,6 +5056,7 @@ type ReplicationControllerCondition struct {
     // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
     // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ReplicationController represents the configuration of a replication controller.
     type ReplicationController struct {
    @@ -4893,6 +5083,7 @@ type ReplicationController struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ReplicationControllerList is a collection of replication controllers.
     type ReplicationControllerList struct {
    @@ -5437,6 +5628,7 @@ type ServicePort struct {
     // +genclient
     // +genclient:skipVerbs=deleteCollection
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Service is a named abstraction of software service (for example, mysql) consisting of local port
     // (for example 3306) that the proxy listens on, and the selector that determines which pods
    @@ -5468,6 +5660,7 @@ const (
     )
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ServiceList holds a list of services.
     type ServiceList struct {
    @@ -5484,6 +5677,7 @@ type ServiceList struct {
     // +genclient
     // +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ServiceAccount binds together:
     // * a name, understood by users, and perhaps by peripheral systems, for an identity
    @@ -5523,6 +5717,7 @@ type ServiceAccount struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ServiceAccountList is a list of ServiceAccount objects
     type ServiceAccountList struct {
    @@ -5539,6 +5734,7 @@ type ServiceAccountList struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Endpoints is a collection of endpoints that implement the actual service. Example:
     //
    @@ -5660,6 +5856,7 @@ type EndpointPort struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // EndpointsList is a list of endpoints.
     type EndpointsList struct {
    @@ -5772,13 +5969,16 @@ type NodeDaemonEndpoints struct {
     	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
     }
     
    -// NodeRuntimeHandlerFeatures is a set of runtime features.
    +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
     type NodeRuntimeHandlerFeatures struct {
     	// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
     	// +featureGate=RecursiveReadOnlyMounts
     	// +optional
     	RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"`
    -	// Reserved: UserNamespaces *bool (varint 2, for consistency with CRI API)
    +	// UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
    +	// +featureGate=UserNamespacesSupport
    +	// +optional
    +	UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"`
     }
     
     // NodeRuntimeHandler is a set of runtime handler information.
    @@ -5792,6 +5992,15 @@ type NodeRuntimeHandler struct {
     	Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"`
     }
     
    +// NodeFeatures describes the set of features implemented by the CRI implementation.
    +// The features contained in the NodeFeatures should depend only on the cri implementation
    +// independent of runtime handlers.
    +type NodeFeatures struct {
    +	// SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
    +	// +optional
    +	SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"`
    +}
    +
     // NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
     type NodeSystemInfo struct {
     	// MachineID reported by the node. For unique machine identification
    @@ -5812,7 +6021,7 @@ type NodeSystemInfo struct {
     	ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
     	// Kubelet Version reported by the node.
     	KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
    -	// KubeProxy Version reported by the node.
    +	// Deprecated: KubeProxy Version reported by the node.
     	KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
     	// The Operating System reported by the node
     	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
    @@ -5870,7 +6079,7 @@ type NodeConfigStatus struct {
     // NodeStatus is information about the current status of a node.
     type NodeStatus struct {
     	// Capacity represents the total resources of a node.
    -	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
    +	// More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
     	// +optional
     	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
     	// Allocatable represents the resources of a node that are available for scheduling.
    @@ -5930,9 +6139,14 @@ type NodeStatus struct {
     	Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
     	// The available runtime handlers.
     	// +featureGate=RecursiveReadOnlyMounts
    +	// +featureGate=UserNamespacesSupport
     	// +optional
     	// +listType=atomic
     	RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"`
    +	// Features describes the set of features implemented by the CRI implementation.
    +	// +featureGate=SupplementalGroupsPolicy
    +	// +optional
    +	Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"`
     }
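The two hunks above surface new per-node capability reporting: NodeRuntimeHandlerFeatures gains UserNamespaces, and NodeStatus gains a Features field of the new NodeFeatures type. A minimal sketch of how a consumer of the vendored API might read these fields (field names taken from the hunks above; whether they are populated still depends on the corresponding feature gates being enabled on the cluster):

```go
package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportNodeCapabilities prints which of the newly reported node capabilities
// are advertised on a Node object obtained from the API server.
func reportNodeCapabilities(node *corev1.Node) {
	for _, h := range node.Status.RuntimeHandlers {
		if h.Features == nil {
			continue
		}
		if h.Features.UserNamespaces != nil && *h.Features.UserNamespaces {
			fmt.Printf("runtime handler %q supports user namespaces\n", h.Name)
		}
		if h.Features.RecursiveReadOnlyMounts != nil && *h.Features.RecursiveReadOnlyMounts {
			fmt.Printf("runtime handler %q supports recursive read-only mounts\n", h.Name)
		}
	}
	if f := node.Status.Features; f != nil && f.SupplementalGroupsPolicy != nil && *f.SupplementalGroupsPolicy {
		fmt.Println("CRI supports SupplementalGroupsPolicy and ContainerUser")
	}
}
```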
     
     type UniqueVolumeName string
    @@ -6128,6 +6342,7 @@ type ResourceList map[ResourceName]resource.Quantity
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Node is a worker node in Kubernetes.
     // Each node will have a unique identifier in the cache (i.e. in etcd).
    @@ -6152,6 +6367,7 @@ type Node struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // NodeList is the whole list of all Nodes which have been registered with master.
     type NodeList struct {
    @@ -6250,6 +6466,7 @@ type NamespaceCondition struct {
     // +genclient:nonNamespaced
     // +genclient:skipVerbs=deleteCollection
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Namespace provides a scope for Names.
     // Use of multiple namespaces is optional.
    @@ -6272,6 +6489,7 @@ type Namespace struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // NamespaceList is a list of Namespaces.
     type NamespaceList struct {
    @@ -6287,6 +6505,7 @@ type NamespaceList struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
     // Deprecated in 1.7, please use the bindings subresource of pods instead.
    @@ -6311,6 +6530,7 @@ type Preconditions struct {
     
     // +k8s:conversion-gen:explicit-from=net/url.Values
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PodLogOptions is the query options for a Pod's logs REST call.
     type PodLogOptions struct {
    @@ -6363,6 +6583,7 @@ type PodLogOptions struct {
     
     // +k8s:conversion-gen:explicit-from=net/url.Values
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.1
     
     // PodAttachOptions is the query options to a Pod's remote attach call.
     // ---
    @@ -6401,6 +6622,7 @@ type PodAttachOptions struct {
     
     // +k8s:conversion-gen:explicit-from=net/url.Values
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PodExecOptions is the query options to a Pod's remote exec call.
     // ---
    @@ -6439,6 +6661,7 @@ type PodExecOptions struct {
     
     // +k8s:conversion-gen:explicit-from=net/url.Values
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.6
     
     // PodPortForwardOptions is the query options to a Pod's port forward call
     // when using WebSockets.
    @@ -6458,6 +6681,7 @@ type PodPortForwardOptions struct {
     
     // +k8s:conversion-gen:explicit-from=net/url.Values
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // PodProxyOptions is the query options to a Pod's proxy call.
     type PodProxyOptions struct {
    @@ -6470,6 +6694,7 @@ type PodProxyOptions struct {
     
     // +k8s:conversion-gen:explicit-from=net/url.Values
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // NodeProxyOptions is the query options to a Node's proxy call.
     type NodeProxyOptions struct {
    @@ -6482,6 +6707,7 @@ type NodeProxyOptions struct {
     
     // +k8s:conversion-gen:explicit-from=net/url.Values
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // ServiceProxyOptions is the query options to a Service's proxy call.
     type ServiceProxyOptions struct {
    @@ -6584,6 +6810,7 @@ type TypedLocalObjectReference struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // SerializedReference is a reference to serialized object.
     type SerializedReference struct {
    @@ -6613,6 +6840,7 @@ const (
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Event is a report of an event somewhere in the cluster.  Events
     // have a limited retention time and triggers and messages may evolve
    @@ -6697,6 +6925,7 @@ type EventSeries struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // EventList is a list of events.
     type EventList struct {
    @@ -6711,6 +6940,7 @@ type EventList struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // List holds a list of objects, which may not be known by the server.
     type List metav1.List
    @@ -6758,6 +6988,7 @@ type LimitRangeSpec struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // LimitRange sets resource usage limits for each kind of resource in a Namespace.
     type LimitRange struct {
    @@ -6774,6 +7005,7 @@ type LimitRange struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // LimitRangeList is a list of LimitRange items.
     type LimitRangeList struct {
    @@ -6822,6 +7054,8 @@ const (
     	ResourceLimitsMemory ResourceName = "limits.memory"
     	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
     	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
    +	// resource.k8s.io devices requested with a certain DeviceClass, number
    +	ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
     )
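The new ResourceClaimsPerClass constant reads like a quota key suffix (note the leading dot). A hypothetical helper, assuming the suffix is appended to a DeviceClass name to form the quota resource name; the helper and the example class name are illustrations, not part of the vendored API:

```go
package example

import corev1 "k8s.io/api/core/v1"

// quotaNameForDeviceClass builds a ResourceQuota key such as
// "gpu.example.com.deviceclass.resource.k8s.io/devices", assuming the
// constant is meant to be suffixed onto a DeviceClass name.
func quotaNameForDeviceClass(className string) corev1.ResourceName {
	return corev1.ResourceName(className + corev1.ResourceClaimsPerClass)
}
```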
     
     // The following identify resource prefix for Kubernetes object types
    @@ -6922,6 +7156,7 @@ type ResourceQuotaStatus struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ResourceQuota sets aggregate quota restrictions enforced per namespace
     type ResourceQuota struct {
    @@ -6943,6 +7178,7 @@ type ResourceQuota struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ResourceQuotaList is a list of ResourceQuota items.
     type ResourceQuotaList struct {
    @@ -6959,6 +7195,7 @@ type ResourceQuotaList struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Secret holds secret data of a certain type. The total bytes of the values in
     // the Data field must be less than MaxSecretSize bytes.
    @@ -7085,6 +7322,7 @@ const (
     )
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // SecretList is a list of Secret.
     type SecretList struct {
    @@ -7101,6 +7339,7 @@ type SecretList struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // ConfigMap holds configuration data for pods to consume.
     type ConfigMap struct {
    @@ -7137,6 +7376,7 @@ type ConfigMap struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.2
     
     // ConfigMapList is a resource containing a list of ConfigMap objects.
     type ConfigMapList struct {
    @@ -7179,6 +7419,7 @@ type ComponentCondition struct {
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // ComponentStatus (and ComponentStatusList) holds the cluster validation info.
     // Deprecated: This API is deprecated in v1.19+
    @@ -7199,6 +7440,7 @@ type ComponentStatus struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // Status of all the conditions for the component as a list of ComponentStatus objects.
     // Deprecated: This API is deprecated in v1.19+
    @@ -7332,7 +7574,7 @@ type SecurityContext struct {
     	// +optional
     	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
     	// procMount denotes the type of proc mount to use for the containers.
    -	// The default is DefaultProcMount which uses the container runtime defaults for
    +	// The default value is Default which uses the container runtime defaults for
     	// readonly paths and masked paths.
     	// This requires the ProcMountType feature flag to be enabled.
     	// Note that this field cannot be set when spec.os.name is windows.
    @@ -7410,6 +7652,7 @@ type WindowsSecurityContextOptions struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.0
     
     // RangeAllocation is not a public type.
     type RangeAllocation struct {
    @@ -7519,3 +7762,23 @@ const (
     	// the destination set to the node's IP and port or the pod's IP and port.
     	LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
     )
    +
     +// ImageVolumeSource represents an image volume resource.
    +type ImageVolumeSource struct {
    +	// Required: Image or artifact reference to be used.
    +	// Behaves in the same way as pod.spec.containers[*].image.
    +	// Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
    +	// More info: https://kubernetes.io/docs/concepts/containers/images
    +	// This field is optional to allow higher level config management to default or override
    +	// container images in workload controllers like Deployments and StatefulSets.
    +	// +optional
    +	Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
    +
    +	// Policy for pulling OCI objects. Possible values are:
     +	// Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
    +	// Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
    +	// IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
    +	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
    +	// +optional
    +	PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"`
    +}
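A sketch of a pod using the new ImageVolumeSource added above. It assumes the matching VolumeSource field is named Image, as in upstream k8s.io/api v0.31, and that the corresponding feature gate is enabled on the cluster; the image reference is a placeholder:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podWithImageVolume mounts an OCI image/artifact as a read-only volume.
func podWithImageVolume() *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "image-volume-demo"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "shell",
				Image: "busybox",
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "artifact",
					MountPath: "/artifact",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "artifact",
				VolumeSource: corev1.VolumeSource{
					Image: &corev1.ImageVolumeSource{
						Reference:  "registry.example.com/oci-artifact:v1", // hypothetical reference
						PullPolicy: corev1.PullIfNotPresent,
					},
				},
			}},
		},
	}
}
```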
    diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
    index c54f2a2fe5..950806ef8e 100644
    --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
    @@ -219,16 +219,6 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string {
     	return map_CinderVolumeSource
     }
     
    -var map_ClaimSource = map[string]string{
    -	"":                          "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set.  Consumers of this type must treat an empty object as if it has an unknown value.",
    -	"resourceClaimName":         "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.",
    -	"resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
    -}
    -
    -func (ClaimSource) SwaggerDoc() map[string]string {
    -	return map_ClaimSource
    -}
    -
     var map_ClientIPConfig = map[string]string{
     	"":               "ClientIPConfig represents the configurations of Client IP based session affinity.",
     	"timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
    @@ -469,25 +459,36 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string {
     }
     
     var map_ContainerStatus = map[string]string{
    -	"":                   "ContainerStatus contains details for the current status of this container.",
    -	"name":               "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.",
    -	"state":              "State holds details about the container's current condition.",
    -	"lastState":          "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.",
    -	"ready":              "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.",
    -	"restartCount":       "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.",
    -	"image":              "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.",
    -	"imageID":            "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.",
    -	"containerID":        "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").",
    -	"started":            "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.",
    -	"allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.",
    -	"resources":          "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.",
    -	"volumeMounts":       "Status of volume mounts.",
    +	"":                         "ContainerStatus contains details for the current status of this container.",
    +	"name":                     "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.",
    +	"state":                    "State holds details about the container's current condition.",
    +	"lastState":                "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.",
    +	"ready":                    "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.",
    +	"restartCount":             "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.",
    +	"image":                    "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.",
    +	"imageID":                  "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.",
    +	"containerID":              "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").",
    +	"started":                  "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.",
    +	"allocatedResources":       "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.",
    +	"resources":                "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.",
    +	"volumeMounts":             "Status of volume mounts.",
    +	"user":                     "User represents user identity information initially attached to the first process of the container",
    +	"allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.",
     }
     
     func (ContainerStatus) SwaggerDoc() map[string]string {
     	return map_ContainerStatus
     }
     
    +var map_ContainerUser = map[string]string{
    +	"":      "ContainerUser represents user identity information",
    +	"linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. Note that the actual running identity can be changed if the process has enough privilege to do so.",
    +}
    +
    +func (ContainerUser) SwaggerDoc() map[string]string {
    +	return map_ContainerUser
    +}
    +
     var map_DaemonEndpoint = map[string]string{
     	"":     "DaemonEndpoint contains information about a single Daemon endpoint.",
     	"Port": "Port number of the given endpoint.",
    @@ -933,6 +934,16 @@ func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
     	return map_ISCSIVolumeSource
     }
     
    +var map_ImageVolumeSource = map[string]string{
    +	"":           "ImageVolumeSource represents a image volume resource.",
    +	"reference":  "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
    +	"pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.",
    +}
    +
    +func (ImageVolumeSource) SwaggerDoc() map[string]string {
    +	return map_ImageVolumeSource
    +}
    +
     var map_KeyToPath = map[string]string{
     	"":     "Maps a string key to a path within a volume.",
     	"key":  "key is the key to project.",
    @@ -1009,6 +1020,17 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string {
     	return map_LimitRangeSpec
     }
     
    +var map_LinuxContainerUser = map[string]string{
    +	"":                   "LinuxContainerUser represents user identity information in Linux containers",
    +	"uid":                "UID is the primary uid initially attached to the first process in the container",
    +	"gid":                "GID is the primary gid initially attached to the first process in the container",
    +	"supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container",
    +}
    +
    +func (LinuxContainerUser) SwaggerDoc() map[string]string {
    +	return map_LinuxContainerUser
    +}
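ContainerStatus now carries a User field of the new ContainerUser type, whose Linux member is the LinuxContainerUser documented just above. A small sketch of reading it from pod status, assuming those Go field names; the values are only populated by kubelets and runtimes that implement the feature:

```go
package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// printContainerUsers reports the identity attached to the first process of
// each container, when the node reported it.
func printContainerUsers(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		if cs.User == nil || cs.User.Linux == nil {
			continue
		}
		u := cs.User.Linux
		fmt.Printf("%s: uid=%d gid=%d supplementalGroups=%v\n",
			cs.Name, u.UID, u.GID, u.SupplementalGroups)
	}
}
```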
    +
     var map_LoadBalancerIngress = map[string]string{
     	"":         "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
     	"ip":       "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
    @@ -1195,6 +1217,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string {
     	return map_NodeDaemonEndpoints
     }
     
    +var map_NodeFeatures = map[string]string{
    +	"":                         "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.",
    +	"supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.",
    +}
    +
    +func (NodeFeatures) SwaggerDoc() map[string]string {
    +	return map_NodeFeatures
    +}
    +
     var map_NodeList = map[string]string{
     	"":         "NodeList is the whole list of all Nodes which have been registered with master.",
     	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
    @@ -1225,8 +1256,9 @@ func (NodeRuntimeHandler) SwaggerDoc() map[string]string {
     }
     
     var map_NodeRuntimeHandlerFeatures = map[string]string{
    -	"":                        "NodeRuntimeHandlerFeatures is a set of runtime features.",
    +	"":                        "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.",
     	"recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.",
    +	"userNamespaces":          "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.",
     }
     
     func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string {
    @@ -1280,7 +1312,7 @@ func (NodeSpec) SwaggerDoc() map[string]string {
     
     var map_NodeStatus = map[string]string{
     	"":                "NodeStatus is information about the current status of a node.",
    -	"capacity":        "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
    +	"capacity":        "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
     	"allocatable":     "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
     	"phase":           "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
     	"conditions":      "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
    @@ -1292,6 +1324,7 @@ var map_NodeStatus = map[string]string{
     	"volumesAttached": "List of volumes that are attached to the node.",
     	"config":          "Status of the config assigned to the node via the dynamic Kubelet config feature.",
     	"runtimeHandlers": "The available runtime handlers.",
    +	"features":        "Features describes the set of features implemented by the CRI implementation.",
     }
     
     func (NodeStatus) SwaggerDoc() map[string]string {
    @@ -1307,7 +1340,7 @@ var map_NodeSystemInfo = map[string]string{
     	"osImage":                 "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
     	"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).",
     	"kubeletVersion":          "Kubelet Version reported by the node.",
    -	"kubeProxyVersion":        "KubeProxy Version reported by the node.",
    +	"kubeProxyVersion":        "Deprecated: KubeProxy Version reported by the node.",
     	"operatingSystem":         "The Operating System reported by the node",
     	"architecture":            "The Architecture reported by the node",
     }
    @@ -1395,7 +1428,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
     	"volumeMode":                "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
     	"dataSource":                "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
     	"dataSourceRef":             "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n  allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n  preserves all values, and generates an error if a disallowed value is\n  specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n  in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
    -	"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.",
    +	"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
     }
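With volumeAttributesClassName now documented as beta, a claim can reference a VolumeAttributesClass directly in its spec. A minimal sketch, using placeholder class and storage-class names and the generic ptr.To helper from k8s.io/utils/ptr:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// pvcWithVolumeAttributesClass requests 10Gi and opts into an assumed
// VolumeAttributesClass named "gold".
func pvcWithVolumeAttributesClass() *corev1.PersistentVolumeClaim {
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:               []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName:          ptr.To("standard"), // placeholder storage class
			VolumeAttributesClassName: ptr.To("gold"),     // placeholder attributes class
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}
}
```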
     
     func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
    @@ -1410,8 +1443,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
     	"conditions":                       "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
     	"allocatedResources":               "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
     	"allocatedResourceStatuses":        "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
    -	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.",
    -	"modifyVolumeStatus":               "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.",
    +	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
    +	"modifyVolumeStatus":               "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
     }
     
     func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
    @@ -1488,7 +1521,7 @@ var map_PersistentVolumeSpec = map[string]string{
     	"mountOptions":                  "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
     	"volumeMode":                    "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
     	"nodeAffinity":                  "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
    -	"volumeAttributesClassName":     "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.",
    +	"volumeAttributesClassName":     "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
     }
     
     func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
    @@ -1500,7 +1533,7 @@ var map_PersistentVolumeStatus = map[string]string{
     	"phase":                   "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
     	"message":                 "message is a human-readable message indicating details about why the volume is in this state.",
     	"reason":                  "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
    -	"lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).",
    +	"lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.",
     }
     
     func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
    @@ -1544,8 +1577,8 @@ var map_PodAffinityTerm = map[string]string{
     	"namespaces":        "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
     	"topologyKey":       "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
     	"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
    -	"matchLabelKeys":    "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
    -	"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
    +	"matchLabelKeys":    "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
    +	"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
     }
     
     func (PodAffinityTerm) SwaggerDoc() map[string]string {
    @@ -1696,9 +1729,10 @@ func (PodReadinessGate) SwaggerDoc() map[string]string {
     }
     
     var map_PodResourceClaim = map[string]string{
    -	"":       "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
    -	"name":   "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
    -	"source": "Source describes where to find the ResourceClaim.",
    +	"":                          "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
    +	"name":                      "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
    +	"resourceClaimName":         "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
    +	"resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
     }
     
     func (PodResourceClaim) SwaggerDoc() map[string]string {
    @@ -1708,7 +1742,7 @@ func (PodResourceClaim) SwaggerDoc() map[string]string {
     var map_PodResourceClaimStatus = map[string]string{
     	"":                  "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.",
     	"name":              "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.",
    -	"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
    +	"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
     }
     
     func (PodResourceClaimStatus) SwaggerDoc() map[string]string {
    @@ -1725,18 +1759,19 @@ func (PodSchedulingGate) SwaggerDoc() map[string]string {
     }
     
     var map_PodSecurityContext = map[string]string{
    -	"":                    "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext.  Field values of container.securityContext take precedence over field values of PodSecurityContext.",
    -	"seLinuxOptions":      "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
    -	"windowsOptions":      "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.",
    -	"runAsUser":           "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
    -	"runAsGroup":          "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
    -	"runAsNonRoot":        "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
    -	"supplementalGroups":  "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.",
    -	"fsGroup":             "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
    -	"sysctls":             "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.",
    -	"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
    -	"seccompProfile":      "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
    -	"appArmorProfile":     "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
    +	"":                         "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext.  Field values of container.securityContext take precedence over field values of PodSecurityContext.",
    +	"seLinuxOptions":           "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
    +	"windowsOptions":           "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.",
    +	"runAsUser":                "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
    +	"runAsGroup":               "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
    +	"runAsNonRoot":             "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
    +	"supplementalGroups":       "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified).  If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.",
    +	"supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.",
    +	"fsGroup":                  "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
    +	"sysctls":                  "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.",
    +	"fsGroupChangePolicy":      "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
    +	"seccompProfile":           "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
    +	"appArmorProfile":          "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
     }
     
     func (PodSecurityContext) SwaggerDoc() map[string]string {
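
Editor's note: the new supplementalGroupsPolicy field documented in the hunk above controls whether image-defined group memberships are merged with or replaced by the pod-level supplementalGroups list. A minimal sketch of setting it, assuming the k8s.io/api/core/v1 types vendored by this change; the SupplementalGroupsPolicyStrict constant name is taken from upstream and is not shown in this excerpt.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// "Strict": only the listed groups (plus the primary GID) apply; image-defined
	// group memberships are ignored. Requires the SupplementalGroupsPolicy gate.
	strict := corev1.SupplementalGroupsPolicyStrict

	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			SecurityContext: &corev1.PodSecurityContext{
				SupplementalGroups:       []int64{2000, 3000},
				SupplementalGroupsPolicy: &strict,
			},
			Containers: []corev1.Container{{Name: "app", Image: "registry.example/app:latest"}},
		},
	}
	fmt.Println(*pod.Spec.SecurityContext.SupplementalGroupsPolicy)
}
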
    @@ -1766,7 +1801,7 @@ var map_PodSpec = map[string]string{
     	"serviceAccountName":            "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
     	"serviceAccount":                "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
     	"automountServiceAccountToken":  "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
    -	"nodeName":                      "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
    +	"nodeName":                      "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
     	"hostNetwork":                   "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
     	"hostPID":                       "Use the host's pid namespace. Optional: Default to false.",
     	"hostIPC":                       "Use the host's ipc namespace. Optional: Default to false.",
    @@ -1789,7 +1824,7 @@ var map_PodSpec = map[string]string{
     	"overhead":                      "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
     	"topologySpreadConstraints":     "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
     	"setHostnameAsFQDN":             "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
    -	"os":                            "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
    +	"os":                            "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
     	"hostUsers":                     "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
     	"schedulingGates":               "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
     	"resourceClaims":                "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
    @@ -1943,7 +1978,7 @@ func (ProbeHandler) SwaggerDoc() map[string]string {
     
     var map_ProjectedVolumeSource = map[string]string{
     	"":            "Represents a projected volume source",
    -	"sources":     "sources is the list of volume projections",
    +	"sources":     "sources is the list of volume projections. Each entry in this list handles one source.",
     	"defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
     }
     
    @@ -2069,8 +2104,9 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string {
     }
     
     var map_ResourceClaim = map[string]string{
    -	"":     "ResourceClaim references one entry in PodSpec.ResourceClaims.",
    -	"name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
    +	"":        "ResourceClaim references one entry in PodSpec.ResourceClaims.",
    +	"name":    "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
    +	"request": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.",
     }
     
     func (ResourceClaim) SwaggerDoc() map[string]string {
    @@ -2088,6 +2124,16 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string {
     	return map_ResourceFieldSelector
     }
     
    +var map_ResourceHealth = map[string]string{
    +	"":           "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.",
    +	"resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.",
    +	"health":     "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n              since we do not have a mechanism today to distinguish\n              temporary and permanent issues.\n - Unknown: The status cannot be determined.\n            For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.",
    +}
    +
    +func (ResourceHealth) SwaggerDoc() map[string]string {
    +	return map_ResourceHealth
    +}
    +
     var map_ResourceQuota = map[string]string{
     	"":         "ResourceQuota sets aggregate quota restrictions enforced per namespace",
     	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    @@ -2141,6 +2187,15 @@ func (ResourceRequirements) SwaggerDoc() map[string]string {
     	return map_ResourceRequirements
     }
     
    +var map_ResourceStatus = map[string]string{
    +	"name":      "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.",
    +	"resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.",
    +}
    +
    +func (ResourceStatus) SwaggerDoc() map[string]string {
    +	return map_ResourceStatus
    +}
    +
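
Editor's note: the ResourceHealth and ResourceStatus entries above surface per-device health in container status. A hedged sketch of reading them, assuming ContainerStatus gained an AllocatedResourcesStatus slice as the deepcopy changes further down indicate; the ResourceHealthStatusHealthy constant name comes from upstream and is not shown in this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportUnhealthyDevices prints every allocated resource whose reported health
// is not "Healthy", using the new AllocatedResourcesStatus field.
func reportUnhealthyDevices(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		for _, rs := range cs.AllocatedResourcesStatus {
			for _, rh := range rs.Resources {
				if rh.Health != corev1.ResourceHealthStatusHealthy {
					fmt.Printf("container %s: resource %s device %s is %s\n",
						cs.Name, rs.Name, rh.ResourceID, rh.Health)
				}
			}
		}
	}
}

func main() {
	reportUnhealthyDevices(&corev1.Pod{}) // no statuses reported: prints nothing
}
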
     var map_SELinuxOptions = map[string]string{
     	"":      "SELinuxOptions are the labels to be applied to the container",
     	"user":  "User is a SELinux user label that applies to the container.",
    @@ -2304,7 +2359,7 @@ var map_SecurityContext = map[string]string{
     	"runAsNonRoot":             "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
     	"readOnlyRootFilesystem":   "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.",
     	"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.",
    -	"procMount":                "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.",
    +	"procMount":                "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.",
     	"seccompProfile":           "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.",
     	"appArmorProfile":          "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.",
     }
    @@ -2639,7 +2694,7 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string {
     }
     
     var map_VolumeProjection = map[string]string{
    -	"":                    "Projection that may be projected along with other supported volume types",
    +	"":                    "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.",
     	"secret":              "secret information about the secret data to project",
     	"downwardAPI":         "downwardAPI information about the downwardAPI data to project",
     	"configMap":           "configMap information about the configMap data to project",
    @@ -2692,6 +2747,7 @@ var map_VolumeSource = map[string]string{
     	"storageos":             "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
     	"csi":                   "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
     	"ephemeral":             "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n   tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n   a PersistentVolumeClaim (see EphemeralVolumeSource for more\n   information on the connection between this volume type\n   and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
    +	"image":                 "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
     }
     
     func (VolumeSource) SwaggerDoc() map[string]string {
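
Editor's note: the new image volume source documented above mounts an OCI image or artifact read-only into a pod. A minimal sketch of declaring one, assuming ImageVolumeSource exposes Reference and PullPolicy fields as in the upstream v1.31 API (the field names are not shown in this excerpt; the image references are placeholders).

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{{
				Name: "models",
				VolumeSource: corev1.VolumeSource{
					// The referenced OCI artifact is resolved at pod startup and
					// mounted read-only under the container's mountPath.
					Image: &corev1.ImageVolumeSource{
						Reference:  "registry.example/models:v1", // hypothetical reference
						PullPolicy: corev1.PullIfNotPresent,
					},
				},
			}},
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.example/app:latest",
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "models",
					MountPath: "/models",
				}},
			}},
		},
	}
	fmt.Println(pod.Spec.Volumes[0].Image.Reference)
}
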
    diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
    index 08e927848e..3d23f7f620 100644
    --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
    @@ -440,32 +440,6 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
     	return out
     }
     
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ClaimSource) DeepCopyInto(out *ClaimSource) {
    -	*out = *in
    -	if in.ResourceClaimName != nil {
    -		in, out := &in.ResourceClaimName, &out.ResourceClaimName
    -		*out = new(string)
    -		**out = **in
    -	}
    -	if in.ResourceClaimTemplateName != nil {
    -		in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
    -		*out = new(string)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource.
    -func (in *ClaimSource) DeepCopy() *ClaimSource {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ClaimSource)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
     	*out = *in
    @@ -1069,6 +1043,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
     			(*in)[i].DeepCopyInto(&(*out)[i])
     		}
     	}
    +	if in.User != nil {
    +		in, out := &in.User, &out.User
    +		*out = new(ContainerUser)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	if in.AllocatedResourcesStatus != nil {
    +		in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus
    +		*out = make([]ResourceStatus, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
     	return
     }
     
    @@ -1082,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ContainerUser) DeepCopyInto(out *ContainerUser) {
    +	*out = *in
    +	if in.Linux != nil {
    +		in, out := &in.Linux, &out.Linux
    +		*out = new(LinuxContainerUser)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser.
    +func (in *ContainerUser) DeepCopy() *ContainerUser {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ContainerUser)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
     	*out = *in
    @@ -2044,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource.
    +func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ImageVolumeSource)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
     	*out = *in
    @@ -2261,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) {
    +	*out = *in
    +	if in.SupplementalGroups != nil {
    +		in, out := &in.SupplementalGroups, &out.SupplementalGroups
    +		*out = make([]int64, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser.
    +func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(LinuxContainerUser)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *List) DeepCopyInto(out *List) {
     	*out = *in
    @@ -2695,6 +2739,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) {
    +	*out = *in
    +	if in.SupplementalGroupsPolicy != nil {
    +		in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
    +		*out = new(bool)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures.
    +func (in *NodeFeatures) DeepCopy() *NodeFeatures {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(NodeFeatures)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *NodeList) DeepCopyInto(out *NodeList) {
     	*out = *in
    @@ -2782,6 +2847,11 @@ func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatur
     		*out = new(bool)
     		**out = **in
     	}
    +	if in.UserNamespaces != nil {
    +		in, out := &in.UserNamespaces, &out.UserNamespaces
    +		*out = new(bool)
    +		**out = **in
    +	}
     	return
     }
     
    @@ -2962,6 +3032,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
     			(*in)[i].DeepCopyInto(&(*out)[i])
     		}
     	}
    +	if in.Features != nil {
    +		in, out := &in.Features, &out.Features
    +		*out = new(NodeFeatures)
    +		(*in).DeepCopyInto(*out)
    +	}
     	return
     }
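
Editor's note: the NodeStatus and NodeFeatures deepcopy additions above wire up the new status.features field. A hedged sketch of checking whether a node's runtime advertises SupplementalGroupsPolicy support before relying on the Strict policy, using only the fields visible in this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// supportsStrictSupplementalGroups reports whether the node advertises
// SupplementalGroupsPolicy support via the new NodeStatus.Features field.
func supportsStrictSupplementalGroups(node *corev1.Node) bool {
	f := node.Status.Features
	return f != nil && f.SupplementalGroupsPolicy != nil && *f.SupplementalGroupsPolicy
}

func main() {
	fmt.Println(supportsStrictSupplementalGroups(&corev1.Node{})) // false: no features reported
}
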
     
    @@ -3971,7 +4046,16 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
     	*out = *in
    -	in.Source.DeepCopyInto(&out.Source)
    +	if in.ResourceClaimName != nil {
    +		in, out := &in.ResourceClaimName, &out.ResourceClaimName
    +		*out = new(string)
    +		**out = **in
    +	}
    +	if in.ResourceClaimTemplateName != nil {
    +		in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
    +		*out = new(string)
    +		**out = **in
    +	}
     	return
     }
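
Editor's note: the hunk above replaces PodResourceClaim's nested Source with inline ResourceClaimName/ResourceClaimTemplateName pointers (the ClaimSource deepcopy is removed earlier in this file). A sketch of the corresponding call-site change; the claim names are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	claimName := "gpu-claim" // hypothetical ResourceClaim name

	// Previously this was expressed via the nested source, roughly:
	//   corev1.PodResourceClaim{Name: "gpu", Source: corev1.ClaimSource{ResourceClaimName: &claimName}}
	// With the v0.31 types the claim is referenced directly on PodResourceClaim.
	claim := corev1.PodResourceClaim{
		Name:              "gpu",
		ResourceClaimName: &claimName,
	}
	fmt.Println(claim.Name, *claim.ResourceClaimName)
}
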
     
    @@ -4055,6 +4139,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
     		*out = make([]int64, len(*in))
     		copy(*out, *in)
     	}
    +	if in.SupplementalGroupsPolicy != nil {
    +		in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
    +		*out = new(SupplementalGroupsPolicy)
    +		**out = **in
    +	}
     	if in.FSGroup != nil {
     		in, out := &in.FSGroup, &out.FSGroup
     		*out = new(int64)
    @@ -4900,6 +4989,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth.
    +func (in *ResourceHealth) DeepCopy() *ResourceHealth {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceHealth)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in ResourceList) DeepCopyInto(out *ResourceList) {
     	{
    @@ -5081,6 +5186,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
    +	*out = *in
    +	if in.Resources != nil {
    +		in, out := &in.Resources, &out.Resources
    +		*out = make([]ResourceHealth, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
    +func (in *ResourceStatus) DeepCopy() *ResourceStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
     	*out = *in
    @@ -6426,6 +6552,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
     		*out = new(EphemeralVolumeSource)
     		(*in).DeepCopyInto(*out)
     	}
    +	if in.Image != nil {
    +		in, out := &in.Image, &out.Image
    +		*out = new(ImageVolumeSource)
    +		**out = **in
    +	}
     	return
     }
     
    diff --git a/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..6710a96d1c
    --- /dev/null
    +++ b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,274 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Binding) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Endpoints) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Event) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *EventList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LimitRange) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *List) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Namespace) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Node) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *NodeList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Pod) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 1
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 6
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Secret) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *SecretList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Service) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 0
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 2
    +}
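
Editor's note: the new prerelease-lifecycle file above adds APILifecycleIntroduced methods to every core/v1 kind. A small sketch of how a caller might use them, with the return values taken directly from the generated code above.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Each method returns the Kubernetes release that introduced the kind,
	// e.g. (1, 0) for Pod and (1, 2) for ConfigMap per the generated code above.
	major, minor := (&corev1.Pod{}).APILifecycleIntroduced()
	fmt.Printf("Pod introduced in v%d.%d\n", major, minor)

	major, minor = (&corev1.ConfigMap{}).APILifecycleIntroduced()
	fmt.Printf("ConfigMap introduced in v%d.%d\n", major, minor)
}
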
    diff --git a/vendor/k8s.io/api/discovery/v1/doc.go b/vendor/k8s.io/api/discovery/v1/doc.go
    index 96ae531ce7..01913669ff 100644
    --- a/vendor/k8s.io/api/discovery/v1/doc.go
    +++ b/vendor/k8s.io/api/discovery/v1/doc.go
    @@ -17,6 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=discovery.k8s.io
     
     package v1 // import "k8s.io/api/discovery/v1"
    diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto
    index 6d234017b7..8ddf0dc5d3 100644
    --- a/vendor/k8s.io/api/discovery/v1/generated.proto
    +++ b/vendor/k8s.io/api/discovery/v1/generated.proto
    @@ -54,7 +54,7 @@ message Endpoint {
       // targetRef is a reference to a Kubernetes object that represents this
       // endpoint.
       // +optional
    -  optional k8s.io.api.core.v1.ObjectReference targetRef = 4;
    +  optional .k8s.io.api.core.v1.ObjectReference targetRef = 4;
     
       // deprecatedTopology contains topology information part of the v1beta1
       // API. This field is deprecated, and will be removed when the v1beta1
    @@ -161,7 +161,7 @@ message EndpointPort {
     message EndpointSlice {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // addressType specifies the type of address carried by this EndpointSlice.
       // All addresses in this slice must be the same type. This field is
    @@ -191,7 +191,7 @@ message EndpointSlice {
     message EndpointSliceList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of endpoint slices
       repeated EndpointSlice items = 2;
    diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go
    index 7ebb07ca35..d6a9d0fced 100644
    --- a/vendor/k8s.io/api/discovery/v1/types.go
    +++ b/vendor/k8s.io/api/discovery/v1/types.go
    @@ -23,6 +23,7 @@ import (
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.21
     
     // EndpointSlice represents a subset of the endpoints that implement a service.
     // For a given service there may be multiple EndpointSlice objects, selected by
    @@ -206,6 +207,7 @@ type EndpointPort struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.21
     
     // EndpointSliceList represents a list of endpoint slices
     type EndpointSliceList struct {
    diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..362867c5b9
    --- /dev/null
    +++ b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *EndpointSlice) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 21
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *EndpointSliceList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 21
    +}
    diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
    index ec555a40b3..55828dd97d 100644
    --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
    @@ -54,7 +54,7 @@ message Endpoint {
       // targetRef is a reference to a Kubernetes object that represents this
       // endpoint.
       // +optional
    -  optional k8s.io.api.core.v1.ObjectReference targetRef = 4;
    +  optional .k8s.io.api.core.v1.ObjectReference targetRef = 4;
     
       // topology contains arbitrary topology information associated with the
       // endpoint. These key/value pairs must conform with the label format.
    @@ -153,7 +153,7 @@ message EndpointPort {
     message EndpointSlice {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // addressType specifies the type of address carried by this EndpointSlice.
       // All addresses in this slice must be the same type. This field is
    @@ -183,7 +183,7 @@ message EndpointSlice {
     message EndpointSliceList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of endpoint slices
       repeated EndpointSlice items = 2;
    diff --git a/vendor/k8s.io/api/events/v1/doc.go b/vendor/k8s.io/api/events/v1/doc.go
    index 6e320e0634..5fe700ffcf 100644
    --- a/vendor/k8s.io/api/events/v1/doc.go
    +++ b/vendor/k8s.io/api/events/v1/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=events.k8s.io
     
     package v1 // import "k8s.io/api/events/v1"
    diff --git a/vendor/k8s.io/api/events/v1/generated.proto b/vendor/k8s.io/api/events/v1/generated.proto
    index cfa16b021b..6c7e4cca19 100644
    --- a/vendor/k8s.io/api/events/v1/generated.proto
    +++ b/vendor/k8s.io/api/events/v1/generated.proto
    @@ -39,10 +39,10 @@ message Event {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // eventTime is the time when this Event was first observed. It is required.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2;
     
       // series is data about the Event series this event represents or nil if it's a singleton Event.
       // +optional
    @@ -68,12 +68,12 @@ message Event {
       // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because
       // it acts on some changes in a ReplicaSet object.
       // +optional
    -  optional k8s.io.api.core.v1.ObjectReference regarding = 8;
    +  optional .k8s.io.api.core.v1.ObjectReference regarding = 8;
     
       // related is the optional secondary object for more complex actions. E.g. when regarding object triggers
       // a creation or deletion of related object.
       // +optional
    -  optional k8s.io.api.core.v1.ObjectReference related = 9;
    +  optional .k8s.io.api.core.v1.ObjectReference related = 9;
     
       // note is a human-readable description of the status of this operation.
       // Maximal length of the note is 1kB, but libraries should be prepared to
    @@ -88,15 +88,15 @@ message Event {
     
       // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    -  optional k8s.io.api.core.v1.EventSource deprecatedSource = 12;
    +  optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12;
     
       // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13;
     
       // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14;
     
       // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    @@ -108,7 +108,7 @@ message EventList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated Event items = 2;
    @@ -123,6 +123,6 @@ message EventSeries {
       optional int32 count = 1;
     
       // lastObservedTime is the time when last Event from the series was seen before last heartbeat.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
     }
     
    diff --git a/vendor/k8s.io/api/events/v1/types.go b/vendor/k8s.io/api/events/v1/types.go
    index e01a2b21e7..86b12eee15 100644
    --- a/vendor/k8s.io/api/events/v1/types.go
    +++ b/vendor/k8s.io/api/events/v1/types.go
    @@ -23,6 +23,7 @@ import (
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
     // Events have a limited retention time and triggers and messages may evolve
    @@ -109,6 +110,7 @@ type EventSeries struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // EventList is a list of Event objects.
     type EventList struct {
    diff --git a/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..5217d1ac69
    --- /dev/null
    +++ b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Event) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *EventList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto
    index de60bdc3e8..fbdb309701 100644
    --- a/vendor/k8s.io/api/events/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto
    @@ -39,10 +39,10 @@ message Event {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // eventTime is the time when this Event was first observed. It is required.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2;
     
       // series is data about the Event series this event represents or nil if it's a singleton Event.
       // +optional
    @@ -72,12 +72,12 @@ message Event {
       // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because
       // it acts on some changes in a ReplicaSet object.
       // +optional
    -  optional k8s.io.api.core.v1.ObjectReference regarding = 8;
    +  optional .k8s.io.api.core.v1.ObjectReference regarding = 8;
     
       // related is the optional secondary object for more complex actions. E.g. when regarding object triggers
       // a creation or deletion of related object.
       // +optional
    -  optional k8s.io.api.core.v1.ObjectReference related = 9;
    +  optional .k8s.io.api.core.v1.ObjectReference related = 9;
     
       // note is a human-readable description of the status of this operation.
       // Maximal length of the note is 1kB, but libraries should be prepared to
    @@ -92,15 +92,15 @@ message Event {
     
       // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    -  optional k8s.io.api.core.v1.EventSource deprecatedSource = 12;
    +  optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12;
     
       // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13;
     
       // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14;
     
       // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
       // +optional
    @@ -112,7 +112,7 @@ message EventList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated Event items = 2;
    @@ -125,6 +125,6 @@ message EventSeries {
       optional int32 count = 1;
     
       // lastObservedTime is the time when last Event from the series was seen before last heartbeat.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
     }
     
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
    index 60effc8f71..9bbcaa0e26 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
    @@ -37,7 +37,7 @@ message DaemonSet {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // The desired behavior of this daemon set.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -63,7 +63,7 @@ message DaemonSetCondition {
     
       // Last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -79,7 +79,7 @@ message DaemonSetList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // A list of daemon sets.
       repeated DaemonSet items = 2;
    @@ -92,14 +92,14 @@ message DaemonSetSpec {
       // If empty, defaulted to labels on Pod template.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
     
       // An object that describes the pod that will be created.
       // The DaemonSet will create exactly one copy of this pod on every node
       // that matches the template's node selector (or on every node if no node
       // selector is specified).
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 2;
     
       // An update strategy to replace existing DaemonSet pods with new pods.
       // +optional
    @@ -205,7 +205,7 @@ message DaemonSetUpdateStrategy {
     message Deployment {
       // Standard object metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the Deployment.
       // +optional
    @@ -225,10 +225,10 @@ message DeploymentCondition {
       optional string status = 2;
     
       // The last time this condition was updated.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
     
       // Last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
     
       // The reason for the condition's last transition.
       optional string reason = 4;
    @@ -241,7 +241,7 @@ message DeploymentCondition {
     message DeploymentList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of Deployments.
       repeated Deployment items = 2;
    @@ -271,10 +271,10 @@ message DeploymentSpec {
       // Label selector for pods. Existing ReplicaSets whose pods are
       // selected by this will be the ones affected by this deployment.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // Template describes the pods that will be created.
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     
       // The deployment strategy to use to replace existing pods with new ones.
       // +optional
    @@ -440,7 +440,7 @@ message Ingress {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec is the desired state of the Ingress.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -461,13 +461,13 @@ message IngressBackend {
     
       // Specifies the port of the referenced service.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
     
       // Resource is an ObjectRef to another Kubernetes resource in the namespace
       // of the Ingress object. If resource is specified, serviceName and servicePort
       // must not be specified.
       // +optional
    -  optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
    +  optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
     }
     
     // IngressList is a collection of Ingress.
    @@ -475,7 +475,7 @@ message IngressList {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of Ingress.
       repeated Ingress items = 2;
    @@ -651,7 +651,7 @@ message NetworkPolicy {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior for this NetworkPolicy.
       // +optional
    @@ -710,7 +710,7 @@ message NetworkPolicyList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of schema objects.
       repeated NetworkPolicy items = 2;
    @@ -725,7 +725,7 @@ message NetworkPolicyPeer {
       // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
       // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
     
       // Selects Namespaces using cluster-scoped labels. This field follows standard label
       // selector semantics; if present but empty, it selects all namespaces.
    @@ -734,7 +734,7 @@ message NetworkPolicyPeer {
       // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
       // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
     
       // IPBlock defines policy on a particular IPBlock. If this field is set then
       // neither of the other fields can be.
    @@ -754,7 +754,7 @@ message NetworkPolicyPort {
       // numbers.
       // If present, only traffic on the specified protocol AND port will be matched.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
     
       // If set, indicates that the range of ports from port to endPort, inclusive,
       // should be allowed by the policy. This field cannot be defined if the port field
    @@ -771,7 +771,7 @@ message NetworkPolicySpec {
       // same set of pods.  In this case, the ingress rules for each are combined additively.
       // This field is NOT optional and follows standard label selector semantics.
       // An empty podSelector matches all pods in this namespace.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
     
       // List of ingress rules to be applied to the selected pods.
       // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
    @@ -818,7 +818,7 @@ message ReplicaSet {
       // be the same as the Pod(s) that the ReplicaSet manages.
       // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Spec defines the specification of the desired behavior of the ReplicaSet.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -844,7 +844,7 @@ message ReplicaSetCondition {
     
       // The last time the condition transitioned from one status to another.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -860,7 +860,7 @@ message ReplicaSetList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // List of ReplicaSets.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
    @@ -887,13 +887,13 @@ message ReplicaSetSpec {
       // Label keys and values that must match in order to be controlled by this replica set.
       // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // Template is the object that describes the pod that will be created if
       // insufficient replicas are detected.
       // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
       // +optional
    -  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
    +  optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
     }
     
     // ReplicaSetStatus represents the current status of a ReplicaSet.
    @@ -951,7 +951,7 @@ message RollingUpdateDaemonSet {
       // 70% of original number of DaemonSet pods are available at all times during
       // the update.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
     
       // The maximum number of nodes with an existing available DaemonSet pod that
        // can have an updated DaemonSet pod during an update.
    @@ -973,7 +973,7 @@ message RollingUpdateDaemonSet {
       // cause evictions during disruption.
       // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
     }
     
     // Spec to control the desired behavior of rolling update.
    @@ -989,7 +989,7 @@ message RollingUpdateDeployment {
       // that the total number of pods available at all times during the update is at
       // least 70% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
     
       // The maximum number of pods that can be scheduled above the desired number of
       // pods.
    @@ -1003,14 +1003,14 @@ message RollingUpdateDeployment {
       // new RC can be scaled up further, ensuring that total number of pods running
       // at any time during the update is at most 130% of desired pods.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
     }
     
     // represents a scaling request for a resource.
     message Scale {
       // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
       // +optional
    diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
    index cc2deadac0..09f58692f4 100644
    --- a/vendor/k8s.io/api/extensions/v1beta1/types.go
    +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
    @@ -775,7 +775,7 @@ type IngressRule struct {
     	// default backend, is left to the controller fulfilling the Ingress. Http is
     	// currently the only supported IngressRuleValue.
     	// +optional
    -	IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"`
    +	IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"`
     }
     
     // IngressRuleValue represents a rule to apply against incoming requests. If the
    diff --git a/vendor/k8s.io/api/flowcontrol/v1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go
    index 1bc51d4066..c9e7db1589 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1/doc.go
    +++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go
    @@ -17,6 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     // +groupName=flowcontrol.apiserver.k8s.io
     
    diff --git a/vendor/k8s.io/api/flowcontrol/v1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1/generated.proto
    index a5c6f4fc4f..33a135889e 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1/generated.proto
    +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.proto
    @@ -76,7 +76,7 @@ message FlowSchema {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a FlowSchema.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -101,7 +101,7 @@ message FlowSchemaCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -115,7 +115,7 @@ message FlowSchemaList {
       // `metadata` is the standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of FlowSchemas.
       repeated FlowSchema items = 2;
    @@ -302,7 +302,7 @@ message PriorityLevelConfiguration {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a "request-priority".
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -327,7 +327,7 @@ message PriorityLevelConfigurationCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -341,7 +341,7 @@ message PriorityLevelConfigurationList {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of request-priorities.
       repeated PriorityLevelConfiguration items = 2;
    diff --git a/vendor/k8s.io/api/flowcontrol/v1/types.go b/vendor/k8s.io/api/flowcontrol/v1/types.go
    index e62d23280e..ad72bcee22 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1/types.go
    +++ b/vendor/k8s.io/api/flowcontrol/v1/types.go
    @@ -106,6 +106,7 @@ const (
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.29
     
     // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
     // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
    @@ -126,6 +127,7 @@ type FlowSchema struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.29
     
     // FlowSchemaList is a list of FlowSchema objects.
     type FlowSchemaList struct {
    @@ -381,6 +383,7 @@ type FlowSchemaConditionType string
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.29
     
     // PriorityLevelConfiguration represents the configuration of a priority level.
     type PriorityLevelConfiguration struct {
    @@ -400,6 +403,7 @@ type PriorityLevelConfiguration struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.29
     
     // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.
     type PriorityLevelConfigurationList struct {
    diff --git a/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..fbab9868c7
    --- /dev/null
    +++ b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,46 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 29
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 29
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 29
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 29
    +}
    diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
    index 04b54820c7..61ed3833ae 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
    @@ -76,7 +76,7 @@ message FlowSchema {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a FlowSchema.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -101,7 +101,7 @@ message FlowSchemaCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -115,7 +115,7 @@ message FlowSchemaList {
       // `metadata` is the standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of FlowSchemas.
       repeated FlowSchema items = 2;
    @@ -295,7 +295,7 @@ message PriorityLevelConfiguration {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a "request-priority".
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of request-priorities.
       repeated PriorityLevelConfiguration items = 2;
    diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto
    index a832114afe..d6073fc925 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto
    +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto
    @@ -76,7 +76,7 @@ message FlowSchema {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a FlowSchema.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -101,7 +101,7 @@ message FlowSchemaCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -115,7 +115,7 @@ message FlowSchemaList {
       // `metadata` is the standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of FlowSchemas.
       repeated FlowSchema items = 2;
    @@ -295,7 +295,7 @@ message PriorityLevelConfiguration {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a "request-priority".
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of request-priorities.
       repeated PriorityLevelConfiguration items = 2;
    diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto
    index eda0f7829e..c6504d4353 100644
    --- a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto
    +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto
    @@ -76,7 +76,7 @@ message FlowSchema {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a FlowSchema.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -101,7 +101,7 @@ message FlowSchemaCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -115,7 +115,7 @@ message FlowSchemaList {
       // `metadata` is the standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of FlowSchemas.
       repeated FlowSchema items = 2;
    @@ -297,7 +297,7 @@ message PriorityLevelConfiguration {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // `spec` is the specification of the desired behavior of a "request-priority".
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -322,7 +322,7 @@ message PriorityLevelConfigurationCondition {
       optional string status = 2;
     
       // `lastTransitionTime` is the last time the condition transitioned from one status to another.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
     
       // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
       optional string reason = 4;
    @@ -336,7 +336,7 @@ message PriorityLevelConfigurationList {
       // `metadata` is the standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // `items` is a list of request-priorities.
       repeated PriorityLevelConfiguration items = 2;
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
    new file mode 100644
    index 0000000000..5db6d52d47
    --- /dev/null
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright 2016 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// +k8s:deepcopy-gen=package
    +// +k8s:protobuf-gen=package
    +// +k8s:openapi-gen=true
    +
    +// +groupName=imagepolicy.k8s.io
    +
    +package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1"
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
    new file mode 100644
    index 0000000000..57732a5164
    --- /dev/null
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
    @@ -0,0 +1,1374 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by protoc-gen-gogo. DO NOT EDIT.
    +// source: k8s.io/api/imagepolicy/v1alpha1/generated.proto
    +
    +package v1alpha1
    +
    +import (
    +	fmt "fmt"
    +
    +	io "io"
    +
    +	proto "github.com/gogo/protobuf/proto"
    +	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
    +
    +	math "math"
    +	math_bits "math/bits"
    +	reflect "reflect"
    +	strings "strings"
    +)
    +
    +// Reference imports to suppress errors if they are not otherwise used.
    +var _ = proto.Marshal
    +var _ = fmt.Errorf
    +var _ = math.Inf
    +
    +// This is a compile-time assertion to ensure that this generated file
    +// is compatible with the proto package it is being compiled against.
    +// A compilation error at this line likely means your copy of the
    +// proto package needs to be updated.
    +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
    +
    +func (m *ImageReview) Reset()      { *m = ImageReview{} }
    +func (*ImageReview) ProtoMessage() {}
    +func (*ImageReview) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7620d1538838ac6f, []int{0}
    +}
    +func (m *ImageReview) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ImageReview) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ImageReview.Merge(m, src)
    +}
    +func (m *ImageReview) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ImageReview) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ImageReview.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ImageReview proto.InternalMessageInfo
    +
    +func (m *ImageReviewContainerSpec) Reset()      { *m = ImageReviewContainerSpec{} }
    +func (*ImageReviewContainerSpec) ProtoMessage() {}
    +func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7620d1538838ac6f, []int{1}
    +}
    +func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src)
    +}
    +func (m *ImageReviewContainerSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo
    +
    +func (m *ImageReviewSpec) Reset()      { *m = ImageReviewSpec{} }
    +func (*ImageReviewSpec) ProtoMessage() {}
    +func (*ImageReviewSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7620d1538838ac6f, []int{2}
    +}
    +func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ImageReviewSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ImageReviewSpec.Merge(m, src)
    +}
    +func (m *ImageReviewSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ImageReviewSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo
    +
    +func (m *ImageReviewStatus) Reset()      { *m = ImageReviewStatus{} }
    +func (*ImageReviewStatus) ProtoMessage() {}
    +func (*ImageReviewStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_7620d1538838ac6f, []int{3}
    +}
    +func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ImageReviewStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ImageReviewStatus.Merge(m, src)
    +}
    +func (m *ImageReviewStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ImageReviewStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ImageReviewStatus proto.InternalMessageInfo
    +
    +func init() {
    +	proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview")
    +	proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec")
    +	proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec")
    +	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry")
    +	proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus")
    +	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry")
    +}
    +
    +func init() {
    +	proto.RegisterFile("k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_7620d1538838ac6f)
    +}
    +
    +var fileDescriptor_7620d1538838ac6f = []byte{
    +	// 593 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4f, 0x6f, 0xd3, 0x30,
    +	0x18, 0xc6, 0x9b, 0x74, 0xff, 0xea, 0x02, 0xeb, 0x0c, 0x48, 0x51, 0x0f, 0xe9, 0x54, 0x24, 0x34,
    +	0x0e, 0xd8, 0xb4, 0x42, 0x68, 0x70, 0x00, 0x35, 0xd3, 0x24, 0x38, 0x00, 0x92, 0xb9, 0xed, 0x84,
    +	0x9b, 0x9a, 0xd4, 0xb4, 0x89, 0xa3, 0xd8, 0xe9, 0xe8, 0x8d, 0x4f, 0x80, 0xf8, 0x06, 0x7c, 0x11,
    +	0x3e, 0x40, 0x8f, 0x3b, 0xee, 0x34, 0xd1, 0x70, 0xe4, 0x4b, 0xa0, 0x38, 0x69, 0x13, 0xda, 0xa1,
    +	0xa9, 0xb7, 0xbc, 0xef, 0xeb, 0xe7, 0xf7, 0x3e, 0x79, 0x62, 0x05, 0xe0, 0xd1, 0xb1, 0x44, 0x5c,
    +	0x60, 0x1a, 0x72, 0xcc, 0x7d, 0xea, 0xb1, 0x50, 0x8c, 0xb9, 0x3b, 0xc5, 0x93, 0x0e, 0x1d, 0x87,
    +	0x43, 0xda, 0xc1, 0x1e, 0x0b, 0x58, 0x44, 0x15, 0x1b, 0xa0, 0x30, 0x12, 0x4a, 0xc0, 0x56, 0x26,
    +	0x40, 0x34, 0xe4, 0xa8, 0x24, 0x40, 0x0b, 0x41, 0xf3, 0xb1, 0xc7, 0xd5, 0x30, 0xee, 0x23, 0x57,
    +	0xf8, 0xd8, 0x13, 0x9e, 0xc0, 0x5a, 0xd7, 0x8f, 0x3f, 0xe9, 0x4a, 0x17, 0xfa, 0x29, 0xe3, 0x35,
    +	0x9f, 0x16, 0x06, 0x7c, 0xea, 0x0e, 0x79, 0xc0, 0xa2, 0x29, 0x0e, 0x47, 0x5e, 0xda, 0x90, 0xd8,
    +	0x67, 0x8a, 0xe2, 0xc9, 0x9a, 0x8b, 0x26, 0xfe, 0x9f, 0x2a, 0x8a, 0x03, 0xc5, 0x7d, 0xb6, 0x26,
    +	0x78, 0x76, 0x93, 0x40, 0xba, 0x43, 0xe6, 0xd3, 0x55, 0x5d, 0xfb, 0x87, 0x09, 0xea, 0x6f, 0xd2,
    +	0xd7, 0x24, 0x6c, 0xc2, 0xd9, 0x39, 0xfc, 0x08, 0xf6, 0x52, 0x4f, 0x03, 0xaa, 0xa8, 0x65, 0x1c,
    +	0x1a, 0x47, 0xf5, 0xee, 0x13, 0x54, 0x24, 0xb2, 0x44, 0xa3, 0x70, 0xe4, 0xa5, 0x0d, 0x89, 0xd2,
    +	0xd3, 0x68, 0xd2, 0x41, 0xef, 0xfb, 0x9f, 0x99, 0xab, 0xde, 0x32, 0x45, 0x1d, 0x38, 0xbb, 0x6a,
    +	0x55, 0x92, 0xab, 0x16, 0x28, 0x7a, 0x64, 0x49, 0x85, 0x04, 0x6c, 0xc9, 0x90, 0xb9, 0x96, 0xb9,
    +	0x46, 0xbf, 0x36, 0x6f, 0x54, 0x72, 0xf7, 0x21, 0x64, 0xae, 0x73, 0x2b, 0xa7, 0x6f, 0xa5, 0x15,
    +	0xd1, 0x2c, 0x78, 0x06, 0x76, 0xa4, 0xa2, 0x2a, 0x96, 0x56, 0x55, 0x53, 0xbb, 0x1b, 0x51, 0xb5,
    +	0xd2, 0xb9, 0x93, 0x73, 0x77, 0xb2, 0x9a, 0xe4, 0xc4, 0xf6, 0x2b, 0x60, 0x95, 0x0e, 0x9f, 0x88,
    +	0x40, 0xd1, 0x34, 0x82, 0x74, 0x3b, 0x7c, 0x00, 0xb6, 0x35, 0x5d, 0x47, 0x55, 0x73, 0x6e, 0xe7,
    +	0x88, 0xed, 0x4c, 0x90, 0xcd, 0xda, 0x7f, 0x4c, 0xb0, 0xbf, 0xf2, 0x12, 0xd0, 0x07, 0xc0, 0x5d,
    +	0x90, 0xa4, 0x65, 0x1c, 0x56, 0x8f, 0xea, 0xdd, 0xe7, 0x9b, 0x98, 0xfe, 0xc7, 0x47, 0x91, 0xf8,
    +	0xb2, 0x2d, 0x49, 0x69, 0x01, 0xfc, 0x02, 0xea, 0x34, 0x08, 0x84, 0xa2, 0x8a, 0x8b, 0x40, 0x5a,
    +	0xa6, 0xde, 0xd7, 0xdb, 0x34, 0x7a, 0xd4, 0x2b, 0x18, 0xa7, 0x81, 0x8a, 0xa6, 0xce, 0xdd, 0x7c,
    +	0x6f, 0xbd, 0x34, 0x21, 0xe5, 0x55, 0x10, 0x83, 0x5a, 0x40, 0x7d, 0x26, 0x43, 0xea, 0x32, 0xfd,
    +	0x71, 0x6a, 0xce, 0x41, 0x2e, 0xaa, 0xbd, 0x5b, 0x0c, 0x48, 0x71, 0xa6, 0xf9, 0x12, 0x34, 0x56,
    +	0xd7, 0xc0, 0x06, 0xa8, 0x8e, 0xd8, 0x34, 0x0b, 0x99, 0xa4, 0x8f, 0xf0, 0x1e, 0xd8, 0x9e, 0xd0,
    +	0x71, 0xcc, 0xf4, 0x2d, 0xaa, 0x91, 0xac, 0x78, 0x61, 0x1e, 0x1b, 0xed, 0x9f, 0x26, 0x38, 0x58,
    +	0xfb, 0xb8, 0xf0, 0x11, 0xd8, 0xa5, 0xe3, 0xb1, 0x38, 0x67, 0x03, 0x4d, 0xd9, 0x73, 0xf6, 0x73,
    +	0x13, 0xbb, 0xbd, 0xac, 0x4d, 0x16, 0x73, 0xf8, 0x10, 0xec, 0x44, 0x8c, 0x4a, 0x11, 0x64, 0xec,
    +	0xe2, 0x5e, 0x10, 0xdd, 0x25, 0xf9, 0x14, 0x7e, 0x33, 0x40, 0x83, 0xc6, 0x03, 0xae, 0x4a, 0x76,
    +	0xad, 0xaa, 0x4e, 0xf6, 0xf5, 0xe6, 0xd7, 0x0f, 0xf5, 0x56, 0x50, 0x59, 0xc0, 0x56, 0xbe, 0xbc,
    +	0xb1, 0x3a, 0x26, 0x6b, 0xbb, 0x9b, 0x27, 0xe0, 0xfe, 0xb5, 0x90, 0x4d, 0xe2, 0x73, 0x4e, 0x67,
    +	0x73, 0xbb, 0x72, 0x31, 0xb7, 0x2b, 0x97, 0x73, 0xbb, 0xf2, 0x35, 0xb1, 0x8d, 0x59, 0x62, 0x1b,
    +	0x17, 0x89, 0x6d, 0x5c, 0x26, 0xb6, 0xf1, 0x2b, 0xb1, 0x8d, 0xef, 0xbf, 0xed, 0xca, 0x59, 0xeb,
    +	0x86, 0xbf, 0xea, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x59, 0x86, 0x92, 0x15, 0x77, 0x05, 0x00,
    +	0x00,
    +}
    +
    +func (m *ImageReview) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Image)
    +	copy(dAtA[i:], m.Image)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Namespace)
    +	copy(dAtA[i:], m.Namespace)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    +	i--
    +	dAtA[i] = 0x1a
    +	if len(m.Annotations) > 0 {
    +		keysForAnnotations := make([]string, 0, len(m.Annotations))
    +		for k := range m.Annotations {
    +			keysForAnnotations = append(keysForAnnotations, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
    +		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Annotations[string(keysForAnnotations[iNdEx])]
    +			baseI := i
    +			i -= len(v)
    +			copy(dAtA[i:], v)
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForAnnotations[iNdEx])
    +			copy(dAtA[i:], keysForAnnotations[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Containers) > 0 {
    +		for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.AuditAnnotations) > 0 {
    +		keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations))
    +		for k := range m.AuditAnnotations {
    +			keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
    +		for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
    +			baseI := i
    +			i -= len(v)
    +			copy(dAtA[i:], v)
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForAuditAnnotations[iNdEx])
    +			copy(dAtA[i:], keysForAuditAnnotations[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	i -= len(m.Reason)
    +	copy(dAtA[i:], m.Reason)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
    +	i--
    +	dAtA[i] = 0x12
    +	i--
    +	if m.Allowed {
    +		dAtA[i] = 1
    +	} else {
    +		dAtA[i] = 0
    +	}
    +	i--
    +	dAtA[i] = 0x8
    +	return len(dAtA) - i, nil
    +}
    +
    +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    +	offset -= sovGenerated(v)
    +	base := offset
    +	for v >= 1<<7 {
    +		dAtA[offset] = uint8(v&0x7f | 0x80)
    +		v >>= 7
    +		offset++
    +	}
    +	dAtA[offset] = uint8(v)
    +	return base
    +}
    +func (m *ImageReview) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ImageReviewContainerSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Image)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ImageReviewSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Containers) > 0 {
    +		for _, e := range m.Containers {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Annotations) > 0 {
    +		for k, v := range m.Annotations {
    +			_ = k
    +			_ = v
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	l = len(m.Namespace)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ImageReviewStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	n += 2
    +	l = len(m.Reason)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.AuditAnnotations) > 0 {
    +		for k, v := range m.AuditAnnotations {
    +			_ = k
    +			_ = v
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *ImageReview) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ImageReview{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ImageReviewContainerSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ImageReviewContainerSpec{`,
    +		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ImageReviewSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForContainers := "[]ImageReviewContainerSpec{"
    +	for _, f := range this.Containers {
    +		repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForContainers += "}"
    +	keysForAnnotations := make([]string, 0, len(this.Annotations))
    +	for k := range this.Annotations {
    +		keysForAnnotations = append(keysForAnnotations, k)
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
    +	mapStringForAnnotations := "map[string]string{"
    +	for _, k := range keysForAnnotations {
    +		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
    +	}
    +	mapStringForAnnotations += "}"
    +	s := strings.Join([]string{`&ImageReviewSpec{`,
    +		`Containers:` + repeatedStringForContainers + `,`,
    +		`Annotations:` + mapStringForAnnotations + `,`,
    +		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ImageReviewStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations))
    +	for k := range this.AuditAnnotations {
    +		keysForAuditAnnotations = append(keysForAuditAnnotations, k)
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
    +	mapStringForAuditAnnotations := "map[string]string{"
    +	for _, k := range keysForAuditAnnotations {
    +		mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
    +	}
    +	mapStringForAuditAnnotations += "}"
    +	s := strings.Join([]string{`&ImageReviewStatus{`,
    +		`Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
    +		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
    +		`AuditAnnotations:` + mapStringForAuditAnnotations + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func valueToStringGenerated(v interface{}) string {
    +	rv := reflect.ValueOf(v)
    +	if rv.IsNil() {
    +		return "nil"
    +	}
    +	pv := reflect.Indirect(rv).Interface()
    +	return fmt.Sprintf("*%v", pv)
    +}
    +func (m *ImageReview) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ImageReview: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Image = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Containers = append(m.Containers, ImageReviewContainerSpec{})
    +			if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Annotations == nil {
    +				m.Annotations = make(map[string]string)
    +			}
    +			var mapkey string
    +			var mapvalue string
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var stringLenmapvalue uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapvalue |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapvalue := int(stringLenmapvalue)
    +					if intStringLenmapvalue < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
    +					if postStringIndexmapvalue < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapvalue > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
    +					iNdEx = postStringIndexmapvalue
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Annotations[mapkey] = mapvalue
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Namespace = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.Allowed = bool(v != 0)
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Reason = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.AuditAnnotations == nil {
    +				m.AuditAnnotations = make(map[string]string)
    +			}
    +			var mapkey string
    +			var mapvalue string
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var stringLenmapvalue uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapvalue |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapvalue := int(stringLenmapvalue)
    +					if intStringLenmapvalue < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
    +					if postStringIndexmapvalue < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapvalue > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
    +					iNdEx = postStringIndexmapvalue
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.AuditAnnotations[mapkey] = mapvalue
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func skipGenerated(dAtA []byte) (n int, err error) {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	depth := 0
    +	for iNdEx < l {
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return 0, ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return 0, io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= (uint64(b) & 0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		wireType := int(wire & 0x7)
    +		switch wireType {
    +		case 0:
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				iNdEx++
    +				if dAtA[iNdEx-1] < 0x80 {
    +					break
    +				}
    +			}
    +		case 1:
    +			iNdEx += 8
    +		case 2:
    +			var length int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				length |= (int(b) & 0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if length < 0 {
    +				return 0, ErrInvalidLengthGenerated
    +			}
    +			iNdEx += length
    +		case 3:
    +			depth++
    +		case 4:
    +			if depth == 0 {
    +				return 0, ErrUnexpectedEndOfGroupGenerated
    +			}
    +			depth--
    +		case 5:
    +			iNdEx += 4
    +		default:
    +			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
    +		}
    +		if iNdEx < 0 {
    +			return 0, ErrInvalidLengthGenerated
    +		}
    +		if depth == 0 {
    +			return iNdEx, nil
    +		}
    +	}
    +	return 0, io.ErrUnexpectedEOF
    +}
    +
    +var (
    +	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
    +	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
    +	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
    +)
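
For orientation, the generated marshalers above allocate `Size()` bytes and fill them back-to-front via `MarshalToSizedBuffer`, while `Unmarshal` walks the buffer tag-by-tag. A minimal round-trip sketch follows, assuming the vendored `k8s.io/api/imagepolicy/v1alpha1` package resolves from this module's vendor tree; the object name `demo` and the image reference `registry.example/app:1.0` are illustrative placeholders.

```go
package main

import (
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := imagepolicy.ImageReview{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: imagepolicy.ImageReviewSpec{
			Containers: []imagepolicy.ImageReviewContainerSpec{{Image: "registry.example/app:1.0"}},
			Namespace:  "default",
		},
	}

	// Marshal allocates Size() bytes and fills them in reverse via MarshalToSizedBuffer.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal decodes the same wire format back into a fresh value.
	var out imagepolicy.ImageReview
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.Containers[0].Image, len(raw))
}
```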
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
    new file mode 100644
    index 0000000000..5ea5c0ec8e
    --- /dev/null
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +
    +// This file was autogenerated by go-to-protobuf. Do not edit it manually!
    +
    +syntax = "proto2";
    +
    +package k8s.io.api.imagepolicy.v1alpha1;
    +
    +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    +
    +// Package-wide variables from generator "generated".
    +option go_package = "k8s.io/api/imagepolicy/v1alpha1";
    +
    +// ImageReview checks if the set of images in a pod are allowed.
    +message ImageReview {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Spec holds information about the pod being evaluated
    +  optional ImageReviewSpec spec = 2;
    +
    +  // Status is filled in by the backend and indicates whether the pod should be allowed.
    +  // +optional
    +  optional ImageReviewStatus status = 3;
    +}
    +
    +// ImageReviewContainerSpec is a description of a container within the pod creation request.
    +message ImageReviewContainerSpec {
    +  // This can be in the form image:tag or image@SHA:012345679abcdef.
    +  // +optional
    +  optional string image = 1;
    +}
    +
    +// ImageReviewSpec is a description of the pod creation request.
    +message ImageReviewSpec {
    +  // Containers is a list of a subset of the information in each container of the Pod being created.
    +  // +optional
    +  // +listType=atomic
    +  repeated ImageReviewContainerSpec containers = 1;
    +
    +  // Annotations is a list of key-value pairs extracted from the Pod's annotations.
    +  // It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
    +  // It is up to each webhook backend to determine how to interpret these annotations, if at all.
    +  // +optional
+  map<string, string> annotations = 2;
    +
    +  // Namespace is the namespace the pod is being created in.
    +  // +optional
    +  optional string namespace = 3;
    +}
    +
    +// ImageReviewStatus is the result of the review for the pod creation request.
    +message ImageReviewStatus {
    +  // Allowed indicates that all images were allowed to be run.
    +  optional bool allowed = 1;
    +
    +  // Reason should be empty unless Allowed is false in which case it
    +  // may contain a short description of what is wrong.  Kubernetes
    +  // may truncate excessively long errors when displaying to the user.
    +  // +optional
    +  optional string reason = 2;
    +
    +  // AuditAnnotations will be added to the attributes object of the
    +  // admission controller request using 'AddAnnotation'.  The keys should
    +  // be prefix-less (i.e., the admission controller will add an
    +  // appropriate prefix).
    +  // +optional
+  map<string, string> auditAnnotations = 3;
    +}
    +
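
The field numbers and types declared in this proto are what drive the literal key bytes (0xa, 0x12, 0x1a, 0x8) written by the generated marshalers earlier in the diff: each key is `fieldNumber<<3 | wireType`, with wire type 2 for length-delimited fields (messages, strings, maps) and 0 for varints (the bool). A small standalone sketch of that arithmetic, using only the standard library:

```go
package main

import "fmt"

// key mirrors how protobuf field keys are formed: (fieldNumber<<3 | wireType).
func key(fieldNumber, wireType int) byte { return byte(fieldNumber<<3 | wireType) }

func main() {
	fmt.Printf("metadata (field 1, len-delimited): %#x\n", key(1, 2)) // 0xa
	fmt.Printf("spec     (field 2, len-delimited): %#x\n", key(2, 2)) // 0x12
	fmt.Printf("status   (field 3, len-delimited): %#x\n", key(3, 2)) // 0x1a
	fmt.Printf("allowed  (field 1, varint bool):   %#x\n", key(1, 0)) // 0x8
}
```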
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
    new file mode 100644
    index 0000000000..477571bbb2
    --- /dev/null
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
    @@ -0,0 +1,51 @@
    +/*
    +Copyright 2016 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha1
    +
    +import (
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	"k8s.io/apimachinery/pkg/runtime/schema"
    +)
    +
    +// GroupName is the group name for this API.
    +const GroupName = "imagepolicy.k8s.io"
    +
    +// SchemeGroupVersion is group version used to register these objects
    +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
    +
    +// Resource takes an unqualified resource and returns a Group qualified GroupResource
    +func Resource(resource string) schema.GroupResource {
    +	return SchemeGroupVersion.WithResource(resource).GroupResource()
    +}
    +
    +var (
    +	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
    +	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
    +	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
    +	localSchemeBuilder = &SchemeBuilder
    +	AddToScheme        = localSchemeBuilder.AddToScheme
    +)
    +
    +// Adds the list of known types to the given scheme.
    +func addKnownTypes(scheme *runtime.Scheme) error {
    +	scheme.AddKnownTypes(SchemeGroupVersion,
    +		&ImageReview{},
    +	)
    +	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    +	return nil
    +}
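
`AddToScheme` is what callers use to make the `imagepolicy.k8s.io/v1alpha1` kind known to a `runtime.Scheme` so codecs can resolve it. A minimal usage sketch, again assuming the vendored package is importable:

```go
package main

import (
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers ImageReview (plus the meta/v1 types) under imagepolicy.k8s.io/v1alpha1.
	if err := imagepolicy.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme can now map the Go type back to its GroupVersionKind.
	gvks, _, err := scheme.ObjectKinds(&imagepolicy.ImageReview{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0].Group, gvks[0].Version, gvks[0].Kind)
}
```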
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go
    new file mode 100644
    index 0000000000..19ac2b536f
    --- /dev/null
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go
    @@ -0,0 +1,83 @@
    +/*
    +Copyright 2016 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha1
    +
    +import (
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +)
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +genclient:noVerbs
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +
    +// ImageReview checks if the set of images in a pod are allowed.
    +type ImageReview struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Spec holds information about the pod being evaluated
    +	Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
    +
    +	// Status is filled in by the backend and indicates whether the pod should be allowed.
    +	// +optional
    +	Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    +}
    +
    +// ImageReviewSpec is a description of the pod creation request.
    +type ImageReviewSpec struct {
    +	// Containers is a list of a subset of the information in each container of the Pod being created.
    +	// +optional
    +	// +listType=atomic
    +	Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"`
    +	// Annotations is a list of key-value pairs extracted from the Pod's annotations.
    +	// It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
    +	// It is up to each webhook backend to determine how to interpret these annotations, if at all.
    +	// +optional
    +	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"`
    +	// Namespace is the namespace the pod is being created in.
    +	// +optional
    +	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
    +}
    +
    +// ImageReviewContainerSpec is a description of a container within the pod creation request.
    +type ImageReviewContainerSpec struct {
    +	// This can be in the form image:tag or image@SHA:012345679abcdef.
    +	// +optional
    +	Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
    +	// In future, we may add command line overrides, exec health check command lines, and so on.
    +}
    +
    +// ImageReviewStatus is the result of the review for the pod creation request.
    +type ImageReviewStatus struct {
    +	// Allowed indicates that all images were allowed to be run.
    +	Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"`
    +	// Reason should be empty unless Allowed is false in which case it
    +	// may contain a short description of what is wrong.  Kubernetes
    +	// may truncate excessively long errors when displaying to the user.
    +	// +optional
    +	Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
    +	// AuditAnnotations will be added to the attributes object of the
    +	// admission controller request using 'AddAnnotation'.  The keys should
    +	// be prefix-less (i.e., the admission controller will add an
    +	// appropriate prefix).
    +	// +optional
    +	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,3,rep,name=auditAnnotations"`
    +}
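
The JSON tags on these types define the payload exchanged with an image-policy webhook backend. A hedged sketch of the serialized shape, where the image reference, annotation key, and namespace are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	review := imagepolicy.ImageReview{
		TypeMeta: metav1.TypeMeta{APIVersion: "imagepolicy.k8s.io/v1alpha1", Kind: "ImageReview"},
		Spec: imagepolicy.ImageReviewSpec{
			Containers:  []imagepolicy.ImageReviewContainerSpec{{Image: "registry.example/app:1.0"}},
			Annotations: map[string]string{"mycontroller.image-policy.k8s.io/ticket": "1234"},
			Namespace:   "default",
		},
		Status: imagepolicy.ImageReviewStatus{Allowed: true},
	}
	// Pretty-print the wire JSON that a webhook backend would receive/return.
	out, err := json.MarshalIndent(review, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```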
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go
    new file mode 100644
    index 0000000000..dadf95e1d5
    --- /dev/null
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go
    @@ -0,0 +1,72 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha1
    +
    +// This file contains a collection of methods that can be used from go-restful to
    +// generate Swagger API documentation for its models. Please read this PR for more
    +// information on the implementation: https://github.com/emicklei/go-restful/pull/215
    +//
    +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
    +// they are on one line! For multiple line or blocks that you want to ignore use ---.
    +// Any context after a --- is ignored.
    +//
    +// Those methods can be generated by using hack/update-codegen.sh
    +
    +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
    +var map_ImageReview = map[string]string{
    +	"":         "ImageReview checks if the set of images in a pod are allowed.",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"spec":     "Spec holds information about the pod being evaluated",
    +	"status":   "Status is filled in by the backend and indicates whether the pod should be allowed.",
    +}
    +
    +func (ImageReview) SwaggerDoc() map[string]string {
    +	return map_ImageReview
    +}
    +
    +var map_ImageReviewContainerSpec = map[string]string{
    +	"":      "ImageReviewContainerSpec is a description of a container within the pod creation request.",
    +	"image": "This can be in the form image:tag or image@SHA:012345679abcdef.",
    +}
    +
    +func (ImageReviewContainerSpec) SwaggerDoc() map[string]string {
    +	return map_ImageReviewContainerSpec
    +}
    +
    +var map_ImageReviewSpec = map[string]string{
    +	"":            "ImageReviewSpec is a description of the pod creation request.",
    +	"containers":  "Containers is a list of a subset of the information in each container of the Pod being created.",
    +	"annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.",
    +	"namespace":   "Namespace is the namespace the pod is being created in.",
    +}
    +
    +func (ImageReviewSpec) SwaggerDoc() map[string]string {
    +	return map_ImageReviewSpec
    +}
    +
    +var map_ImageReviewStatus = map[string]string{
    +	"":                 "ImageReviewStatus is the result of the review for the pod creation request.",
    +	"allowed":          "Allowed indicates that all images were allowed to be run.",
    +	"reason":           "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong.  Kubernetes may truncate excessively long errors when displaying to the user.",
    +	"auditAnnotations": "AuditAnnotations will be added to the attributes object of the admission controller request using 'AddAnnotation'.  The keys should be prefix-less (i.e., the admission controller will add an appropriate prefix).",
    +}
    +
    +func (ImageReviewStatus) SwaggerDoc() map[string]string {
    +	return map_ImageReviewStatus
    +}
    +
    +// AUTO-GENERATED FUNCTIONS END HERE
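
These `SwaggerDoc` maps key doc strings by JSON field name (the empty key describes the type itself); a one-liner lookup, assuming the vendored package imports cleanly:

```go
package main

import (
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
)

func main() {
	// "" describes the type; other keys are JSON field names.
	docs := imagepolicy.ImageReviewStatus{}.SwaggerDoc()
	fmt.Println(docs["allowed"])
}
```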
    diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go
    new file mode 100644
    index 0000000000..f230656f3f
    --- /dev/null
    +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go
    @@ -0,0 +1,121 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by deepcopy-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +)
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ImageReview) DeepCopyInto(out *ImageReview) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	in.Status.DeepCopyInto(&out.Status)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview.
    +func (in *ImageReview) DeepCopy() *ImageReview {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ImageReview)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ImageReview) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec.
    +func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ImageReviewContainerSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) {
    +	*out = *in
    +	if in.Containers != nil {
    +		in, out := &in.Containers, &out.Containers
    +		*out = make([]ImageReviewContainerSpec, len(*in))
    +		copy(*out, *in)
    +	}
    +	if in.Annotations != nil {
    +		in, out := &in.Annotations, &out.Annotations
    +		*out = make(map[string]string, len(*in))
    +		for key, val := range *in {
    +			(*out)[key] = val
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec.
    +func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ImageReviewSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) {
    +	*out = *in
    +	if in.AuditAnnotations != nil {
    +		in, out := &in.AuditAnnotations, &out.AuditAnnotations
    +		*out = make(map[string]string, len(*in))
    +		for key, val := range *in {
    +			(*out)[key] = val
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus.
    +func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ImageReviewStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
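
The deepcopy functions allocate fresh slices and maps, so a copy never aliases the original's storage. A short sketch demonstrating that property; the annotation key and value are placeholders:

```go
package main

import (
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
)

func main() {
	orig := &imagepolicy.ImageReviewStatus{
		Allowed:          true,
		AuditAnnotations: map[string]string{"checked-by": "example-backend"},
	}
	// DeepCopy allocates a new map, so mutating the copy leaves the original untouched.
	cp := orig.DeepCopy()
	cp.AuditAnnotations["checked-by"] = "something-else"
	fmt.Println(orig.AuditAnnotations["checked-by"], cp.AuditAnnotations["checked-by"])
}
```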
    diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go
    index d3ffd5ed17..1d13e7bab3 100644
    --- a/vendor/k8s.io/api/networking/v1/doc.go
    +++ b/vendor/k8s.io/api/networking/v1/doc.go
    @@ -17,6 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=networking.k8s.io
     
     package v1 // import "k8s.io/api/networking/v1"
    diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
    index 22a9085a54..c72fdc8f37 100644
    --- a/vendor/k8s.io/api/networking/v1/generated.proto
    +++ b/vendor/k8s.io/api/networking/v1/generated.proto
    @@ -96,7 +96,7 @@ message Ingress {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the desired state of the Ingress.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -121,7 +121,7 @@ message IngressBackend {
       // service.Port must not be specified.
       // This is a mutually exclusive setting with "Service".
       // +optional
    -  optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
    +  optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
     }
     
     // IngressClass represents the class of the Ingress, referenced by the Ingress
    @@ -133,7 +133,7 @@ message IngressClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the desired state of the IngressClass.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -145,7 +145,7 @@ message IngressClass {
     message IngressClassList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of IngressClasses.
       repeated IngressClass items = 2;
    @@ -200,7 +200,7 @@ message IngressList {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of Ingress.
       repeated Ingress items = 2;
    @@ -381,7 +381,7 @@ message NetworkPolicy {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec represents the specification of the desired behavior for this NetworkPolicy.
       // +optional
    @@ -438,7 +438,7 @@ message NetworkPolicyList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated NetworkPolicy items = 2;
    @@ -454,7 +454,7 @@ message NetworkPolicyPeer {
       // the pods matching podSelector in the Namespaces selected by NamespaceSelector.
       // Otherwise it selects the pods matching podSelector in the policy's own namespace.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
     
       // namespaceSelector selects namespaces using cluster-scoped labels. This field follows
       // standard label selector semantics; if present but empty, it selects all namespaces.
    @@ -463,7 +463,7 @@ message NetworkPolicyPeer {
       // the pods matching podSelector in the namespaces selected by namespaceSelector.
       // Otherwise it selects all pods in the namespaces selected by namespaceSelector.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
     
       // ipBlock defines policy on a particular IPBlock. If this field is set then
       // neither of the other fields can be.
    @@ -483,7 +483,7 @@ message NetworkPolicyPort {
       // numbers.
       // If present, only traffic on the specified protocol AND port will be matched.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
     
       // endPort indicates that the range of ports from port to endPort if set, inclusive,
       // should be allowed by the policy. This field cannot be defined if the port field
    @@ -501,7 +501,7 @@ message NetworkPolicySpec {
       // the ingress rules for each are combined additively.
       // This field is NOT optional and follows standard label selector semantics.
       // An empty podSelector matches all pods in this namespace.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
     
       // ingress is a list of ingress rules to be applied to the selected pods.
       // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
    @@ -541,6 +541,7 @@ message NetworkPolicySpec {
     }
     
     // ServiceBackendPort is the service port being referenced.
    +// +structType=atomic
     message ServiceBackendPort {
       // name is the name of the port on the Service.
       // This is a mutually exclusive setting with "Number".
    diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
    index 8ee62918b0..d75e27558d 100644
    --- a/vendor/k8s.io/api/networking/v1/types.go
    +++ b/vendor/k8s.io/api/networking/v1/types.go
    @@ -24,6 +24,7 @@ import (
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.7
     
     // NetworkPolicy describes what network traffic is allowed for a set of Pods
     type NetworkPolicy struct {
    @@ -214,6 +215,7 @@ type NetworkPolicyPeer struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // NetworkPolicyList is a list of NetworkPolicy objects.
     type NetworkPolicyList struct {
    @@ -230,6 +232,7 @@ type NetworkPolicyList struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // Ingress is a collection of rules that allow inbound connections to reach the
     // endpoints defined by a backend. An Ingress can be configured to give services
    @@ -255,6 +258,7 @@ type Ingress struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // IngressList is a collection of Ingress.
     type IngressList struct {
    @@ -415,7 +419,7 @@ type IngressRule struct {
     	// default backend, is left to the controller fulfilling the Ingress. Http is
     	// currently the only supported IngressRuleValue.
     	// +optional
    -	IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"`
    +	IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"`
     }
     
     // IngressRuleValue represents a rule to apply against incoming requests. If the
    @@ -527,6 +531,7 @@ type IngressServiceBackend struct {
     }
     
     // ServiceBackendPort is the service port being referenced.
    +// +structType=atomic
     type ServiceBackendPort struct {
     	// name is the name of the port on the Service.
     	// This is a mutually exclusive setting with "Number".
    @@ -542,6 +547,7 @@ type ServiceBackendPort struct {
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // IngressClass represents the class of the Ingress, referenced by the Ingress
     // Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
    @@ -616,6 +622,7 @@ type IngressClassParametersReference struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.19
     
     // IngressClassList is a collection of IngressClasses.
     type IngressClassList struct {
    diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..21e8c671a5
    --- /dev/null
    +++ b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,58 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Ingress) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *IngressClass) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *IngressClassList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *IngressList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 7
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 19
    +}
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto
    index 8914fffcf8..80ec6af735 100644
    --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto
    @@ -39,7 +39,7 @@ message IPAddress {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the desired state of the IPAddress.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -52,7 +52,7 @@ message IPAddressList {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of IPAddresses.
       repeated IPAddress items = 2;
    @@ -91,7 +91,7 @@ message ServiceCIDR {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the desired state of the ServiceCIDR.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -109,7 +109,7 @@ message ServiceCIDRList {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of ServiceCIDRs.
       repeated ServiceCIDR items = 2;
    @@ -119,6 +119,9 @@ message ServiceCIDRList {
     message ServiceCIDRSpec {
       // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
       // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    +  // The network address of each CIDR, the address that identifies the subnet of a host, is reserved
    +  // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
    +  // allocated.
       // This field is immutable.
       // +optional
       // +listType=atomic
    @@ -134,6 +137,6 @@ message ServiceCIDRStatus {
       // +patchStrategy=merge
       // +listType=map
       // +listMapKey=type
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
     }
     
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go
    index bcdc33b459..0e454f0263 100644
    --- a/vendor/k8s.io/api/networking/v1alpha1/types.go
    +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go
    @@ -109,6 +109,9 @@ type ServiceCIDR struct {
     type ServiceCIDRSpec struct {
     	// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
     	// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    +	// The network address of each CIDR, the address that identifies the subnet of a host, is reserved
    +	// and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
    +	// allocated.
     	// This field is immutable.
     	// +optional
     	// +listType=atomic
    diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
    index 481ec06030..4c8eb57a7a 100644
    --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
    @@ -91,7 +91,7 @@ func (ServiceCIDRList) SwaggerDoc() map[string]string {
     
     var map_ServiceCIDRSpec = map[string]string{
     	"":      "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
    -	"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.",
    +	"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.",
     }
     
     func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
    diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
    index 13d4f53855..a924725f28 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
    @@ -27,6 +27,7 @@ import (
     	proto "github.com/gogo/protobuf/proto"
     	k8s_io_api_core_v1 "k8s.io/api/core/v1"
     	v11 "k8s.io/api/core/v1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     
     	math "math"
     	math_bits "math/bits"
    @@ -101,10 +102,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo
     
    +func (m *IPAddress) Reset()      { *m = IPAddress{} }
    +func (*IPAddress) ProtoMessage() {}
    +func (*IPAddress) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{2}
    +}
    +func (m *IPAddress) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *IPAddress) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_IPAddress.Merge(m, src)
    +}
    +func (m *IPAddress) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *IPAddress) XXX_DiscardUnknown() {
    +	xxx_messageInfo_IPAddress.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_IPAddress proto.InternalMessageInfo
    +
    +func (m *IPAddressList) Reset()      { *m = IPAddressList{} }
    +func (*IPAddressList) ProtoMessage() {}
    +func (*IPAddressList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{3}
    +}
    +func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *IPAddressList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_IPAddressList.Merge(m, src)
    +}
    +func (m *IPAddressList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *IPAddressList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_IPAddressList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
    +
    +func (m *IPAddressSpec) Reset()      { *m = IPAddressSpec{} }
    +func (*IPAddressSpec) ProtoMessage() {}
    +func (*IPAddressSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{4}
    +}
    +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_IPAddressSpec.Merge(m, src)
    +}
    +func (m *IPAddressSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *IPAddressSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
    +
     func (m *Ingress) Reset()      { *m = Ingress{} }
     func (*Ingress) ProtoMessage() {}
     func (*Ingress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{2}
    +	return fileDescriptor_9497719c79c89d2d, []int{5}
     }
     func (m *Ingress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -132,7 +217,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo
     func (m *IngressBackend) Reset()      { *m = IngressBackend{} }
     func (*IngressBackend) ProtoMessage() {}
     func (*IngressBackend) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{3}
    +	return fileDescriptor_9497719c79c89d2d, []int{6}
     }
     func (m *IngressBackend) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -160,7 +245,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
     func (m *IngressClass) Reset()      { *m = IngressClass{} }
     func (*IngressClass) ProtoMessage() {}
     func (*IngressClass) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{4}
    +	return fileDescriptor_9497719c79c89d2d, []int{7}
     }
     func (m *IngressClass) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -188,7 +273,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo
     func (m *IngressClassList) Reset()      { *m = IngressClassList{} }
     func (*IngressClassList) ProtoMessage() {}
     func (*IngressClassList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{5}
    +	return fileDescriptor_9497719c79c89d2d, []int{8}
     }
     func (m *IngressClassList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -216,7 +301,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo
     func (m *IngressClassParametersReference) Reset()      { *m = IngressClassParametersReference{} }
     func (*IngressClassParametersReference) ProtoMessage() {}
     func (*IngressClassParametersReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{6}
    +	return fileDescriptor_9497719c79c89d2d, []int{9}
     }
     func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -244,7 +329,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo
     func (m *IngressClassSpec) Reset()      { *m = IngressClassSpec{} }
     func (*IngressClassSpec) ProtoMessage() {}
     func (*IngressClassSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{7}
    +	return fileDescriptor_9497719c79c89d2d, []int{10}
     }
     func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -272,7 +357,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo
     func (m *IngressList) Reset()      { *m = IngressList{} }
     func (*IngressList) ProtoMessage() {}
     func (*IngressList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{8}
    +	return fileDescriptor_9497719c79c89d2d, []int{11}
     }
     func (m *IngressList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -300,7 +385,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo
     func (m *IngressLoadBalancerIngress) Reset()      { *m = IngressLoadBalancerIngress{} }
     func (*IngressLoadBalancerIngress) ProtoMessage() {}
     func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{9}
    +	return fileDescriptor_9497719c79c89d2d, []int{12}
     }
     func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -328,7 +413,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo
     func (m *IngressLoadBalancerStatus) Reset()      { *m = IngressLoadBalancerStatus{} }
     func (*IngressLoadBalancerStatus) ProtoMessage() {}
     func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{10}
    +	return fileDescriptor_9497719c79c89d2d, []int{13}
     }
     func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -356,7 +441,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo
     func (m *IngressPortStatus) Reset()      { *m = IngressPortStatus{} }
     func (*IngressPortStatus) ProtoMessage() {}
     func (*IngressPortStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{11}
    +	return fileDescriptor_9497719c79c89d2d, []int{14}
     }
     func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -384,7 +469,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo
     func (m *IngressRule) Reset()      { *m = IngressRule{} }
     func (*IngressRule) ProtoMessage() {}
     func (*IngressRule) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{12}
    +	return fileDescriptor_9497719c79c89d2d, []int{15}
     }
     func (m *IngressRule) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -412,7 +497,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo
     func (m *IngressRuleValue) Reset()      { *m = IngressRuleValue{} }
     func (*IngressRuleValue) ProtoMessage() {}
     func (*IngressRuleValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{13}
    +	return fileDescriptor_9497719c79c89d2d, []int{16}
     }
     func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -440,7 +525,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
     func (m *IngressSpec) Reset()      { *m = IngressSpec{} }
     func (*IngressSpec) ProtoMessage() {}
     func (*IngressSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{14}
    +	return fileDescriptor_9497719c79c89d2d, []int{17}
     }
     func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -468,7 +553,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
     func (m *IngressStatus) Reset()      { *m = IngressStatus{} }
     func (*IngressStatus) ProtoMessage() {}
     func (*IngressStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{15}
    +	return fileDescriptor_9497719c79c89d2d, []int{18}
     }
     func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -496,7 +581,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
     func (m *IngressTLS) Reset()      { *m = IngressTLS{} }
     func (*IngressTLS) ProtoMessage() {}
     func (*IngressTLS) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_9497719c79c89d2d, []int{16}
    +	return fileDescriptor_9497719c79c89d2d, []int{19}
     }
     func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -521,9 +606,152 @@ func (m *IngressTLS) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_IngressTLS proto.InternalMessageInfo
     
    +func (m *ParentReference) Reset()      { *m = ParentReference{} }
    +func (*ParentReference) ProtoMessage() {}
    +func (*ParentReference) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{20}
    +}
    +func (m *ParentReference) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ParentReference) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ParentReference.Merge(m, src)
    +}
    +func (m *ParentReference) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ParentReference) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ParentReference.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ParentReference proto.InternalMessageInfo
    +
    +func (m *ServiceCIDR) Reset()      { *m = ServiceCIDR{} }
    +func (*ServiceCIDR) ProtoMessage() {}
    +func (*ServiceCIDR) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{21}
    +}
    +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDR.Merge(m, src)
    +}
    +func (m *ServiceCIDR) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDR) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
    +
    +func (m *ServiceCIDRList) Reset()      { *m = ServiceCIDRList{} }
    +func (*ServiceCIDRList) ProtoMessage() {}
    +func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{22}
    +}
    +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDRList.Merge(m, src)
    +}
    +func (m *ServiceCIDRList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDRList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
    +
    +func (m *ServiceCIDRSpec) Reset()      { *m = ServiceCIDRSpec{} }
    +func (*ServiceCIDRSpec) ProtoMessage() {}
    +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{23}
    +}
    +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
    +}
    +func (m *ServiceCIDRSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
    +
    +func (m *ServiceCIDRStatus) Reset()      { *m = ServiceCIDRStatus{} }
    +func (*ServiceCIDRStatus) ProtoMessage() {}
    +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_9497719c79c89d2d, []int{24}
    +}
    +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
    +}
    +func (m *ServiceCIDRStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
    +
     func init() {
     	proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath")
     	proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue")
    +	proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1beta1.IPAddress")
    +	proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1beta1.IPAddressList")
    +	proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1beta1.IPAddressSpec")
     	proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress")
     	proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend")
     	proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass")
    @@ -539,6 +767,11 @@ func init() {
     	proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec")
     	proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus")
     	proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS")
    +	proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1beta1.ParentReference")
    +	proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDR")
    +	proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRList")
    +	proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRSpec")
    +	proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRStatus")
     }
     
     func init() {
    @@ -546,85 +779,99 @@ func init() {
     }
     
     var fileDescriptor_9497719c79c89d2d = []byte{
    -	// 1234 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x6f, 0x1b, 0xc5,
    -	0x1b, 0xcf, 0xda, 0x71, 0xe3, 0x8c, 0xd3, 0x34, 0xff, 0xf9, 0xe7, 0x60, 0x82, 0x6a, 0x47, 0x7b,
    -	0x40, 0x81, 0x36, 0xbb, 0x4d, 0x5a, 0x50, 0xb9, 0x20, 0xd8, 0x08, 0x91, 0x28, 0x21, 0x31, 0x63,
    -	0xf3, 0x22, 0xc4, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0x66, 0x83, 0x7a, 0x03,
    -	0x71, 0xe2, 0x04, 0xdf, 0x01, 0x89, 0x8f, 0x80, 0xb8, 0x20, 0x21, 0xb8, 0xe4, 0xd8, 0x63, 0x2f,
    -	0x44, 0xc4, 0x7c, 0x8b, 0x9e, 0xd0, 0x33, 0x3b, 0xfb, 0xe2, 0x97, 0xb4, 0x0e, 0x87, 0x9e, 0xe2,
    -	0x7d, 0x5e, 0x7e, 0xcf, 0xfb, 0x33, 0x4f, 0xd0, 0xf6, 0xe0, 0xa1, 0xb0, 0xbc, 0xd0, 0xa6, 0x91,
    -	0x67, 0x07, 0x4c, 0x7e, 0x1d, 0xf2, 0x81, 0x17, 0xf4, 0xec, 0xb3, 0x9d, 0x53, 0x26, 0xe9, 0x8e,
    -	0xdd, 0x63, 0x01, 0xe3, 0x54, 0xb2, 0xae, 0x15, 0xf1, 0x50, 0x86, 0xf8, 0x76, 0x22, 0x6e, 0xd1,
    -	0xc8, 0xb3, 0x72, 0x71, 0x4b, 0x8b, 0x6f, 0x6c, 0xf7, 0x3c, 0xd9, 0x8f, 0x4f, 0x2d, 0x37, 0x1c,
    -	0xda, 0xbd, 0xb0, 0x17, 0xda, 0x4a, 0xeb, 0x34, 0x7e, 0xa4, 0xbe, 0xd4, 0x87, 0xfa, 0x95, 0xa0,
    -	0x6d, 0x98, 0x05, 0xe3, 0x6e, 0xc8, 0x99, 0x7d, 0x36, 0x65, 0x71, 0xe3, 0x41, 0x2e, 0x33, 0xa4,
    -	0x6e, 0xdf, 0x0b, 0x18, 0x7f, 0x6c, 0x47, 0x83, 0x1e, 0x10, 0x84, 0x3d, 0x64, 0x92, 0xce, 0xd2,
    -	0xb2, 0xaf, 0xd2, 0xe2, 0x71, 0x20, 0xbd, 0x21, 0x9b, 0x52, 0x78, 0xeb, 0x45, 0x0a, 0xc2, 0xed,
    -	0xb3, 0x21, 0x9d, 0xd2, 0xbb, 0x7f, 0x95, 0x5e, 0x2c, 0x3d, 0xdf, 0xf6, 0x02, 0x29, 0x24, 0x9f,
    -	0x54, 0x32, 0xff, 0x34, 0xd0, 0xad, 0xfd, 0x4e, 0xa7, 0x75, 0x10, 0xf4, 0x38, 0x13, 0xa2, 0x45,
    -	0x65, 0x1f, 0x6f, 0xa2, 0xc5, 0x88, 0xca, 0x7e, 0xdd, 0xd8, 0x34, 0xb6, 0x96, 0x9d, 0x95, 0xf3,
    -	0x8b, 0xe6, 0xc2, 0xe8, 0xa2, 0xb9, 0x08, 0x3c, 0xa2, 0x38, 0xf8, 0x01, 0xaa, 0xc2, 0xdf, 0xce,
    -	0xe3, 0x88, 0xd5, 0xcb, 0x4a, 0xaa, 0x3e, 0xba, 0x68, 0x56, 0x5b, 0x9a, 0xf6, 0xac, 0xf0, 0x9b,
    -	0x64, 0x92, 0xf8, 0x33, 0xb4, 0x74, 0x4a, 0xdd, 0x01, 0x0b, 0xba, 0xf5, 0xd2, 0xa6, 0xb1, 0x55,
    -	0xdb, 0xdd, 0xb6, 0x9e, 0x5b, 0x43, 0x4b, 0x3b, 0xe5, 0x24, 0x4a, 0xce, 0x2d, 0xed, 0xc9, 0x92,
    -	0x26, 0x90, 0x14, 0xce, 0x1c, 0xa0, 0xf5, 0x42, 0x10, 0x24, 0xf6, 0xd9, 0x27, 0xd4, 0x8f, 0x19,
    -	0x6e, 0xa3, 0x0a, 0x58, 0x17, 0x75, 0x63, 0xb3, 0xbc, 0x55, 0xdb, 0xb5, 0x5e, 0x60, 0x6f, 0x22,
    -	0x11, 0xce, 0x4d, 0x6d, 0xb0, 0x02, 0x5f, 0x82, 0x24, 0x58, 0xe6, 0x0f, 0x25, 0xb4, 0xa4, 0xa5,
    -	0xf0, 0x97, 0xa8, 0x0a, 0x75, 0xef, 0x52, 0x49, 0x55, 0xba, 0x6a, 0xbb, 0xf7, 0x0a, 0x36, 0xb2,
    -	0x32, 0x58, 0xd1, 0xa0, 0x07, 0x04, 0x61, 0x81, 0xb4, 0x75, 0xb6, 0x63, 0x9d, 0x9c, 0x7e, 0xc5,
    -	0x5c, 0xf9, 0x21, 0x93, 0xd4, 0xc1, 0xda, 0x0a, 0xca, 0x69, 0x24, 0x43, 0xc5, 0x47, 0x68, 0x51,
    -	0x44, 0xcc, 0xd5, 0x19, 0x7b, 0x63, 0xbe, 0x8c, 0xb5, 0x23, 0xe6, 0xe6, 0x85, 0x83, 0x2f, 0xa2,
    -	0x50, 0x70, 0x07, 0xdd, 0x10, 0x92, 0xca, 0x58, 0xa8, 0xb2, 0xd5, 0x76, 0xef, 0xce, 0x89, 0xa7,
    -	0x74, 0x9c, 0x55, 0x8d, 0x78, 0x23, 0xf9, 0x26, 0x1a, 0xcb, 0xfc, 0xbe, 0x84, 0x56, 0xc7, 0x6b,
    -	0x85, 0xdf, 0x44, 0x35, 0xc1, 0xf8, 0x99, 0xe7, 0xb2, 0x63, 0x3a, 0x64, 0xba, 0x95, 0xfe, 0xaf,
    -	0xf5, 0x6b, 0xed, 0x9c, 0x45, 0x8a, 0x72, 0xb8, 0x97, 0xa9, 0xb5, 0x42, 0x2e, 0x75, 0xd0, 0x57,
    -	0xa7, 0x14, 0x3a, 0xdb, 0x4a, 0x3a, 0xdb, 0x3a, 0x08, 0xe4, 0x09, 0x6f, 0x4b, 0xee, 0x05, 0xbd,
    -	0x29, 0x43, 0x00, 0x46, 0x8a, 0xc8, 0xf8, 0x53, 0x54, 0xe5, 0x4c, 0x84, 0x31, 0x77, 0x99, 0x4e,
    -	0xc5, 0x58, 0x33, 0xc2, 0x0a, 0x80, 0x32, 0x41, 0xdf, 0x76, 0x8f, 0x42, 0x97, 0xfa, 0x49, 0x71,
    -	0x08, 0x7b, 0xc4, 0x38, 0x0b, 0x5c, 0xe6, 0xac, 0x40, 0xc3, 0x13, 0x0d, 0x41, 0x32, 0x30, 0x18,
    -	0xa8, 0x15, 0x9d, 0x8b, 0x3d, 0x9f, 0xbe, 0x94, 0x16, 0xf9, 0x68, 0xac, 0x45, 0xec, 0xf9, 0x4a,
    -	0xaa, 0x9c, 0xbb, 0xaa, 0x4f, 0xcc, 0x3f, 0x0c, 0xb4, 0x56, 0x14, 0x3c, 0xf2, 0x84, 0xc4, 0x5f,
    -	0x4c, 0x45, 0x62, 0xcd, 0x17, 0x09, 0x68, 0xab, 0x38, 0xd6, 0xb4, 0xa9, 0x6a, 0x4a, 0x29, 0x44,
    -	0xd1, 0x42, 0x15, 0x4f, 0xb2, 0xa1, 0xa8, 0x97, 0xd4, 0xac, 0xde, 0xb9, 0x46, 0x18, 0xf9, 0xa0,
    -	0x1e, 0x00, 0x02, 0x49, 0x80, 0xcc, 0xbf, 0x0c, 0xd4, 0x2c, 0x8a, 0xb5, 0x28, 0xa7, 0x43, 0x26,
    -	0x19, 0x17, 0x59, 0x19, 0xf1, 0x16, 0xaa, 0xd2, 0xd6, 0xc1, 0x07, 0x3c, 0x8c, 0xa3, 0x74, 0xdf,
    -	0x81, 0x7f, 0xef, 0x69, 0x1a, 0xc9, 0xb8, 0xb0, 0x15, 0x07, 0x9e, 0x5e, 0x5d, 0x85, 0xad, 0x78,
    -	0xe8, 0x05, 0x5d, 0xa2, 0x38, 0x20, 0x11, 0x40, 0xb3, 0x97, 0xc7, 0x25, 0x54, 0x97, 0x2b, 0x0e,
    -	0x6e, 0xa2, 0x8a, 0x70, 0xc3, 0x88, 0xd5, 0x17, 0x95, 0xc8, 0x32, 0xb8, 0xdc, 0x06, 0x02, 0x49,
    -	0xe8, 0xf8, 0x0e, 0x5a, 0x06, 0x41, 0x11, 0x51, 0x97, 0xd5, 0x2b, 0x4a, 0xe8, 0xe6, 0xe8, 0xa2,
    -	0xb9, 0x7c, 0x9c, 0x12, 0x49, 0xce, 0x37, 0x7f, 0x99, 0x28, 0x12, 0xd4, 0x0f, 0xef, 0x22, 0xe4,
    -	0x86, 0x81, 0xe4, 0xa1, 0xef, 0x33, 0xae, 0x43, 0xca, 0xda, 0x67, 0x2f, 0xe3, 0x90, 0x82, 0x14,
    -	0x0e, 0x10, 0x8a, 0xb2, 0xdc, 0xe8, 0x36, 0x7a, 0xe7, 0x1a, 0xf9, 0x9f, 0x91, 0x58, 0x67, 0x15,
    -	0xec, 0x15, 0x18, 0x05, 0x0b, 0xe6, 0xaf, 0x06, 0xaa, 0x69, 0xfd, 0x97, 0xd0, 0x58, 0x87, 0xe3,
    -	0x8d, 0xf5, 0xda, 0x9c, 0x8f, 0xce, 0xec, 0x9e, 0xfa, 0xcd, 0x40, 0x1b, 0xa9, 0xeb, 0x21, 0xed,
    -	0x3a, 0xd4, 0xa7, 0x81, 0xcb, 0x78, 0xfa, 0x1e, 0x6c, 0xa0, 0x92, 0x97, 0x36, 0x12, 0xd2, 0x00,
    -	0xa5, 0x83, 0x16, 0x29, 0x79, 0x11, 0xbe, 0x8b, 0xaa, 0xfd, 0x50, 0x48, 0xd5, 0x22, 0x49, 0x13,
    -	0x65, 0x5e, 0xef, 0x6b, 0x3a, 0xc9, 0x24, 0xf0, 0xc7, 0xa8, 0x12, 0x85, 0x5c, 0x8a, 0xfa, 0xa2,
    -	0xf2, 0xfa, 0xde, 0x7c, 0x5e, 0xc3, 0x6e, 0xd3, 0xcb, 0x3a, 0x7f, 0xbc, 0x00, 0x86, 0x24, 0x68,
    -	0xe6, 0xb7, 0x06, 0x7a, 0x65, 0x86, 0xff, 0x89, 0x0e, 0xee, 0xa2, 0x25, 0x2f, 0x61, 0xea, 0x17,
    -	0xf3, 0xed, 0xf9, 0xcc, 0xce, 0x48, 0x45, 0xfe, 0x5a, 0xa7, 0xaf, 0x72, 0x0a, 0x6d, 0xfe, 0x64,
    -	0xa0, 0xff, 0x4d, 0xf9, 0xab, 0xae, 0x0e, 0xd8, 0xf9, 0x90, 0xbc, 0x4a, 0xe1, 0xea, 0x80, 0xd5,
    -	0xad, 0x38, 0xf8, 0x10, 0x55, 0xd5, 0xd1, 0xe2, 0x86, 0xbe, 0x4e, 0xa0, 0x9d, 0x26, 0xb0, 0xa5,
    -	0xe9, 0xcf, 0x2e, 0x9a, 0xaf, 0x4e, 0x5f, 0x72, 0x56, 0xca, 0x26, 0x19, 0x00, 0x8c, 0x22, 0xe3,
    -	0x3c, 0xe4, 0x7a, 0x5a, 0xd5, 0x28, 0xbe, 0x0f, 0x04, 0x92, 0xd0, 0xcd, 0x9f, 0xf3, 0x26, 0x85,
    -	0x83, 0x02, 0xfc, 0x83, 0xe2, 0x4c, 0x5e, 0x45, 0x50, 0x3a, 0xa2, 0x38, 0x38, 0x46, 0x6b, 0xde,
    -	0xc4, 0x05, 0x72, 0xbd, 0x9d, 0x9c, 0xa9, 0x39, 0x75, 0x0d, 0xbf, 0x36, 0xc9, 0x21, 0x53, 0x26,
    -	0x4c, 0x86, 0xa6, 0xa4, 0xe0, 0x49, 0xe8, 0x4b, 0x19, 0xe9, 0x69, 0xba, 0x3f, 0xff, 0xdd, 0x93,
    -	0xbb, 0x50, 0x55, 0xd1, 0x75, 0x3a, 0x2d, 0xa2, 0xa0, 0xcc, 0xdf, 0x4b, 0x59, 0x3e, 0xd4, 0xa2,
    -	0x79, 0x37, 0x8b, 0x56, 0xed, 0x00, 0xf5, 0xcc, 0x27, 0x6b, 0x6d, 0xbd, 0xe0, 0x78, 0xc6, 0x23,
    -	0x53, 0xd2, 0xb8, 0x93, 0xdf, 0x83, 0xc6, 0x7f, 0xb9, 0x07, 0x6b, 0xb3, 0x6e, 0x41, 0xbc, 0x8f,
    -	0xca, 0xd2, 0x4f, 0x87, 0xfd, 0xf5, 0xf9, 0x10, 0x3b, 0x47, 0x6d, 0xa7, 0xa6, 0x53, 0x5e, 0xee,
    -	0x1c, 0xb5, 0x09, 0x40, 0xe0, 0x13, 0x54, 0xe1, 0xb1, 0xcf, 0xe0, 0x56, 0x2a, 0xcf, 0x7f, 0x7b,
    -	0x41, 0x06, 0xf3, 0xe1, 0x83, 0x2f, 0x41, 0x12, 0x1c, 0xf3, 0x3b, 0x03, 0xdd, 0x1c, 0xbb, 0xa8,
    -	0x30, 0x47, 0x2b, 0x7e, 0x61, 0x76, 0x74, 0x1e, 0x1e, 0x5e, 0x7f, 0xea, 0xf4, 0xd0, 0xaf, 0x6b,
    -	0xbb, 0x2b, 0x45, 0x1e, 0x19, 0xb3, 0x61, 0x52, 0x84, 0xf2, 0xb0, 0x61, 0x0e, 0xa0, 0x79, 0x93,
    -	0x81, 0xd7, 0x73, 0x00, 0x3d, 0x2d, 0x48, 0x42, 0x87, 0x07, 0x45, 0x30, 0x97, 0x33, 0x79, 0x9c,
    -	0x2f, 0xae, 0xec, 0x41, 0x69, 0x67, 0x1c, 0x52, 0x90, 0x72, 0xf6, 0xce, 0x2f, 0x1b, 0x0b, 0x4f,
    -	0x2e, 0x1b, 0x0b, 0x4f, 0x2f, 0x1b, 0x0b, 0xdf, 0x8c, 0x1a, 0xc6, 0xf9, 0xa8, 0x61, 0x3c, 0x19,
    -	0x35, 0x8c, 0xa7, 0xa3, 0x86, 0xf1, 0xf7, 0xa8, 0x61, 0xfc, 0xf8, 0x4f, 0x63, 0xe1, 0xf3, 0xdb,
    -	0xcf, 0xfd, 0x87, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xea, 0xf6, 0xe9, 0x27, 0x10, 0x0e,
    -	0x00, 0x00,
    +	// 1457 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xc5,
    +	0x1f, 0xcf, 0x3a, 0x71, 0xe3, 0x8c, 0xd3, 0x26, 0x9d, 0x5f, 0x0f, 0xfe, 0x05, 0xd5, 0x8e, 0x16,
    +	0x09, 0x85, 0x3e, 0x76, 0xdb, 0xb4, 0xa0, 0x72, 0x41, 0xd4, 0x01, 0x51, 0xab, 0x69, 0xb2, 0x8c,
    +	0x0d, 0x54, 0xc0, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0xc7, 0x81, 0xde, 0x40,
    +	0x9c, 0x38, 0xc1, 0x9d, 0x23, 0x12, 0x7f, 0x02, 0x70, 0xa0, 0x52, 0x05, 0x97, 0x1e, 0x7b, 0xec,
    +	0x85, 0x88, 0x9a, 0xff, 0xa2, 0x27, 0xf4, 0x9d, 0x9d, 0x7d, 0xf9, 0xd1, 0x6c, 0x38, 0xe4, 0x54,
    +	0xef, 0xf7, 0x3d, 0xdf, 0xe7, 0xa7, 0x41, 0x57, 0x07, 0xb7, 0x42, 0xc3, 0xf1, 0x4d, 0x1a, 0x38,
    +	0xa6, 0xc7, 0xc4, 0x97, 0x3e, 0x1f, 0x38, 0x5e, 0xcf, 0x3c, 0xbc, 0x7e, 0xc0, 0x04, 0xbd, 0x6e,
    +	0xf6, 0x98, 0xc7, 0x38, 0x15, 0xac, 0x6b, 0x04, 0xdc, 0x17, 0x3e, 0xbe, 0x18, 0x89, 0x1b, 0x34,
    +	0x70, 0x8c, 0x54, 0xdc, 0x50, 0xe2, 0x1b, 0x57, 0x7b, 0x8e, 0xe8, 0x8f, 0x0e, 0x0c, 0xdb, 0x1f,
    +	0x9a, 0x3d, 0xbf, 0xe7, 0x9b, 0x52, 0xeb, 0x60, 0xf4, 0x40, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0xb2,
    +	0xb6, 0xa1, 0x67, 0x9c, 0xdb, 0x3e, 0x67, 0xe6, 0xe1, 0x94, 0xc7, 0x8d, 0x9b, 0xa9, 0xcc, 0x90,
    +	0xda, 0x7d, 0xc7, 0x63, 0xfc, 0xa1, 0x19, 0x0c, 0x7a, 0x40, 0x08, 0xcd, 0x21, 0x13, 0x74, 0x96,
    +	0x96, 0x39, 0x4f, 0x8b, 0x8f, 0x3c, 0xe1, 0x0c, 0xd9, 0x94, 0xc2, 0x9b, 0xc7, 0x29, 0x84, 0x76,
    +	0x9f, 0x0d, 0xe9, 0x94, 0xde, 0x8d, 0x79, 0x7a, 0x23, 0xe1, 0xb8, 0xa6, 0xe3, 0x89, 0x50, 0xf0,
    +	0x49, 0x25, 0xfd, 0x4f, 0x0d, 0xad, 0xdd, 0xe9, 0x74, 0xac, 0x96, 0xd7, 0xe3, 0x2c, 0x0c, 0x2d,
    +	0x2a, 0xfa, 0x78, 0x13, 0x2d, 0x05, 0x54, 0xf4, 0x6b, 0xda, 0xa6, 0xb6, 0xb5, 0xd2, 0x5c, 0x7d,
    +	0x72, 0xd4, 0x58, 0x18, 0x1f, 0x35, 0x96, 0x80, 0x47, 0x24, 0x07, 0xdf, 0x44, 0x15, 0xf8, 0xb7,
    +	0xf3, 0x30, 0x60, 0xb5, 0x45, 0x29, 0x55, 0x1b, 0x1f, 0x35, 0x2a, 0x96, 0xa2, 0xbd, 0xc8, 0xfc,
    +	0x26, 0x89, 0x24, 0xbe, 0x8f, 0x96, 0x0f, 0xa8, 0x3d, 0x60, 0x5e, 0xb7, 0x56, 0xda, 0xd4, 0xb6,
    +	0xaa, 0xdb, 0x57, 0x8d, 0x97, 0xd6, 0xd0, 0x50, 0x41, 0x35, 0x23, 0xa5, 0xe6, 0x9a, 0x8a, 0x64,
    +	0x59, 0x11, 0x48, 0x6c, 0x4e, 0x1f, 0xa0, 0x0b, 0x99, 0x47, 0x90, 0x91, 0xcb, 0x3e, 0xa2, 0xee,
    +	0x88, 0xe1, 0x36, 0x2a, 0x83, 0xf7, 0xb0, 0xa6, 0x6d, 0x2e, 0x6e, 0x55, 0xb7, 0x8d, 0x63, 0xfc,
    +	0x4d, 0x24, 0xa2, 0x79, 0x56, 0x39, 0x2c, 0xc3, 0x57, 0x48, 0x22, 0x5b, 0xfa, 0x23, 0x0d, 0xad,
    +	0xb4, 0xac, 0xdb, 0xdd, 0x2e, 0xc8, 0xe1, 0xcf, 0x51, 0x05, 0x2a, 0xdf, 0xa5, 0x82, 0xca, 0x84,
    +	0x55, 0xb7, 0xaf, 0x65, 0xbc, 0x24, 0x85, 0x30, 0x82, 0x41, 0x0f, 0x08, 0xa1, 0x01, 0xd2, 0xc6,
    +	0xe1, 0x75, 0x63, 0xff, 0xe0, 0x0b, 0x66, 0x8b, 0x7b, 0x4c, 0xd0, 0x26, 0x56, 0x7e, 0x50, 0x4a,
    +	0x23, 0x89, 0x55, 0xbc, 0x87, 0x96, 0xc2, 0x80, 0xd9, 0x2a, 0x67, 0x57, 0x8e, 0xcb, 0x59, 0x1c,
    +	0x59, 0x3b, 0x60, 0x76, 0x5a, 0x3c, 0xf8, 0x22, 0xd2, 0x8e, 0xfe, 0xbb, 0x86, 0xce, 0x26, 0x52,
    +	0xbb, 0x4e, 0x28, 0xf0, 0x67, 0x53, 0x6f, 0x30, 0x8a, 0xbd, 0x01, 0xb4, 0xe5, 0x0b, 0xd6, 0x95,
    +	0x9f, 0x4a, 0x4c, 0xc9, 0xc4, 0x7f, 0x0f, 0x95, 0x1d, 0xc1, 0x86, 0x61, 0xad, 0x24, 0x8b, 0xb0,
    +	0x55, 0xf4, 0x01, 0x69, 0xfa, 0x5b, 0xa0, 0x4e, 0x22, 0x2b, 0xba, 0x9b, 0x89, 0x1e, 0x5e, 0x85,
    +	0x3f, 0x45, 0x2b, 0x01, 0xe5, 0xcc, 0x13, 0x84, 0x3d, 0x98, 0x11, 0xfe, 0x2c, 0x1f, 0x56, 0x2c,
    +	0xcf, 0x38, 0xf3, 0x6c, 0xd6, 0x3c, 0x3b, 0x3e, 0x6a, 0xac, 0x24, 0x44, 0x92, 0xda, 0xd3, 0xbf,
    +	0x2f, 0xa1, 0x65, 0xd5, 0x12, 0xa7, 0x50, 0xea, 0xdd, 0x5c, 0xa9, 0x2f, 0x15, 0x1b, 0x8f, 0x79,
    +	0x85, 0xc6, 0x1d, 0x74, 0x26, 0x14, 0x54, 0x8c, 0x42, 0x39, 0xa3, 0x05, 0x5a, 0x47, 0xd9, 0x93,
    +	0x3a, 0xcd, 0x73, 0xca, 0xe2, 0x99, 0xe8, 0x9b, 0x28, 0x5b, 0xfa, 0x77, 0x25, 0x74, 0x2e, 0x3f,
    +	0x98, 0xf8, 0x0d, 0x54, 0x0d, 0x19, 0x3f, 0x74, 0x6c, 0xb6, 0x47, 0x87, 0x4c, 0xed, 0x8d, 0xff,
    +	0x29, 0xfd, 0x6a, 0x3b, 0x65, 0x91, 0xac, 0x1c, 0xee, 0x25, 0x6a, 0x96, 0xcf, 0x85, 0x7a, 0xf4,
    +	0xfc, 0x94, 0xc2, 0x1a, 0x33, 0xa2, 0x35, 0x66, 0xb4, 0x3c, 0xb1, 0xcf, 0xdb, 0x82, 0x3b, 0x5e,
    +	0x6f, 0xca, 0x11, 0x18, 0x23, 0x59, 0xcb, 0xf8, 0x63, 0x54, 0xe1, 0x2c, 0xf4, 0x47, 0xdc, 0x66,
    +	0x2a, 0x15, 0xb9, 0xcd, 0x03, 0xfb, 0x1e, 0xca, 0x04, 0x4b, 0xaa, 0xbb, 0xeb, 0xdb, 0xd4, 0x8d,
    +	0x8a, 0x93, 0xf6, 0xc7, 0x2a, 0xb4, 0x36, 0x51, 0x26, 0x48, 0x62, 0x0c, 0xb6, 0xe7, 0xaa, 0xca,
    +	0xc5, 0x8e, 0x4b, 0x4f, 0xa5, 0x45, 0x3e, 0xc8, 0xb5, 0x88, 0x59, 0xac, 0xa4, 0x32, 0xb8, 0xb9,
    +	0x0b, 0xe1, 0x0f, 0x0d, 0xad, 0x67, 0x05, 0x4f, 0x61, 0x27, 0x58, 0xf9, 0x9d, 0x70, 0xf9, 0x04,
    +	0xcf, 0x98, 0xb3, 0x16, 0xfe, 0xd2, 0x50, 0x23, 0x2b, 0x66, 0x51, 0x4e, 0x87, 0x4c, 0x30, 0x1e,
    +	0x26, 0x65, 0xc4, 0x5b, 0xa8, 0x42, 0xad, 0xd6, 0xfb, 0xdc, 0x1f, 0x05, 0xf1, 0x71, 0x83, 0xf8,
    +	0x6e, 0x2b, 0x1a, 0x49, 0xb8, 0x70, 0x02, 0x07, 0x8e, 0xba, 0x53, 0x99, 0x13, 0x78, 0xd7, 0xf1,
    +	0xba, 0x44, 0x72, 0x40, 0xc2, 0x83, 0x66, 0x5f, 0xcc, 0x4b, 0xc8, 0x2e, 0x97, 0x1c, 0xdc, 0x40,
    +	0xe5, 0xd0, 0xf6, 0x03, 0x56, 0x5b, 0x92, 0x22, 0x2b, 0x10, 0x72, 0x1b, 0x08, 0x24, 0xa2, 0xe3,
    +	0xcb, 0x68, 0x05, 0x04, 0xc3, 0x80, 0xda, 0xac, 0x56, 0x96, 0x42, 0x72, 0x11, 0xed, 0xc5, 0x44,
    +	0x92, 0xf2, 0xf5, 0x5f, 0x26, 0x8a, 0x24, 0x57, 0xdf, 0x36, 0x42, 0xb6, 0xef, 0x09, 0xee, 0xbb,
    +	0x2e, 0xe3, 0xea, 0x49, 0x49, 0xfb, 0xec, 0x24, 0x1c, 0x92, 0x91, 0xc2, 0x1e, 0x42, 0x41, 0x92,
    +	0x1b, 0xd5, 0x46, 0x6f, 0x9f, 0x20, 0xff, 0x33, 0x12, 0xdb, 0x3c, 0x07, 0xfe, 0x32, 0x8c, 0x8c,
    +	0x07, 0xfd, 0x37, 0x0d, 0x55, 0x95, 0xfe, 0x29, 0x34, 0xd6, 0xdd, 0x7c, 0x63, 0xbd, 0x56, 0x10,
    +	0x61, 0xcc, 0xee, 0xa9, 0x47, 0x1a, 0xda, 0x88, 0x43, 0xf7, 0x69, 0xb7, 0x49, 0x5d, 0xea, 0xd9,
    +	0x8c, 0xc7, 0xf7, 0x60, 0x03, 0x95, 0x9c, 0xb8, 0x91, 0x90, 0x32, 0x50, 0x6a, 0x59, 0xa4, 0xe4,
    +	0x04, 0xf8, 0x0a, 0xaa, 0xf4, 0xfd, 0x50, 0xc8, 0x16, 0x89, 0x9a, 0x28, 0x89, 0xfa, 0x8e, 0xa2,
    +	0x93, 0x44, 0x02, 0x7f, 0x88, 0xca, 0x81, 0xcf, 0x45, 0x58, 0x5b, 0x92, 0x51, 0x5f, 0x2b, 0x16,
    +	0x35, 0xec, 0x36, 0xb5, 0xac, 0x53, 0xa4, 0x02, 0x66, 0x48, 0x64, 0x4d, 0xff, 0x46, 0x43, 0xff,
    +	0x9f, 0x11, 0x7f, 0xa4, 0x83, 0xbb, 0x68, 0xd9, 0x89, 0x98, 0x0a, 0x1e, 0xbd, 0x55, 0xcc, 0xed,
    +	0x8c, 0x54, 0xa4, 0xd0, 0x2c, 0x86, 0x60, 0xb1, 0x69, 0xfd, 0x27, 0x0d, 0x9d, 0x9f, 0x8a, 0x57,
    +	0x42, 0x4c, 0xd8, 0xf9, 0x90, 0xbc, 0x72, 0x06, 0x62, 0xc2, 0xea, 0x96, 0x1c, 0x7c, 0x17, 0x55,
    +	0x24, 0x42, 0xb5, 0x7d, 0x57, 0x25, 0xd0, 0x8c, 0x13, 0x68, 0x29, 0xfa, 0x8b, 0xa3, 0xc6, 0x2b,
    +	0xd3, 0xb0, 0xdd, 0x88, 0xd9, 0x24, 0x31, 0x00, 0xa3, 0xc8, 0x38, 0xf7, 0xb9, 0x9a, 0x56, 0x39,
    +	0x8a, 0xef, 0x01, 0x81, 0x44, 0x74, 0xfd, 0xe7, 0xb4, 0x49, 0x01, 0x3d, 0x42, 0x7c, 0x50, 0x9c,
    +	0x49, 0x08, 0x0c, 0xa5, 0x23, 0x92, 0x83, 0x47, 0x68, 0xdd, 0x99, 0x80, 0x9b, 0x27, 0xdb, 0xc9,
    +	0x89, 0x5a, 0xb3, 0xa6, 0xcc, 0xaf, 0x4f, 0x72, 0xc8, 0x94, 0x0b, 0x9d, 0xa1, 0x29, 0x29, 0x38,
    +	0x09, 0x7d, 0x21, 0x02, 0x35, 0x4d, 0x37, 0x8a, 0x83, 0xdc, 0x34, 0x84, 0x8a, 0x7c, 0x5d, 0xa7,
    +	0x63, 0x11, 0x69, 0x4a, 0x7f, 0x5c, 0x4a, 0xf2, 0x21, 0x17, 0xcd, 0x3b, 0xc9, 0x6b, 0xe5, 0x0e,
    +	0x90, 0x67, 0x3e, 0x5a, 0x6b, 0x17, 0x32, 0x81, 0x27, 0x3c, 0x32, 0x25, 0x8d, 0x3b, 0x29, 0xf8,
    +	0xd7, 0xfe, 0x0b, 0xf8, 0xaf, 0xce, 0x02, 0xfe, 0xf8, 0x0e, 0x5a, 0x14, 0x6e, 0x3c, 0xec, 0xaf,
    +	0x17, 0xb3, 0xd8, 0xd9, 0x6d, 0x37, 0xab, 0x2a, 0xe5, 0x8b, 0x9d, 0xdd, 0x36, 0x01, 0x13, 0x78,
    +	0x1f, 0x95, 0xf9, 0xc8, 0x65, 0x80, 0x95, 0x16, 0x8b, 0x63, 0x2f, 0xc8, 0x60, 0x3a, 0x7c, 0xf0,
    +	0x15, 0x92, 0xc8, 0x8e, 0xfe, 0x2d, 0xc0, 0xec, 0x2c, 0xa2, 0xc2, 0x1c, 0xad, 0xba, 0x99, 0xd9,
    +	0x51, 0x79, 0xb8, 0x75, 0xf2, 0xa9, 0x53, 0x43, 0x7f, 0x41, 0xf9, 0x5d, 0xcd, 0xf2, 0x48, 0xce,
    +	0x87, 0x4e, 0x11, 0x4a, 0x9f, 0x0d, 0x73, 0x00, 0xcd, 0x1b, 0x0d, 0xbc, 0x9a, 0x03, 0xe8, 0xe9,
    +	0x90, 0x44, 0x74, 0x38, 0x28, 0x21, 0xb3, 0x39, 0x13, 0x7b, 0xe9, 0xe2, 0x4a, 0x0e, 0x4a, 0x3b,
    +	0xe1, 0x90, 0x8c, 0x94, 0xfe, 0xab, 0x86, 0xd6, 0x26, 0x00, 0x35, 0x7e, 0x15, 0x95, 0x7b, 0x99,
    +	0x33, 0x9b, 0x64, 0x28, 0xba, 0xb3, 0x11, 0x0f, 0x76, 0x64, 0x02, 0xcb, 0x26, 0x76, 0xe4, 0x34,
    +	0xd6, 0xc2, 0x66, 0xf6, 0x5a, 0x46, 0x73, 0x7c, 0x5e, 0x89, 0xcf, 0xbc, 0x98, 0xc9, 0x85, 0x5e,
    +	0x9a, 0x77, 0xa1, 0xf5, 0x1f, 0x4b, 0x28, 0x06, 0x8d, 0x3b, 0xad, 0x77, 0xc9, 0x29, 0xa0, 0x37,
    +	0x2b, 0x87, 0xde, 0x8e, 0xfb, 0x6f, 0x4a, 0x26, 0xb6, 0xb9, 0x20, 0xff, 0xfe, 0x04, 0xc8, 0xbf,
    +	0x76, 0x02, 0x9b, 0x2f, 0x07, 0xfa, 0x8f, 0x35, 0xb4, 0x96, 0x91, 0x3e, 0x85, 0xe3, 0xbd, 0x9f,
    +	0x3f, 0xde, 0x97, 0x8a, 0x3f, 0x65, 0xce, 0x01, 0xdf, 0xce, 0xbd, 0x40, 0x6e, 0xb2, 0x06, 0x2a,
    +	0xdb, 0x4e, 0x97, 0xe7, 0x46, 0x00, 0x98, 0x21, 0x89, 0xe8, 0xfa, 0x57, 0xe8, 0xfc, 0x54, 0x8e,
    +	0xb0, 0x2d, 0x81, 0x56, 0xd7, 0x11, 0x8e, 0xef, 0xc5, 0xe7, 0xd2, 0x2c, 0xf6, 0xf2, 0x9d, 0x58,
    +	0x2f, 0x87, 0xcc, 0x94, 0x29, 0x92, 0x31, 0xdb, 0xdc, 0x79, 0xf2, 0xbc, 0xbe, 0xf0, 0xf4, 0x79,
    +	0x7d, 0xe1, 0xd9, 0xf3, 0xfa, 0xc2, 0xd7, 0xe3, 0xba, 0xf6, 0x64, 0x5c, 0xd7, 0x9e, 0x8e, 0xeb,
    +	0xda, 0xb3, 0x71, 0x5d, 0xfb, 0x7b, 0x5c, 0xd7, 0x7e, 0xf8, 0xa7, 0xbe, 0xf0, 0xc9, 0xc5, 0x97,
    +	0xfe, 0x99, 0xec, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xc2, 0xa4, 0xff, 0x46, 0x13, 0x00,
    +	0x00,
     }
     
     func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
    @@ -709,7 +956,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func (m *Ingress) Marshal() (dAtA []byte, err error) {
    +func (m *IPAddress) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -719,18 +966,18 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
    +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
     	{
    -		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -738,9 +985,9 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
     	i--
    -	dAtA[i] = 0x1a
    +	dAtA[i] = 0x12
     	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -748,9 +995,46 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
     	i--
    -	dAtA[i] = 0x12
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *IPAddressList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
     	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
     			return 0, err
     		}
    @@ -762,7 +1046,7 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
    +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -772,19 +1056,19 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) {
    +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if m.Resource != nil {
    +	if m.ParentRef != nil {
     		{
    -			size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
    +			size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i])
     			if err != nil {
     				return 0, err
     			}
    @@ -792,27 +1076,12 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     			i = encodeVarintGenerated(dAtA, i, uint64(size))
     		}
     		i--
    -		dAtA[i] = 0x1a
    -	}
    -	{
    -		size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		dAtA[i] = 0xa
     	}
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.ServiceName)
    -	copy(dAtA[i:], m.ServiceName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName)))
    -	i--
    -	dAtA[i] = 0xa
     	return len(dAtA) - i, nil
     }
     
    -func (m *IngressClass) Marshal() (dAtA []byte, err error) {
    +func (m *Ingress) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -822,16 +1091,26 @@ func (m *IngressClass) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) {
    +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
     	{
     		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
     		if err != nil {
    @@ -855,7 +1134,7 @@ func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func (m *IngressClassList) Marshal() (dAtA []byte, err error) {
    +func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
     	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    @@ -865,23 +1144,116 @@ func (m *IngressClassList) Marshal() (dAtA []byte, err error) {
     	return dAtA[:n], nil
     }
     
    -func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) {
    +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) {
     	size := m.Size()
     	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	i := len(dAtA)
     	_ = i
     	var l int
     	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    +	if m.Resource != nil {
    +		{
    +			size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	{
    +		size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.ServiceName)
    +	copy(dAtA[i:], m.ServiceName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *IngressClass) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *IngressClassList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
     				i -= size
     				i = encodeVarintGenerated(dAtA, i, uint64(size))
     			}
    @@ -1378,108 +1750,363 @@ func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    -	offset -= sovGenerated(v)
    -	base := offset
    -	for v >= 1<<7 {
    -		dAtA[offset] = uint8(v&0x7f | 0x80)
    -		v >>= 7
    -		offset++
    +func (m *ParentReference) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	dAtA[offset] = uint8(v)
    -	return base
    +	return dAtA[:n], nil
     }
    -func (m *HTTPIngressPath) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Path)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Backend.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.PathType != nil {
    -		l = len(*m.PathType)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    +
    +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
     }
     
    -func (m *HTTPIngressRuleValue) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	if len(m.Paths) > 0 {
    -		for _, e := range m.Paths {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.Namespace)
    +	copy(dAtA[i:], m.Namespace)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Resource)
    +	copy(dAtA[i:], m.Resource)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Group)
    +	copy(dAtA[i:], m.Group)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
     }
     
    -func (m *Ingress) Size() (n int) {
    -	if m == nil {
    -		return 0
    +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Status.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +	return dAtA[:n], nil
     }
     
    -func (m *IngressBackend) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	l = len(m.ServiceName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.ServicePort.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.Resource != nil {
    -		l = m.Resource.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
     	}
    -	return n
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
     }
     
    -func (m *IngressClass) Size() (n int) {
    -	if m == nil {
    -		return 0
    +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
     	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    +	return dAtA[:n], nil
     }
     
    -func (m *IngressClassList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
     	var l int
     	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
     	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.CIDRs) > 0 {
    +		for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.CIDRs[iNdEx])
    +			copy(dAtA[i:], m.CIDRs[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Conditions) > 0 {
    +		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    +	offset -= sovGenerated(v)
    +	base := offset
    +	for v >= 1<<7 {
    +		dAtA[offset] = uint8(v&0x7f | 0x80)
    +		v >>= 7
    +		offset++
    +	}
    +	dAtA[offset] = uint8(v)
    +	return base
    +}
    +func (m *HTTPIngressPath) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Path)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Backend.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.PathType != nil {
    +		l = len(*m.PathType)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *HTTPIngressRuleValue) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Paths) > 0 {
    +		for _, e := range m.Paths {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *IPAddress) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *IPAddressList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *IPAddressSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.ParentRef != nil {
    +		l = m.ParentRef.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *Ingress) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *IngressBackend) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.ServiceName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.ServicePort.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Resource != nil {
    +		l = m.Resource.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *IngressClass) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *IngressClassList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
     	return n
     }
     
    @@ -1673,31 +2300,110 @@ func (m *IngressTLS) Size() (n int) {
     	return n
     }
     
    -func sovGenerated(x uint64) (n int) {
    -	return (math_bits.Len64(x|1) + 6) / 7
    -}
    -func sozGenerated(x uint64) (n int) {
    -	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    -}
    -func (this *HTTPIngressPath) String() string {
    -	if this == nil {
    -		return "nil"
    +func (m *ParentReference) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	s := strings.Join([]string{`&HTTPIngressPath{`,
    -		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
    -		`Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
    -		`PathType:` + valueToStringGenerated(this.PathType) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    +	var l int
    +	_ = l
    +	l = len(m.Group)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Resource)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Namespace)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
     }
    -func (this *HTTPIngressRuleValue) String() string {
    -	if this == nil {
    -		return "nil"
    +
    +func (m *ServiceCIDR) Size() (n int) {
    +	if m == nil {
    +		return 0
     	}
    -	repeatedStringForPaths := "[]HTTPIngressPath{"
    -	for _, f := range this.Paths {
    -		repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + ","
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ServiceCIDRList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ServiceCIDRSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.CIDRs) > 0 {
    +		for _, s := range m.CIDRs {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ServiceCIDRStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Conditions) > 0 {
    +		for _, e := range m.Conditions {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *HTTPIngressPath) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&HTTPIngressPath{`,
    +		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
    +		`Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
    +		`PathType:` + valueToStringGenerated(this.PathType) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *HTTPIngressRuleValue) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForPaths := "[]HTTPIngressPath{"
    +	for _, f := range this.Paths {
    +		repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + ","
     	}
     	repeatedStringForPaths += "}"
     	s := strings.Join([]string{`&HTTPIngressRuleValue{`,
    @@ -1706,6 +2412,43 @@ func (this *HTTPIngressRuleValue) String() string {
     	}, "")
     	return s
     }
    +func (this *IPAddress) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&IPAddress{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *IPAddressList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]IPAddress{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&IPAddressList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *IPAddressSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&IPAddressSpec{`,
    +		`ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *Ingress) String() string {
     	if this == nil {
     		return "nil"
    @@ -1900,22 +2643,1172 @@ func (this *IngressTLS) String() string {
     	if this == nil {
     		return "nil"
     	}
    -	s := strings.Join([]string{`&IngressTLS{`,
    -		`Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`,
    -		`SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func valueToStringGenerated(v interface{}) string {
    -	rv := reflect.ValueOf(v)
    -	if rv.IsNil() {
    -		return "nil"
    +	s := strings.Join([]string{`&IngressTLS{`,
    +		`Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`,
    +		`SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ParentReference) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ParentReference{`,
    +		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
    +		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
    +		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceCIDR) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ServiceCIDR{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceCIDRList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ServiceCIDR{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ServiceCIDRList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceCIDRSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ServiceCIDRSpec{`,
    +		`CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ServiceCIDRStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForConditions := "[]Condition{"
    +	for _, f := range this.Conditions {
    +		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
    +	}
    +	repeatedStringForConditions += "}"
    +	s := strings.Join([]string{`&ServiceCIDRStatus{`,
    +		`Conditions:` + repeatedStringForConditions + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func valueToStringGenerated(v interface{}) string {
    +	rv := reflect.ValueOf(v)
    +	if rv.IsNil() {
    +		return "nil"
    +	}
    +	pv := reflect.Indirect(rv).Interface()
    +	return fmt.Sprintf("*%v", pv)
    +}
    +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Path = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := PathType(dAtA[iNdEx:postIndex])
    +			m.PathType = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Paths = append(m.Paths, HTTPIngressPath{})
    +			if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IPAddress) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IPAddress: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IPAddressList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, IPAddress{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.ParentRef == nil {
    +				m.ParentRef = &ParentReference{}
    +			}
    +			if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *Ingress) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IngressBackend) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ServiceName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Resource == nil {
    +				m.Resource = &v11.TypedLocalObjectReference{}
    +			}
    +			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IngressClass) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IngressClassList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, IngressClass{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
     	}
    -	pv := reflect.Indirect(rv).Interface()
    -	return fmt.Sprintf("*%v", pv)
    +	return nil
     }
    -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
    +func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -1938,15 +3831,15 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -1974,13 +3867,14 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Path = string(dAtA[iNdEx:postIndex])
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.APIGroup = &s
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -1990,28 +3884,27 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.Kind = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -2039,64 +3932,46 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := PathType(dAtA[iNdEx:postIndex])
    -			m.PathType = &s
    +			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
     			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
     			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
     			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
     			}
    -			if iNdEx >= l {
    +			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.Scope = &s
    +			iNdEx = postIndex
    +		case 5:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -2106,25 +3981,24 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Paths = append(m.Paths, HTTPIngressPath{})
    -			if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.Namespace = &s
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -2147,7 +4021,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *Ingress) Unmarshal(dAtA []byte) error {
    +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -2170,17 +4044,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -2190,28 +4064,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.Controller = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -2238,40 +4111,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    +			if m.Parameters == nil {
    +				m.Parameters = &IngressClassParametersReference{}
     			}
    -			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -2296,7 +4139,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressBackend) Unmarshal(dAtA []byte) error {
    +func (m *IngressList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -2319,47 +4162,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.ServiceName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -2386,13 +4197,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 3:
    +		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -2419,10 +4230,8 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.Resource == nil {
    -				m.Resource = &v11.TypedLocalObjectReference{}
    -			}
    -			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Items = append(m.Items, Ingress{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -2447,7 +4256,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClass) Unmarshal(dAtA []byte) error {
    +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -2470,17 +4279,17 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -2490,28 +4299,59 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.IP = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Hostname = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -2538,7 +4378,8 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Ports = append(m.Ports, IngressPortStatus{})
    +			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -2563,7 +4404,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClassList) Unmarshal(dAtA []byte) error {
    +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -2586,48 +4427,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -2654,8 +4462,8 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Items = append(m.Items, IngressClass{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{})
    +			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -2680,7 +4488,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
    +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -2703,17 +4511,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
     			}
    -			var stringLen uint64
    +			m.Port = 0
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -2723,28 +4531,14 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				m.Port |= int32(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.APIGroup = &s
    -			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -2772,11 +4566,11 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Kind = string(dAtA[iNdEx:postIndex])
    +			m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -2804,11 +4598,62 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.Error = &s
     			iNdEx = postIndex
    -		case 4:
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IngressRule) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -2836,14 +4681,13 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.Scope = &s
    +			m.Host = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 5:
    +		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -2853,24 +4697,24 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.Namespace = &s
    +			if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -2893,7 +4737,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
    +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -2912,51 +4756,19 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     			if b < 0x80 {
     				break
     			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Controller = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -2983,10 +4795,10 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.Parameters == nil {
    -				m.Parameters = &IngressClassParametersReference{}
    +			if m.HTTP == nil {
    +				m.HTTP = &HTTPIngressRuleValue{}
     			}
    -			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -3011,7 +4823,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressList) Unmarshal(dAtA []byte) error {
    +func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3034,15 +4846,15 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3069,13 +4881,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if m.Backend == nil {
    +				m.Backend = &IngressBackend{}
    +			}
    +			if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3102,66 +4917,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Items = append(m.Items, Ingress{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.TLS = append(m.TLS, IngressTLS{})
    +			if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    +		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3171,27 +4936,29 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.IP = string(dAtA[iNdEx:postIndex])
    +			m.Rules = append(m.Rules, IngressRule{})
    +			if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
    -		case 2:
    +		case 4:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -3219,11 +4986,62 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Hostname = string(dAtA[iNdEx:postIndex])
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.IngressClassName = &s
     			iNdEx = postIndex
    -		case 4:
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *IngressStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3250,8 +5068,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Ports = append(m.Ports, IngressPortStatus{})
    -			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -3276,7 +5093,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
    +func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3299,17 +5116,17 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group")
    +			return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3319,25 +5136,55 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{})
    -			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    +			m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
     			}
    +			m.SecretName = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3360,7 +5207,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
    +func (m *ParentReference) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3383,17 +5230,49 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ParentReference: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Group = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
     			}
    -			m.Port = 0
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3403,14 +5282,27 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				m.Port |= int32(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -		case 2:
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Resource = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -3438,11 +5330,11 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
    +			m.Namespace = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
    -		case 3:
    +		case 4:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
     			}
     			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
    @@ -3470,8 +5362,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.Error = &s
    +			m.Name = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3494,7 +5385,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressRule) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3517,17 +5408,17 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3537,27 +5428,28 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Host = string(dAtA[iNdEx:postIndex])
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3584,63 +5476,13 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    +		case 3:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3667,10 +5509,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.HTTP == nil {
    -				m.HTTP = &HTTPIngressRuleValue{}
    -			}
    -			if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    @@ -3695,7 +5534,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressSpec) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3718,15 +5557,15 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3753,50 +5592,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if m.Backend == nil {
    -				m.Backend = &IngressBackend{}
    -			}
    -			if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
     		case 2:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.TLS = append(m.TLS, IngressTLS{})
    -			if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
     			}
     			var msglen int
     			for shift := uint(0); ; shift += 7 {
    @@ -3823,44 +5625,11 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Rules = append(m.Rules, IngressRule{})
    -			if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +			m.Items = append(m.Items, ServiceCIDR{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
     				return err
     			}
     			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.IngressClassName = &s
    -			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
     			skippy, err := skipGenerated(dAtA[iNdEx:])
    @@ -3882,7 +5651,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressStatus) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3905,17 +5674,17 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType)
     			}
    -			var msglen int
    +			var stringLen uint64
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -3925,24 +5694,23 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    +				stringLen |= uint64(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			if msglen < 0 {
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + msglen
    +			postIndex := iNdEx + intStringLen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    +			m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex]))
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
    @@ -3965,7 +5733,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    -func (m *IngressTLS) Unmarshal(dAtA []byte) error {
    +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
     	for iNdEx < l {
    @@ -3988,17 +5756,17 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     		fieldNum := int32(wire >> 3)
     		wireType := int(wire & 0x7)
     		if wireType == 4 {
    -			return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
    +			return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group")
     		}
     		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
    +			return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire)
     		}
     		switch fieldNum {
     		case 1:
     			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
    +				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
     			}
    -			var stringLen uint64
    +			var msglen int
     			for shift := uint(0); ; shift += 7 {
     				if shift >= 64 {
     					return ErrIntOverflowGenerated
    @@ -4008,55 +5776,25 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
     				}
     				b := dAtA[iNdEx]
     				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    +				msglen |= int(b&0x7F) << shift
     				if b < 0x80 {
     					break
     				}
     			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    +			if msglen < 0 {
     				return ErrInvalidLengthGenerated
     			}
    -			postIndex := iNdEx + intStringLen
    +			postIndex := iNdEx + msglen
     			if postIndex < 0 {
     				return ErrInvalidLengthGenerated
     			}
     			if postIndex > l {
     				return io.ErrUnexpectedEOF
     			}
    -			m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    +			m.Conditions = append(m.Conditions, v1.Condition{})
    +			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
     			}
    -			m.SecretName = string(dAtA[iNdEx:postIndex])
     			iNdEx = postIndex
     		default:
     			iNdEx = preIndex
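Editor's note (illustrative, not part of the vendored diff): every regenerated Unmarshal method above decodes protobuf varints with the same loop — 7 payload bits per byte, with the high bit set while more bytes follow. A minimal standalone sketch of that pattern, with a hypothetical helper name and signature:

package main

import (
	"errors"
	"fmt"
)

// readUvarint mirrors the decoding loop emitted in the generated Unmarshal
// methods: accumulate 7 bits per byte, stop when the continuation bit clears.
// The name and signature are illustrative, not part of the vendored code.
func readUvarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected end of input")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	// 0xAC 0x02 is the protobuf varint encoding of 300.
	v, next, err := readUvarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}
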
    diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto
    index f36df9ec19..3368dcaec3 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto
    @@ -73,6 +73,44 @@ message HTTPIngressRuleValue {
       repeated HTTPIngressPath paths = 1;
     }
     
    +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
    +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
    +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
    +// the name of the object is the IP address in canonical format, four decimal digits separated
    +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
    +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
    +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
    +message IPAddress {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec is the desired state of the IPAddress.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional IPAddressSpec spec = 2;
    +}
    +
    +// IPAddressList contains a list of IPAddress.
    +message IPAddressList {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is the list of IPAddresses.
    +  repeated IPAddress items = 2;
    +}
    +
    +// IPAddressSpec describe the attributes in an IP Address.
    +message IPAddressSpec {
    +  // ParentRef references the resource that an IPAddress is attached to.
    +  // An IPAddress must reference a parent object.
    +  // +required
    +  optional ParentReference parentRef = 1;
    +}
    +
     // Ingress is a collection of rules that allow inbound connections to reach the
     // endpoints defined by a backend. An Ingress can be configured to give services
     // externally-reachable urls, load balance traffic, terminate SSL, offer name
    @@ -81,7 +119,7 @@ message Ingress {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the desired state of the Ingress.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -102,13 +140,13 @@ message IngressBackend {
     
       // servicePort Specifies the port of the referenced service.
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
     
       // resource is an ObjectRef to another Kubernetes resource in the namespace
       // of the Ingress object. If resource is specified, serviceName and servicePort
       // must not be specified.
       // +optional
    -  optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
    +  optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
     }
     
     // IngressClass represents the class of the Ingress, referenced by the Ingress
    @@ -120,7 +158,7 @@ message IngressClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the desired state of the IngressClass.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -132,7 +170,7 @@ message IngressClass {
     message IngressClassList {
       // Standard list metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of IngressClasses.
       repeated IngressClass items = 2;
    @@ -186,7 +224,7 @@ message IngressList {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of Ingress.
       repeated Ingress items = 2;
    @@ -351,3 +389,74 @@ message IngressTLS {
       optional string secretName = 2;
     }
     
    +// ParentReference describes a reference to a parent object.
    +message ParentReference {
    +  // Group is the group of the object being referenced.
    +  // +optional
    +  optional string group = 1;
    +
    +  // Resource is the resource of the object being referenced.
    +  // +required
    +  optional string resource = 2;
    +
    +  // Namespace is the namespace of the object being referenced.
    +  // +optional
    +  optional string namespace = 3;
    +
    +  // Name is the name of the object being referenced.
    +  // +required
    +  optional string name = 4;
    +}
    +
    +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
    +// This range is used to allocate ClusterIPs to Service objects.
    +message ServiceCIDR {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // spec is the desired state of the ServiceCIDR.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional ServiceCIDRSpec spec = 2;
    +
    +  // status represents the current state of the ServiceCIDR.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +  // +optional
    +  optional ServiceCIDRStatus status = 3;
    +}
    +
    +// ServiceCIDRList contains a list of ServiceCIDR objects.
    +message ServiceCIDRList {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is the list of ServiceCIDRs.
    +  repeated ServiceCIDR items = 2;
    +}
    +
    +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
    +message ServiceCIDRSpec {
    +  // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
    +  // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    +  // This field is immutable.
    +  // +optional
    +  // +listType=atomic
    +  repeated string cidrs = 1;
    +}
    +
    +// ServiceCIDRStatus describes the current state of the ServiceCIDR.
    +message ServiceCIDRStatus {
    +  // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
    +  // Current service state
    +  // +optional
    +  // +patchMergeKey=type
    +  // +patchStrategy=merge
    +  // +listType=map
    +  // +listMapKey=type
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
    +}
    +
    diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go
    index 04234953e6..9d2a13cc68 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/register.go
    +++ b/vendor/k8s.io/api/networking/v1beta1/register.go
    @@ -51,6 +51,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
     		&IngressList{},
     		&IngressClass{},
     		&IngressClassList{},
    +		&IPAddress{},
    +		&IPAddressList{},
    +		&ServiceCIDR{},
    +		&ServiceCIDRList{},
     	)
     	// Add the watch version that applies
     	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
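
The register.go hunk above adds the four new kinds to the v1beta1 scheme builder. A minimal sketch (not part of the vendored patch) of what that enables, assuming only the k8s.io/api and k8s.io/apimachinery packages already present in this vendor tree:

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := networkingv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Before this patch the scheme could not resolve a kind for IPAddress;
	// with the updated addKnownTypes it now maps to networking.k8s.io/v1beta1.
	gvks, _, err := scheme.ObjectKinds(&networkingv1beta1.IPAddress{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks)
}
```
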
    diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go
    index 34dfe76aa3..cd7126a5a8 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/types.go
    +++ b/vendor/k8s.io/api/networking/v1beta1/types.go
    @@ -218,7 +218,7 @@ type IngressRule struct {
     	// default backend, is left to the controller fulfilling the Ingress. Http is
     	// currently the only supported IngressRuleValue.
     	// +optional
    -	IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"`
    +	IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"`
     }
     
     // IngressRuleValue represents a rule to apply against incoming requests. If the
    @@ -421,3 +421,133 @@ type IngressClassList struct {
     	// items is the list of IngressClasses.
     	Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
    +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
    +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
    +// the name of the object is the IP address in canonical format, four decimal digits separated
    +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
    +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
    +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
    +type IPAddress struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// spec is the desired state of the IPAddress.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +}
    +
    +// IPAddressSpec describe the attributes in an IP Address.
    +type IPAddressSpec struct {
    +	// ParentRef references the resource that an IPAddress is attached to.
    +	// An IPAddress must reference a parent object.
    +	// +required
    +	ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"`
    +}
    +
    +// ParentReference describes a reference to a parent object.
    +type ParentReference struct {
    +	// Group is the group of the object being referenced.
    +	// +optional
    +	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
    +	// Resource is the resource of the object being referenced.
    +	// +required
    +	Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"`
    +	// Namespace is the namespace of the object being referenced.
    +	// +optional
    +	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
    +	// Name is the name of the object being referenced.
    +	// +required
    +	Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// IPAddressList contains a list of IPAddress.
    +type IPAddressList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// items is the list of IPAddresses.
    +	Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
    +// This range is used to allocate ClusterIPs to Service objects.
    +type ServiceCIDR struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// spec is the desired state of the ServiceCIDR.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
    +	// status represents the current state of the ServiceCIDR.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    +	// +optional
    +	Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    +}
    +
    +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
    +type ServiceCIDRSpec struct {
    +	// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
    +	// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
    +	// This field is immutable.
    +	// +optional
    +	// +listType=atomic
    +	CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
    +}
    +
    +const (
    +	// ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the
    +	// apiserver to allocate ClusterIPs for Services.
    +	ServiceCIDRConditionReady = "Ready"
    +	// ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is
    +	// being deleted.
    +	ServiceCIDRReasonTerminating = "Terminating"
    +)
    +
    +// ServiceCIDRStatus describes the current state of the ServiceCIDR.
    +type ServiceCIDRStatus struct {
    +	// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
    +	// Current service state
    +	// +optional
    +	// +patchMergeKey=type
    +	// +patchStrategy=merge
    +	// +listType=map
    +	// +listMapKey=type
    +	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// ServiceCIDRList contains a list of ServiceCIDR objects.
    +type ServiceCIDRList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +	// items is the list of ServiceCIDRs.
    +	Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
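
The types.go hunk above defines the Go shapes for IPAddress, ParentReference, and ServiceCIDR. A hedged sketch of constructing them; the IP, CIDRs, namespace, and Service name are illustrative values, not taken from the patch:

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Per the IPAddress doc comment, the object name must be the canonical
	// form of the IP: no leading zeros for IPv4, RFC 5952 form for IPv6.
	ip := &networkingv1beta1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{Name: "192.168.1.5"},
		Spec: networkingv1beta1.IPAddressSpec{
			ParentRef: &networkingv1beta1.ParentReference{
				Group:     "", // core API group
				Resource:  "services",
				Namespace: "default",
				Name:      "my-service", // hypothetical Service
			},
		},
	}

	// ServiceCIDRSpec allows at most two CIDRs, one per IP family, and the
	// field is immutable once set.
	cidr := &networkingv1beta1.ServiceCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "extra-service-cidr"},
		Spec: networkingv1beta1.ServiceCIDRSpec{
			CIDRs: []string{"10.96.0.0/16", "2001:db8::/64"},
		},
	}

	fmt.Println(ip.Name, cidr.Spec.CIDRs)
}
```
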
    diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
    index b2373669fe..9d27517f3b 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
    @@ -47,6 +47,35 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string {
     	return map_HTTPIngressRuleValue
     }
     
    +var map_IPAddress = map[string]string{
    +	"":         "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"spec":     "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +}
    +
    +func (IPAddress) SwaggerDoc() map[string]string {
    +	return map_IPAddress
    +}
    +
    +var map_IPAddressList = map[string]string{
    +	"":         "IPAddressList contains a list of IPAddress.",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"items":    "items is the list of IPAddresses.",
    +}
    +
    +func (IPAddressList) SwaggerDoc() map[string]string {
    +	return map_IPAddressList
    +}
    +
    +var map_IPAddressSpec = map[string]string{
    +	"":          "IPAddressSpec describe the attributes in an IP Address.",
    +	"parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.",
    +}
    +
    +func (IPAddressSpec) SwaggerDoc() map[string]string {
    +	return map_IPAddressSpec
    +}
    +
     var map_Ingress = map[string]string{
     	"":         "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.",
     	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    @@ -201,4 +230,55 @@ func (IngressTLS) SwaggerDoc() map[string]string {
     	return map_IngressTLS
     }
     
    +var map_ParentReference = map[string]string{
    +	"":          "ParentReference describes a reference to a parent object.",
    +	"group":     "Group is the group of the object being referenced.",
    +	"resource":  "Resource is the resource of the object being referenced.",
    +	"namespace": "Namespace is the namespace of the object being referenced.",
    +	"name":      "Name is the name of the object being referenced.",
    +}
    +
    +func (ParentReference) SwaggerDoc() map[string]string {
    +	return map_ParentReference
    +}
    +
    +var map_ServiceCIDR = map[string]string{
    +	"":         "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"spec":     "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +	"status":   "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    +}
    +
    +func (ServiceCIDR) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDR
    +}
    +
    +var map_ServiceCIDRList = map[string]string{
    +	"":         "ServiceCIDRList contains a list of ServiceCIDR objects.",
    +	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"items":    "items is the list of ServiceCIDRs.",
    +}
    +
    +func (ServiceCIDRList) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDRList
    +}
    +
    +var map_ServiceCIDRSpec = map[string]string{
    +	"":      "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
    +	"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.",
    +}
    +
    +func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDRSpec
    +}
    +
    +var map_ServiceCIDRStatus = map[string]string{
    +	"":           "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
    +	"conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
    +}
    +
    +func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
    +	return map_ServiceCIDRStatus
    +}
    +
     // AUTO-GENERATED FUNCTIONS END HERE
    diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go
    new file mode 100644
    index 0000000000..bc2207766f
    --- /dev/null
    +++ b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go
    @@ -0,0 +1,33 @@
    +/*
    +Copyright 2023 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta1
    +
    +const (
    +
    +	// TODO: Use IPFamily as field with a field selector,And the value is set based on
    +	// the name at create time and immutable.
    +	// LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress.
    +	// This label simplify dual-stack client operations allowing to obtain the list of
    +	// IP addresses filtered by family.
    +	LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family"
    +	// LabelManagedBy is used to indicate the controller or entity that manages
    +	// an IPAddress. This label aims to enable different IPAddress
    +	// objects to be managed by different controllers or entities within the
    +	// same cluster. It is highly recommended to configure this label for all
    +	// IPAddress objects.
    +	LabelManagedBy = "ipaddress.kubernetes.io/managed-by"
    +)
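
The two well-known labels added above are intended for filtering IPAddress objects by family or by managing controller. A small sketch of building such a selector with apimachinery helpers; the label values are assumptions for illustration, and the resulting string can be passed to any list call's LabelSelector:

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel := labels.SelectorFromSet(labels.Set{
		networkingv1beta1.LabelIPAddressFamily: "IPv4",               // filter to IPv4 addresses
		networkingv1beta1.LabelManagedBy:       "example-controller", // hypothetical manager
	})
	// Prints: ipaddress.kubernetes.io/ip-family=IPv4,ipaddress.kubernetes.io/managed-by=example-controller
	fmt.Println(sel.String())
}
```
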
    diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go
    index 005d64e7fd..1a6869cd6d 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go
    @@ -23,6 +23,7 @@ package v1beta1
     
     import (
     	v1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	runtime "k8s.io/apimachinery/pkg/runtime"
     )
     
    @@ -71,6 +72,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *IPAddress) DeepCopyInto(out *IPAddress) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
    +func (in *IPAddress) DeepCopy() *IPAddress {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(IPAddress)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *IPAddress) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]IPAddress, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
    +func (in *IPAddressList) DeepCopy() *IPAddressList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(IPAddressList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *IPAddressList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
    +	*out = *in
    +	if in.ParentRef != nil {
    +		in, out := &in.ParentRef, &out.ParentRef
    +		*out = new(ParentReference)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
    +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(IPAddressSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *Ingress) DeepCopyInto(out *Ingress) {
     	*out = *in
    @@ -448,3 +530,124 @@ func (in *IngressTLS) DeepCopy() *IngressTLS {
     	in.DeepCopyInto(out)
     	return out
     }
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ParentReference) DeepCopyInto(out *ParentReference) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
    +func (in *ParentReference) DeepCopy() *ParentReference {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ParentReference)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	in.Status.DeepCopyInto(&out.Status)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
    +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDR)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]ServiceCIDR, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
    +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDRList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
    +	*out = *in
    +	if in.CIDRs != nil {
    +		in, out := &in.CIDRs, &out.CIDRs
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
    +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDRSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
    +	*out = *in
    +	if in.Conditions != nil {
    +		in, out := &in.Conditions, &out.Conditions
    +		*out = make([]metav1.Condition, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
    +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ServiceCIDRStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
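
The generated DeepCopy helpers above re-allocate every slice and pointer field, so a copy can be mutated freely. A short sketch demonstrating that behaviour for ServiceCIDR:

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	orig := &networkingv1beta1.ServiceCIDR{
		Spec: networkingv1beta1.ServiceCIDRSpec{CIDRs: []string{"10.96.0.0/16"}},
	}

	cp := orig.DeepCopy()
	cp.Spec.CIDRs[0] = "10.0.0.0/8" // DeepCopyInto allocated a fresh CIDRs slice

	fmt.Println(orig.Spec.CIDRs[0]) // still "10.96.0.0/16"
	fmt.Println(cp.Spec.CIDRs[0])   // "10.0.0.0/8"
}
```
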
    diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go
    index e8b4c7ec7f..a876fd5fe0 100644
    --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go
    +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go
    @@ -25,6 +25,42 @@ import (
     	schema "k8s.io/apimachinery/pkg/runtime/schema"
     )
     
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *IPAddress) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
    +
     // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
     // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
     func (in *Ingress) APILifecycleIntroduced() (major, minor int) {
    @@ -120,3 +156,39 @@ func (in *IngressList) APILifecycleReplacement() schema.GroupVersionKind {
     func (in *IngressList) APILifecycleRemoved() (major, minor int) {
     	return 1, 22
     }
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
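
The prerelease-lifecycle functions above encode when each beta type appears, is deprecated, and stops being served. A sketch of how a caller might surface that metadata, using only the values returned by the generated functions in this hunk:

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	obj := &networkingv1beta1.ServiceCIDR{}

	introMajor, introMinor := obj.APILifecycleIntroduced() // 1, 31
	deprMajor, deprMinor := obj.APILifecycleDeprecated()   // 1, 34
	remMajor, remMinor := obj.APILifecycleRemoved()        // 1, 37

	fmt.Printf("ServiceCIDR (networking.k8s.io/v1beta1): introduced %d.%d, deprecated %d.%d, removed %d.%d\n",
		introMajor, introMinor, deprMajor, deprMinor, remMajor, remMinor)
}
```
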
    diff --git a/vendor/k8s.io/api/node/v1/doc.go b/vendor/k8s.io/api/node/v1/doc.go
    index 12cbcb8a0e..57ca52445b 100644
    --- a/vendor/k8s.io/api/node/v1/doc.go
    +++ b/vendor/k8s.io/api/node/v1/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=node.k8s.io
     
     package v1 // import "k8s.io/api/node/v1"
    diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto
    index 0152d5e3ab..e6b8852ec1 100644
    --- a/vendor/k8s.io/api/node/v1/generated.proto
    +++ b/vendor/k8s.io/api/node/v1/generated.proto
    @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1";
     message Overhead {
       // podFixed represents the fixed resource overhead associated with running a pod.
       // +optional
     -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
     +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
     }
     
     // RuntimeClass defines a class of container runtime supported in the cluster.
    @@ -47,7 +47,7 @@ message Overhead {
     message RuntimeClass {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // handler specifies the underlying runtime and configuration that the CRI
       // implementation will use to handle pods of this class. The possible values
    @@ -80,7 +80,7 @@ message RuntimeClassList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated RuntimeClass items = 2;
    @@ -103,6 +103,6 @@ message Scheduling {
       // tolerated by the pod and the RuntimeClass.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.Toleration tolerations = 2;
    +  repeated .k8s.io.api.core.v1.Toleration tolerations = 2;
     }
     
    diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go
    index b00f58772c..169862ea94 100644
    --- a/vendor/k8s.io/api/node/v1/types.go
    +++ b/vendor/k8s.io/api/node/v1/types.go
    @@ -24,6 +24,7 @@ import (
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.20
     
     // RuntimeClass defines a class of container runtime supported in the cluster.
     // The RuntimeClass is used to determine which container runtime is used to run
    @@ -93,6 +94,7 @@ type Scheduling struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.20
     
     // RuntimeClassList is a list of RuntimeClass objects.
     type RuntimeClassList struct {
    diff --git a/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..7497955688
    --- /dev/null
    +++ b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *RuntimeClass) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 20
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *RuntimeClassList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 20
    +}
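
The node/v1 hunks above qualify the proto type names and add lifecycle tags for RuntimeClass; the podFixed field is a map of resource name to quantity, which on the Go side is corev1.ResourceList. A hedged sketch with a made-up handler name and overhead values:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := &nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "sandboxed"},
		Handler:    "runsc", // hypothetical CRI handler
		Overhead: &nodev1.Overhead{
			// podFixed in generated.proto corresponds to PodFixed (corev1.ResourceList) here.
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			},
		},
	}
	fmt.Println(rc.Name, rc.Overhead.PodFixed.Cpu().String())
}
```
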
    diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto
    index 4673e9261d..bc68718d90 100644
    --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto
    @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1alpha1";
     message Overhead {
       // podFixed represents the fixed resource overhead associated with running a pod.
       // +optional
     -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
     +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
     }
     
     // RuntimeClass defines a class of container runtime supported in the cluster.
    @@ -47,7 +47,7 @@ message Overhead {
     message RuntimeClass {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec represents specification of the RuntimeClass
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    @@ -59,7 +59,7 @@ message RuntimeClassList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated RuntimeClass items = 2;
    @@ -113,6 +113,6 @@ message Scheduling {
       // tolerated by the pod and the RuntimeClass.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.Toleration tolerations = 2;
    +  repeated .k8s.io.api.core.v1.Toleration tolerations = 2;
     }
     
    diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto
    index 54dbc0995a..497027e033 100644
    --- a/vendor/k8s.io/api/node/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto
    @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1beta1";
     message Overhead {
       // podFixed represents the fixed resource overhead associated with running a pod.
       // +optional
     -  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
     +  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
     }
     
     // RuntimeClass defines a class of container runtime supported in the cluster.
    @@ -47,7 +47,7 @@ message Overhead {
     message RuntimeClass {
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // handler specifies the underlying runtime and configuration that the CRI
       // implementation will use to handle pods of this class. The possible values
    @@ -80,7 +80,7 @@ message RuntimeClassList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is a list of schema objects.
       repeated RuntimeClass items = 2;
    @@ -103,6 +103,6 @@ message Scheduling {
       // tolerated by the pod and the RuntimeClass.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.Toleration tolerations = 2;
    +  repeated .k8s.io.api.core.v1.Toleration tolerations = 2;
     }
     
    diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go
    index 177cdf5236..c51e02685a 100644
    --- a/vendor/k8s.io/api/policy/v1/doc.go
    +++ b/vendor/k8s.io/api/policy/v1/doc.go
    @@ -17,6 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     // Package policy is for any kind of policy object.  Suitable examples, even if
     // they aren't all here, are PodDisruptionBudget,
    diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto
    index a79e710284..57128e8112 100644
    --- a/vendor/k8s.io/api/policy/v1/generated.proto
    +++ b/vendor/k8s.io/api/policy/v1/generated.proto
    @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1";
     message Eviction {
       // ObjectMeta describes the pod that is being evicted.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // DeleteOptions may be provided
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
     }
     
     // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
    @@ -47,7 +47,7 @@ message PodDisruptionBudget {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the PodDisruptionBudget.
       // +optional
    @@ -63,7 +63,7 @@ message PodDisruptionBudgetList {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of PodDisruptionBudgets
       repeated PodDisruptionBudget items = 2;
    @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec {
       // absence of the evicted pod.  So for example you can prevent all voluntary
       // evictions by specifying "100%".
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1;
     
       // Label query over pods whose evictions are managed by the disruption
       // budget.
    @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec {
       // all pods within the namespace.
       // +patchStrategy=replace
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // An eviction is allowed if at most "maxUnavailable" pods selected by
       // "selector" are unavailable after the eviction, i.e. even in absence of
       // the evicted pod. For example, one can prevent all voluntary evictions
       // by specifying 0. This is a mutually exclusive setting with "minAvailable".
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3;
     
       // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
       // should be considered for eviction. Current implementation considers healthy pods,
    @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus {
       // If everything goes smooth this map should be empty for the most of the time.
       // Large number of entries in the map may indicate problems with pod deletions.
       // +optional
     -  map<string, k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2;
     +  map<string, .k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2;
     
       // Number of pod disruptions that are currently allowed.
       optional int32 disruptionsAllowed = 3;
    @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus {
       // +patchStrategy=merge
       // +listType=map
       // +listMapKey=type
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7;
     }
     
    diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go
    index 45b9550f4a..f05367ebe4 100644
    --- a/vendor/k8s.io/api/policy/v1/types.go
    +++ b/vendor/k8s.io/api/policy/v1/types.go
    @@ -170,6 +170,7 @@ const (
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.21
     
     // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
     type PodDisruptionBudget struct {
    @@ -188,6 +189,7 @@ type PodDisruptionBudget struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.21
     
     // PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
     type PodDisruptionBudgetList struct {
    @@ -203,6 +205,7 @@ type PodDisruptionBudgetList struct {
     // +genclient
     // +genclient:noVerbs
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.22
     
     // Eviction evicts a pod from its node subject to certain policies and safety constraints.
     // This is a subresource of Pod.  A request to cause such an eviction is
    diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..d6663b9234
    --- /dev/null
    +++ b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,40 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Eviction) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 22
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodDisruptionBudget) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 21
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PodDisruptionBudgetList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 21
    +}
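
The policy/v1 hunks above describe minAvailable and maxUnavailable as IntOrString fields, so either a percentage string or a plain integer is accepted. A sketch of constructing a PodDisruptionBudget that blocks all voluntary evictions; the name, namespace, and selector are illustrative:

```go
package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	minAvailable := intstr.FromString("100%") // "100%" prevents all voluntary evictions
	pdb := &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pdb", Namespace: "default"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "example"},
			},
		},
	}
	fmt.Println(pdb.Name, pdb.Spec.MinAvailable.String())
}
```
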
    diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto
    index d1409913f1..91e33f2332 100644
    --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto
    @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1beta1";
     message Eviction {
       // ObjectMeta describes the pod that is being evicted.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // DeleteOptions may be provided
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
     }
     
     // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
    @@ -47,7 +47,7 @@ message PodDisruptionBudget {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the desired behavior of the PodDisruptionBudget.
       // +optional
    @@ -63,7 +63,7 @@ message PodDisruptionBudgetList {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items list individual PodDisruptionBudget objects
       repeated PodDisruptionBudget items = 2;
    @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec {
       // absence of the evicted pod.  So for example you can prevent all voluntary
       // evictions by specifying "100%".
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1;
     
       // Label query over pods whose evictions are managed by the disruption
       // budget.
    @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec {
       // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods.
       // In policy/v1, an empty selector will select all pods in the namespace.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
     
       // An eviction is allowed if at most "maxUnavailable" pods selected by
       // "selector" are unavailable after the eviction, i.e. even in absence of
       // the evicted pod. For example, one can prevent all voluntary evictions
       // by specifying 0. This is a mutually exclusive setting with "minAvailable".
       // +optional
    -  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3;
    +  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3;
     
       // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
       // should be considered for eviction. Current implementation considers healthy pods,
    @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus {
       // If everything goes smooth this map should be empty for the most of the time.
       // Large number of entries in the map may indicate problems with pod deletions.
       // +optional
     -  map<string, k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2;
     +  map<string, .k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2;
     
       // Number of pod disruptions that are currently allowed.
       optional int32 disruptionsAllowed = 3;
    @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus {
       // +patchStrategy=merge
       // +listType=map
       // +listMapKey=type
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7;
     }
     
    diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go
    index 80f43ce922..b0e4e5b5b5 100644
    --- a/vendor/k8s.io/api/rbac/v1/doc.go
    +++ b/vendor/k8s.io/api/rbac/v1/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=rbac.authorization.k8s.io
     
     package v1 // import "k8s.io/api/rbac/v1"
    diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto
    index 62f5e558ba..87b8f832d3 100644
    --- a/vendor/k8s.io/api/rbac/v1/generated.proto
    +++ b/vendor/k8s.io/api/rbac/v1/generated.proto
    @@ -34,14 +34,14 @@ message AggregationRule {
       // If any of the selectors match, then the ClusterRole's permissions will be added
       // +optional
       // +listType=atomic
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
     }
     
     // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
     message ClusterRole {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Rules holds all the PolicyRules for this ClusterRole
       // +optional
    @@ -60,7 +60,7 @@ message ClusterRole {
     message ClusterRoleBinding {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Subjects holds references to the objects the role applies to.
       // +optional
    @@ -77,7 +77,7 @@ message ClusterRoleBinding {
     message ClusterRoleBindingList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of ClusterRoleBindings
       repeated ClusterRoleBinding items = 2;
    @@ -87,7 +87,7 @@ message ClusterRoleBindingList {
     message ClusterRoleList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of ClusterRoles
       repeated ClusterRole items = 2;
    @@ -128,7 +128,7 @@ message PolicyRule {
     message Role {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Rules holds all the PolicyRules for this Role
       // +optional
    @@ -142,7 +142,7 @@ message Role {
     message RoleBinding {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Subjects holds references to the objects the role applies to.
       // +optional
    @@ -159,7 +159,7 @@ message RoleBinding {
     message RoleBindingList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of RoleBindings
       repeated RoleBinding items = 2;
    @@ -169,7 +169,7 @@ message RoleBindingList {
     message RoleList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of Roles
       repeated Role items = 2;
    diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go
    index 8bef1ac462..f9628b8536 100644
    --- a/vendor/k8s.io/api/rbac/v1/types.go
    +++ b/vendor/k8s.io/api/rbac/v1/types.go
    @@ -84,7 +84,7 @@ type Subject struct {
     	// Defaults to "" for ServiceAccount subjects.
     	// Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
     	// +optional
    -	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"`
    +	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"`
     	// Name of the object being referenced.
     	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
     	// Namespace of the referenced object.  If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
    @@ -106,6 +106,7 @@ type RoleRef struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
     type Role struct {
    @@ -122,6 +123,7 @@ type Role struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace.
     // It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given
    @@ -144,6 +146,7 @@ type RoleBinding struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // RoleBindingList is a collection of RoleBindings
     type RoleBindingList struct {
    @@ -157,6 +160,7 @@ type RoleBindingList struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // RoleList is a collection of Roles
     type RoleList struct {
    @@ -172,6 +176,7 @@ type RoleList struct {
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
     type ClusterRole struct {
    @@ -204,6 +209,7 @@ type AggregationRule struct {
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // ClusterRoleBinding references a ClusterRole, but not contain it.  It can reference a ClusterRole in the global namespace,
     // and adds who information via Subject.
    @@ -225,6 +231,7 @@ type ClusterRoleBinding struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // ClusterRoleBindingList is a collection of ClusterRoleBindings
     type ClusterRoleBindingList struct {
    @@ -238,6 +245,7 @@ type ClusterRoleBindingList struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.8
     
     // ClusterRoleList is a collection of ClusterRoles
     type ClusterRoleList struct {
    diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..f6f74413b8
    --- /dev/null
    +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,70 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ClusterRole) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ClusterRoleBinding) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ClusterRoleBindingList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *ClusterRoleList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *Role) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *RoleBinding) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *RoleBindingList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *RoleList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 8
    +}
    diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
    index 170e008a56..19d43cdee5 100644
    --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
    @@ -34,7 +34,7 @@ message AggregationRule {
       // If any of the selectors match, then the ClusterRole's permissions will be added
       // +optional
       // +listType=atomic
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
     }
     
     // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
    @@ -42,7 +42,7 @@ message AggregationRule {
     message ClusterRole {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Rules holds all the PolicyRules for this ClusterRole
       // +optional
    @@ -62,7 +62,7 @@ message ClusterRole {
     message ClusterRoleBinding {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Subjects holds references to the objects the role applies to.
       // +optional
    @@ -79,7 +79,7 @@ message ClusterRoleBinding {
     message ClusterRoleBindingList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of ClusterRoleBindings
       repeated ClusterRoleBinding items = 2;
    @@ -90,7 +90,7 @@ message ClusterRoleBindingList {
     message ClusterRoleList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of ClusterRoles
       repeated ClusterRole items = 2;
    @@ -132,7 +132,7 @@ message PolicyRule {
     message Role {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Rules holds all the PolicyRules for this Role
       // +optional
    @@ -147,7 +147,7 @@ message Role {
     message RoleBinding {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Subjects holds references to the objects the role applies to.
       // +optional
    @@ -164,7 +164,7 @@ message RoleBinding {
     message RoleBindingList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of RoleBindings
       repeated RoleBinding items = 2;
    @@ -175,7 +175,7 @@ message RoleBindingList {
     message RoleList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of Roles
       repeated Role items = 2;
    diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go
    index 9a0a219774..2146b4ce39 100644
    --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go
    +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go
    @@ -84,7 +84,7 @@ type Subject struct {
     	// Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects.
     	// +k8s:conversion-gen=false
     	// +optional
    -	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"`
    +	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
     	// Name of the object being referenced.
     	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
     	// Namespace of the referenced object.  If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
    diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto
    index 7dfc50d7eb..8bfbd0c8ac 100644
    --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto
    @@ -34,7 +34,7 @@ message AggregationRule {
       // If any of the selectors match, then the ClusterRole's permissions will be added
       // +optional
       // +listType=atomic
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
     }
     
     // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
    @@ -42,7 +42,7 @@ message AggregationRule {
     message ClusterRole {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Rules holds all the PolicyRules for this ClusterRole
       // +optional
    @@ -62,7 +62,7 @@ message ClusterRole {
     message ClusterRoleBinding {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Subjects holds references to the objects the role applies to.
       // +optional
    @@ -79,7 +79,7 @@ message ClusterRoleBinding {
     message ClusterRoleBindingList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of ClusterRoleBindings
       repeated ClusterRoleBinding items = 2;
    @@ -90,7 +90,7 @@ message ClusterRoleBindingList {
     message ClusterRoleList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of ClusterRoles
       repeated ClusterRole items = 2;
    @@ -133,7 +133,7 @@ message PolicyRule {
     message Role {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Rules holds all the PolicyRules for this Role
       // +optional
    @@ -148,7 +148,7 @@ message Role {
     message RoleBinding {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Subjects holds references to the objects the role applies to.
       // +optional
    @@ -165,7 +165,7 @@ message RoleBinding {
     message RoleBindingList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of RoleBindings
       repeated RoleBinding items = 2;
    @@ -176,7 +176,7 @@ message RoleBindingList {
     message RoleList {
       // Standard object's metadata.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is a list of Roles
       repeated Role items = 2;
    diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go
    index f761f81a6f..9cfaaceb92 100644
    --- a/vendor/k8s.io/api/rbac/v1beta1/types.go
    +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go
    @@ -84,7 +84,7 @@ type Subject struct {
     	// Defaults to "" for ServiceAccount subjects.
     	// Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
     	// +optional
    -	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"`
    +	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"`
     	// Name of the object being referenced.
     	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
     	// Namespace of the referenced object.  If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/doc.go b/vendor/k8s.io/api/resource/v1alpha2/doc.go
    deleted file mode 100644
    index d9c20e089d..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/doc.go
    +++ /dev/null
    @@ -1,24 +0,0 @@
    -/*
    -Copyright 2022 The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// +k8s:openapi-gen=true
    -// +k8s:deepcopy-gen=package
    -// +k8s:protobuf-gen=package
    -
    -// +groupName=resource.k8s.io
    -
    -// Package v1alpha2 is the v1alpha2 version of the resource API.
    -package v1alpha2 // import "k8s.io/api/resource/v1alpha2"
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go
    deleted file mode 100644
    index 6c6ba438e3..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go
    +++ /dev/null
    @@ -1,10589 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by protoc-gen-gogo. DO NOT EDIT.
    -// source: k8s.io/api/resource/v1alpha2/generated.proto
    -
    -package v1alpha2
    -
    -import (
    -	fmt "fmt"
    -
    -	io "io"
    -
    -	proto "github.com/gogo/protobuf/proto"
    -	v1 "k8s.io/api/core/v1"
    -	resource "k8s.io/apimachinery/pkg/api/resource"
    -
    -	math "math"
    -	math_bits "math/bits"
    -	reflect "reflect"
    -	strings "strings"
    -
    -	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
    -)
    -
    -// Reference imports to suppress errors if they are not otherwise used.
    -var _ = proto.Marshal
    -var _ = fmt.Errorf
    -var _ = math.Inf
    -
    -// This is a compile-time assertion to ensure that this generated file
    -// is compatible with the proto package it is being compiled against.
    -// A compilation error at this line likely means your copy of the
    -// proto package needs to be updated.
    -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
    -
    -func (m *AllocationResult) Reset()      { *m = AllocationResult{} }
    -func (*AllocationResult) ProtoMessage() {}
    -func (*AllocationResult) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{0}
    -}
    -func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *AllocationResult) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_AllocationResult.Merge(m, src)
    -}
    -func (m *AllocationResult) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *AllocationResult) XXX_DiscardUnknown() {
    -	xxx_messageInfo_AllocationResult.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
    -
    -func (m *AllocationResultModel) Reset()      { *m = AllocationResultModel{} }
    -func (*AllocationResultModel) ProtoMessage() {}
    -func (*AllocationResultModel) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{1}
    -}
    -func (m *AllocationResultModel) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *AllocationResultModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *AllocationResultModel) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_AllocationResultModel.Merge(m, src)
    -}
    -func (m *AllocationResultModel) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *AllocationResultModel) XXX_DiscardUnknown() {
    -	xxx_messageInfo_AllocationResultModel.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_AllocationResultModel proto.InternalMessageInfo
    -
    -func (m *DriverAllocationResult) Reset()      { *m = DriverAllocationResult{} }
    -func (*DriverAllocationResult) ProtoMessage() {}
    -func (*DriverAllocationResult) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{2}
    -}
    -func (m *DriverAllocationResult) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *DriverAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *DriverAllocationResult) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_DriverAllocationResult.Merge(m, src)
    -}
    -func (m *DriverAllocationResult) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *DriverAllocationResult) XXX_DiscardUnknown() {
    -	xxx_messageInfo_DriverAllocationResult.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_DriverAllocationResult proto.InternalMessageInfo
    -
    -func (m *DriverRequests) Reset()      { *m = DriverRequests{} }
    -func (*DriverRequests) ProtoMessage() {}
    -func (*DriverRequests) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{3}
    -}
    -func (m *DriverRequests) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *DriverRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *DriverRequests) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_DriverRequests.Merge(m, src)
    -}
    -func (m *DriverRequests) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *DriverRequests) XXX_DiscardUnknown() {
    -	xxx_messageInfo_DriverRequests.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_DriverRequests proto.InternalMessageInfo
    -
    -func (m *NamedResourcesAllocationResult) Reset()      { *m = NamedResourcesAllocationResult{} }
    -func (*NamedResourcesAllocationResult) ProtoMessage() {}
    -func (*NamedResourcesAllocationResult) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{4}
    -}
    -func (m *NamedResourcesAllocationResult) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesAllocationResult) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesAllocationResult.Merge(m, src)
    -}
    -func (m *NamedResourcesAllocationResult) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesAllocationResult) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesAllocationResult.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesAllocationResult proto.InternalMessageInfo
    -
    -func (m *NamedResourcesAttribute) Reset()      { *m = NamedResourcesAttribute{} }
    -func (*NamedResourcesAttribute) ProtoMessage() {}
    -func (*NamedResourcesAttribute) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{5}
    -}
    -func (m *NamedResourcesAttribute) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesAttribute) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesAttribute.Merge(m, src)
    -}
    -func (m *NamedResourcesAttribute) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesAttribute) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesAttribute.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesAttribute proto.InternalMessageInfo
    -
    -func (m *NamedResourcesAttributeValue) Reset()      { *m = NamedResourcesAttributeValue{} }
    -func (*NamedResourcesAttributeValue) ProtoMessage() {}
    -func (*NamedResourcesAttributeValue) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{6}
    -}
    -func (m *NamedResourcesAttributeValue) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesAttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesAttributeValue) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesAttributeValue.Merge(m, src)
    -}
    -func (m *NamedResourcesAttributeValue) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesAttributeValue) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesAttributeValue.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesAttributeValue proto.InternalMessageInfo
    -
    -func (m *NamedResourcesFilter) Reset()      { *m = NamedResourcesFilter{} }
    -func (*NamedResourcesFilter) ProtoMessage() {}
    -func (*NamedResourcesFilter) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{7}
    -}
    -func (m *NamedResourcesFilter) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesFilter) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesFilter.Merge(m, src)
    -}
    -func (m *NamedResourcesFilter) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesFilter) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesFilter.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesFilter proto.InternalMessageInfo
    -
    -func (m *NamedResourcesInstance) Reset()      { *m = NamedResourcesInstance{} }
    -func (*NamedResourcesInstance) ProtoMessage() {}
    -func (*NamedResourcesInstance) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{8}
    -}
    -func (m *NamedResourcesInstance) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesInstance) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesInstance.Merge(m, src)
    -}
    -func (m *NamedResourcesInstance) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesInstance) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesInstance.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesInstance proto.InternalMessageInfo
    -
    -func (m *NamedResourcesIntSlice) Reset()      { *m = NamedResourcesIntSlice{} }
    -func (*NamedResourcesIntSlice) ProtoMessage() {}
    -func (*NamedResourcesIntSlice) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{9}
    -}
    -func (m *NamedResourcesIntSlice) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesIntSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesIntSlice) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesIntSlice.Merge(m, src)
    -}
    -func (m *NamedResourcesIntSlice) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesIntSlice) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesIntSlice.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesIntSlice proto.InternalMessageInfo
    -
    -func (m *NamedResourcesRequest) Reset()      { *m = NamedResourcesRequest{} }
    -func (*NamedResourcesRequest) ProtoMessage() {}
    -func (*NamedResourcesRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{10}
    -}
    -func (m *NamedResourcesRequest) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesRequest.Merge(m, src)
    -}
    -func (m *NamedResourcesRequest) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesRequest proto.InternalMessageInfo
    -
    -func (m *NamedResourcesResources) Reset()      { *m = NamedResourcesResources{} }
    -func (*NamedResourcesResources) ProtoMessage() {}
    -func (*NamedResourcesResources) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{11}
    -}
    -func (m *NamedResourcesResources) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesResources) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesResources.Merge(m, src)
    -}
    -func (m *NamedResourcesResources) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesResources) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesResources.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesResources proto.InternalMessageInfo
    -
    -func (m *NamedResourcesStringSlice) Reset()      { *m = NamedResourcesStringSlice{} }
    -func (*NamedResourcesStringSlice) ProtoMessage() {}
    -func (*NamedResourcesStringSlice) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{12}
    -}
    -func (m *NamedResourcesStringSlice) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *NamedResourcesStringSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *NamedResourcesStringSlice) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_NamedResourcesStringSlice.Merge(m, src)
    -}
    -func (m *NamedResourcesStringSlice) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *NamedResourcesStringSlice) XXX_DiscardUnknown() {
    -	xxx_messageInfo_NamedResourcesStringSlice.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_NamedResourcesStringSlice proto.InternalMessageInfo
    -
    -func (m *PodSchedulingContext) Reset()      { *m = PodSchedulingContext{} }
    -func (*PodSchedulingContext) ProtoMessage() {}
    -func (*PodSchedulingContext) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{13}
    -}
    -func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *PodSchedulingContext) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PodSchedulingContext.Merge(m, src)
    -}
    -func (m *PodSchedulingContext) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *PodSchedulingContext) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo
    -
    -func (m *PodSchedulingContextList) Reset()      { *m = PodSchedulingContextList{} }
    -func (*PodSchedulingContextList) ProtoMessage() {}
    -func (*PodSchedulingContextList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{14}
    -}
    -func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PodSchedulingContextList.Merge(m, src)
    -}
    -func (m *PodSchedulingContextList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *PodSchedulingContextList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo
    -
    -func (m *PodSchedulingContextSpec) Reset()      { *m = PodSchedulingContextSpec{} }
    -func (*PodSchedulingContextSpec) ProtoMessage() {}
    -func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{15}
    -}
    -func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src)
    -}
    -func (m *PodSchedulingContextSpec) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo
    -
    -func (m *PodSchedulingContextStatus) Reset()      { *m = PodSchedulingContextStatus{} }
    -func (*PodSchedulingContextStatus) ProtoMessage() {}
    -func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{16}
    -}
    -func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src)
    -}
    -func (m *PodSchedulingContextStatus) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() {
    -	xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo
    -
    -func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
    -func (*ResourceClaim) ProtoMessage() {}
    -func (*ResourceClaim) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{17}
    -}
    -func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaim) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaim.Merge(m, src)
    -}
    -func (m *ResourceClaim) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaim) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
    -
    -func (m *ResourceClaimConsumerReference) Reset()      { *m = ResourceClaimConsumerReference{} }
    -func (*ResourceClaimConsumerReference) ProtoMessage() {}
    -func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{18}
    -}
    -func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
    -}
    -func (m *ResourceClaimConsumerReference) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
    -
    -func (m *ResourceClaimList) Reset()      { *m = ResourceClaimList{} }
    -func (*ResourceClaimList) ProtoMessage() {}
    -func (*ResourceClaimList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{19}
    -}
    -func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimList.Merge(m, src)
    -}
    -func (m *ResourceClaimList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
    -
    -func (m *ResourceClaimParameters) Reset()      { *m = ResourceClaimParameters{} }
    -func (*ResourceClaimParameters) ProtoMessage() {}
    -func (*ResourceClaimParameters) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{20}
    -}
    -func (m *ResourceClaimParameters) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimParameters) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimParameters.Merge(m, src)
    -}
    -func (m *ResourceClaimParameters) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimParameters) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimParameters.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimParameters proto.InternalMessageInfo
    -
    -func (m *ResourceClaimParametersList) Reset()      { *m = ResourceClaimParametersList{} }
    -func (*ResourceClaimParametersList) ProtoMessage() {}
    -func (*ResourceClaimParametersList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{21}
    -}
    -func (m *ResourceClaimParametersList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimParametersList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimParametersList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimParametersList.Merge(m, src)
    -}
    -func (m *ResourceClaimParametersList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimParametersList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimParametersList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimParametersList proto.InternalMessageInfo
    -
    -func (m *ResourceClaimParametersReference) Reset()      { *m = ResourceClaimParametersReference{} }
    -func (*ResourceClaimParametersReference) ProtoMessage() {}
    -func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{22}
    -}
    -func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimParametersReference) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimParametersReference.Merge(m, src)
    -}
    -func (m *ResourceClaimParametersReference) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimParametersReference) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimParametersReference.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo
    -
    -func (m *ResourceClaimSchedulingStatus) Reset()      { *m = ResourceClaimSchedulingStatus{} }
    -func (*ResourceClaimSchedulingStatus) ProtoMessage() {}
    -func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{23}
    -}
    -func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src)
    -}
    -func (m *ResourceClaimSchedulingStatus) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo
    -
    -func (m *ResourceClaimSpec) Reset()      { *m = ResourceClaimSpec{} }
    -func (*ResourceClaimSpec) ProtoMessage() {}
    -func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{24}
    -}
    -func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
    -}
    -func (m *ResourceClaimSpec) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
    -
    -func (m *ResourceClaimStatus) Reset()      { *m = ResourceClaimStatus{} }
    -func (*ResourceClaimStatus) ProtoMessage() {}
    -func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{25}
    -}
    -func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
    -}
    -func (m *ResourceClaimStatus) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
    -
    -func (m *ResourceClaimTemplate) Reset()      { *m = ResourceClaimTemplate{} }
    -func (*ResourceClaimTemplate) ProtoMessage() {}
    -func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{26}
    -}
    -func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
    -}
    -func (m *ResourceClaimTemplate) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
    -
    -func (m *ResourceClaimTemplateList) Reset()      { *m = ResourceClaimTemplateList{} }
    -func (*ResourceClaimTemplateList) ProtoMessage() {}
    -func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{27}
    -}
    -func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
    -}
    -func (m *ResourceClaimTemplateList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
    -
    -func (m *ResourceClaimTemplateSpec) Reset()      { *m = ResourceClaimTemplateSpec{} }
    -func (*ResourceClaimTemplateSpec) ProtoMessage() {}
    -func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{28}
    -}
    -func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
    -}
    -func (m *ResourceClaimTemplateSpec) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
    -
    -func (m *ResourceClass) Reset()      { *m = ResourceClass{} }
    -func (*ResourceClass) ProtoMessage() {}
    -func (*ResourceClass) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{29}
    -}
    -func (m *ResourceClass) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClass) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClass.Merge(m, src)
    -}
    -func (m *ResourceClass) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClass) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClass.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClass proto.InternalMessageInfo
    -
    -func (m *ResourceClassList) Reset()      { *m = ResourceClassList{} }
    -func (*ResourceClassList) ProtoMessage() {}
    -func (*ResourceClassList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{30}
    -}
    -func (m *ResourceClassList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClassList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClassList.Merge(m, src)
    -}
    -func (m *ResourceClassList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClassList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClassList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo
    -
    -func (m *ResourceClassParameters) Reset()      { *m = ResourceClassParameters{} }
    -func (*ResourceClassParameters) ProtoMessage() {}
    -func (*ResourceClassParameters) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{31}
    -}
    -func (m *ResourceClassParameters) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClassParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClassParameters) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClassParameters.Merge(m, src)
    -}
    -func (m *ResourceClassParameters) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClassParameters) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClassParameters.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClassParameters proto.InternalMessageInfo
    -
    -func (m *ResourceClassParametersList) Reset()      { *m = ResourceClassParametersList{} }
    -func (*ResourceClassParametersList) ProtoMessage() {}
    -func (*ResourceClassParametersList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{32}
    -}
    -func (m *ResourceClassParametersList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClassParametersList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClassParametersList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClassParametersList.Merge(m, src)
    -}
    -func (m *ResourceClassParametersList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClassParametersList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClassParametersList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClassParametersList proto.InternalMessageInfo
    -
    -func (m *ResourceClassParametersReference) Reset()      { *m = ResourceClassParametersReference{} }
    -func (*ResourceClassParametersReference) ProtoMessage() {}
    -func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{33}
    -}
    -func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceClassParametersReference) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceClassParametersReference.Merge(m, src)
    -}
    -func (m *ResourceClassParametersReference) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceClassParametersReference) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceClassParametersReference.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo
    -
    -func (m *ResourceFilter) Reset()      { *m = ResourceFilter{} }
    -func (*ResourceFilter) ProtoMessage() {}
    -func (*ResourceFilter) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{34}
    -}
    -func (m *ResourceFilter) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceFilter) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceFilter.Merge(m, src)
    -}
    -func (m *ResourceFilter) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceFilter) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceFilter.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceFilter proto.InternalMessageInfo
    -
    -func (m *ResourceFilterModel) Reset()      { *m = ResourceFilterModel{} }
    -func (*ResourceFilterModel) ProtoMessage() {}
    -func (*ResourceFilterModel) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{35}
    -}
    -func (m *ResourceFilterModel) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceFilterModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceFilterModel) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceFilterModel.Merge(m, src)
    -}
    -func (m *ResourceFilterModel) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceFilterModel) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceFilterModel.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceFilterModel proto.InternalMessageInfo
    -
    -func (m *ResourceHandle) Reset()      { *m = ResourceHandle{} }
    -func (*ResourceHandle) ProtoMessage() {}
    -func (*ResourceHandle) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{36}
    -}
    -func (m *ResourceHandle) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceHandle) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceHandle.Merge(m, src)
    -}
    -func (m *ResourceHandle) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceHandle) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceHandle.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo
    -
    -func (m *ResourceModel) Reset()      { *m = ResourceModel{} }
    -func (*ResourceModel) ProtoMessage() {}
    -func (*ResourceModel) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{37}
    -}
    -func (m *ResourceModel) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceModel) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceModel.Merge(m, src)
    -}
    -func (m *ResourceModel) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceModel) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceModel.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceModel proto.InternalMessageInfo
    -
    -func (m *ResourceRequest) Reset()      { *m = ResourceRequest{} }
    -func (*ResourceRequest) ProtoMessage() {}
    -func (*ResourceRequest) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{38}
    -}
    -func (m *ResourceRequest) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceRequest) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceRequest.Merge(m, src)
    -}
    -func (m *ResourceRequest) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceRequest) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceRequest.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceRequest proto.InternalMessageInfo
    -
    -func (m *ResourceRequestModel) Reset()      { *m = ResourceRequestModel{} }
    -func (*ResourceRequestModel) ProtoMessage() {}
    -func (*ResourceRequestModel) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{39}
    -}
    -func (m *ResourceRequestModel) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceRequestModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceRequestModel) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceRequestModel.Merge(m, src)
    -}
    -func (m *ResourceRequestModel) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceRequestModel) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceRequestModel.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceRequestModel proto.InternalMessageInfo
    -
    -func (m *ResourceSlice) Reset()      { *m = ResourceSlice{} }
    -func (*ResourceSlice) ProtoMessage() {}
    -func (*ResourceSlice) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{40}
    -}
    -func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceSlice) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceSlice.Merge(m, src)
    -}
    -func (m *ResourceSlice) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceSlice) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
    -
    -func (m *ResourceSliceList) Reset()      { *m = ResourceSliceList{} }
    -func (*ResourceSliceList) ProtoMessage() {}
    -func (*ResourceSliceList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{41}
    -}
    -func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_ResourceSliceList.Merge(m, src)
    -}
    -func (m *ResourceSliceList) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *ResourceSliceList) XXX_DiscardUnknown() {
    -	xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
    -
    -func (m *StructuredResourceHandle) Reset()      { *m = StructuredResourceHandle{} }
    -func (*StructuredResourceHandle) ProtoMessage() {}
    -func (*StructuredResourceHandle) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{42}
    -}
    -func (m *StructuredResourceHandle) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *StructuredResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *StructuredResourceHandle) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_StructuredResourceHandle.Merge(m, src)
    -}
    -func (m *StructuredResourceHandle) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *StructuredResourceHandle) XXX_DiscardUnknown() {
    -	xxx_messageInfo_StructuredResourceHandle.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_StructuredResourceHandle proto.InternalMessageInfo
    -
    -func (m *VendorParameters) Reset()      { *m = VendorParameters{} }
    -func (*VendorParameters) ProtoMessage() {}
    -func (*VendorParameters) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_4312f5b44a31ec02, []int{43}
    -}
    -func (m *VendorParameters) XXX_Unmarshal(b []byte) error {
    -	return m.Unmarshal(b)
    -}
    -func (m *VendorParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    -	b = b[:cap(b)]
    -	n, err := m.MarshalToSizedBuffer(b)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return b[:n], nil
    -}
    -func (m *VendorParameters) XXX_Merge(src proto.Message) {
    -	xxx_messageInfo_VendorParameters.Merge(m, src)
    -}
    -func (m *VendorParameters) XXX_Size() int {
    -	return m.Size()
    -}
    -func (m *VendorParameters) XXX_DiscardUnknown() {
    -	xxx_messageInfo_VendorParameters.DiscardUnknown(m)
    -}
    -
    -var xxx_messageInfo_VendorParameters proto.InternalMessageInfo
    -
    -func init() {
    -	proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult")
    -	proto.RegisterType((*AllocationResultModel)(nil), "k8s.io.api.resource.v1alpha2.AllocationResultModel")
    -	proto.RegisterType((*DriverAllocationResult)(nil), "k8s.io.api.resource.v1alpha2.DriverAllocationResult")
    -	proto.RegisterType((*DriverRequests)(nil), "k8s.io.api.resource.v1alpha2.DriverRequests")
    -	proto.RegisterType((*NamedResourcesAllocationResult)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAllocationResult")
    -	proto.RegisterType((*NamedResourcesAttribute)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAttribute")
    -	proto.RegisterType((*NamedResourcesAttributeValue)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAttributeValue")
    -	proto.RegisterType((*NamedResourcesFilter)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesFilter")
    -	proto.RegisterType((*NamedResourcesInstance)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesInstance")
    -	proto.RegisterType((*NamedResourcesIntSlice)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesIntSlice")
    -	proto.RegisterType((*NamedResourcesRequest)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesRequest")
    -	proto.RegisterType((*NamedResourcesResources)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesResources")
    -	proto.RegisterType((*NamedResourcesStringSlice)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesStringSlice")
    -	proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext")
    -	proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList")
    -	proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec")
    -	proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus")
    -	proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim")
    -	proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference")
    -	proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList")
    -	proto.RegisterType((*ResourceClaimParameters)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParameters")
    -	proto.RegisterType((*ResourceClaimParametersList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersList")
    -	proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference")
    -	proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus")
    -	proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSpec")
    -	proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus")
    -	proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate")
    -	proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList")
    -	proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec")
    -	proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass")
    -	proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList")
    -	proto.RegisterType((*ResourceClassParameters)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParameters")
    -	proto.RegisterType((*ResourceClassParametersList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersList")
    -	proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference")
    -	proto.RegisterType((*ResourceFilter)(nil), "k8s.io.api.resource.v1alpha2.ResourceFilter")
    -	proto.RegisterType((*ResourceFilterModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceFilterModel")
    -	proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle")
    -	proto.RegisterType((*ResourceModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceModel")
    -	proto.RegisterType((*ResourceRequest)(nil), "k8s.io.api.resource.v1alpha2.ResourceRequest")
    -	proto.RegisterType((*ResourceRequestModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceRequestModel")
    -	proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha2.ResourceSlice")
    -	proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha2.ResourceSliceList")
    -	proto.RegisterType((*StructuredResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.StructuredResourceHandle")
    -	proto.RegisterType((*VendorParameters)(nil), "k8s.io.api.resource.v1alpha2.VendorParameters")
    -}
    -
    -func init() {
    -	proto.RegisterFile("k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_4312f5b44a31ec02)
    -}
    -
    -var fileDescriptor_4312f5b44a31ec02 = []byte{
    -	// 2242 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x6c, 0x1c, 0x57,
    -	0xd9, 0xb3, 0xbb, 0x89, 0xd7, 0x9f, 0xed, 0xb5, 0x33, 0xb6, 0xe3, 0x4d, 0xea, 0xee, 0x6e, 0x47,
    -	0x20, 0x2c, 0x70, 0x76, 0x1b, 0xa7, 0x4d, 0xa3, 0x52, 0x90, 0x32, 0x71, 0x13, 0x2c, 0x9a, 0xd4,
    -	0x7d, 0x4b, 0xdc, 0xa6, 0xfc, 0x75, 0xbc, 0xf3, 0x62, 0x0f, 0xd9, 0x9d, 0xd9, 0xcc, 0x7b, 0xeb,
    -	0x26, 0xe2, 0x12, 0x55, 0x20, 0xb8, 0x20, 0x15, 0x81, 0x10, 0x9c, 0x38, 0x21, 0xc4, 0x85, 0x0b,
    -	0x5c, 0x39, 0x55, 0xd0, 0x1c, 0x83, 0x40, 0xa2, 0xe2, 0xb0, 0x22, 0xcb, 0x91, 0x23, 0xb7, 0x9e,
    -	0xd0, 0xbc, 0xf7, 0xe6, 0xe7, 0xcd, 0xce, 0xac, 0x77, 0x96, 0xc6, 0x4a, 0x4e, 0xde, 0x79, 0xef,
    -	0xfb, 0x7b, 0xdf, 0xff, 0x7b, 0x9f, 0x61, 0xe3, 0xce, 0x25, 0x52, 0xb7, 0x9c, 0x86, 0xd1, 0xb5,
    -	0x1a, 0x2e, 0x26, 0x4e, 0xcf, 0x6d, 0xe1, 0xc6, 0xe1, 0x79, 0xa3, 0xdd, 0x3d, 0x30, 0x36, 0x1b,
    -	0xfb, 0xd8, 0xc6, 0xae, 0x41, 0xb1, 0x59, 0xef, 0xba, 0x0e, 0x75, 0xd4, 0x35, 0x0e, 0x5d, 0x37,
    -	0xba, 0x56, 0xdd, 0x87, 0xae, 0xfb, 0xd0, 0x67, 0xcf, 0xed, 0x5b, 0xf4, 0xa0, 0xb7, 0x57, 0x6f,
    -	0x39, 0x9d, 0xc6, 0xbe, 0xb3, 0xef, 0x34, 0x18, 0xd2, 0x5e, 0xef, 0x36, 0xfb, 0x62, 0x1f, 0xec,
    -	0x17, 0x27, 0x76, 0x56, 0x8b, 0xb0, 0x6e, 0x39, 0xae, 0xc7, 0x36, 0xce, 0xf0, 0xec, 0x4b, 0x21,
    -	0x4c, 0xc7, 0x68, 0x1d, 0x58, 0x36, 0x76, 0xef, 0x37, 0xba, 0x77, 0xf6, 0x65, 0x79, 0xb3, 0x60,
    -	0x91, 0x46, 0x07, 0x53, 0x23, 0x89, 0x57, 0x23, 0x0d, 0xcb, 0xed, 0xd9, 0xd4, 0xea, 0x0c, 0xb3,
    -	0xb9, 0x78, 0x14, 0x02, 0x69, 0x1d, 0xe0, 0x8e, 0x11, 0xc7, 0xd3, 0x7e, 0x99, 0x83, 0xc5, 0xcb,
    -	0xed, 0xb6, 0xd3, 0x32, 0xa8, 0xe5, 0xd8, 0x08, 0x93, 0x5e, 0x9b, 0xaa, 0x0e, 0x2c, 0xf8, 0xe7,
    -	0xf9, 0x9a, 0x61, 0x9b, 0x6d, 0x4c, 0xca, 0x4a, 0x2d, 0xbf, 0x3e, 0xbb, 0xb9, 0x51, 0x1f, 0xa5,
    -	0xf4, 0x3a, 0x92, 0x90, 0xf4, 0xd5, 0x87, 0xfd, 0xea, 0xd4, 0xa0, 0x5f, 0x5d, 0x90, 0xd7, 0x09,
    -	0x8a, 0x53, 0x57, 0xf7, 0x60, 0xd1, 0x38, 0x34, 0xac, 0xb6, 0xb1, 0xd7, 0xc6, 0x6f, 0xda, 0x37,
    -	0x1c, 0x13, 0x93, 0x72, 0xae, 0xa6, 0xac, 0xcf, 0x6e, 0xd6, 0xa2, 0x1c, 0x3d, 0xcb, 0xd4, 0x0f,
    -	0xcf, 0xd7, 0x3d, 0x80, 0x26, 0x6e, 0xe3, 0x16, 0x75, 0x5c, 0x7d, 0x79, 0xd0, 0xaf, 0x2e, 0x5e,
    -	0x8e, 0x61, 0xa3, 0x21, 0x7a, 0x6a, 0x03, 0x66, 0xc8, 0x81, 0xe1, 0x62, 0x6f, 0xad, 0x9c, 0xaf,
    -	0x29, 0xeb, 0x45, 0xfd, 0x94, 0x10, 0x70, 0xa6, 0xe9, 0x6f, 0xa0, 0x10, 0x46, 0xfb, 0xa9, 0x02,
    -	0x2b, 0x71, 0xd5, 0x5c, 0x77, 0x4c, 0xdc, 0x56, 0xef, 0x41, 0xc9, 0x36, 0x3a, 0xd8, 0xf4, 0xcf,
    -	0xe5, 0xa9, 0xc7, 0x13, 0xf6, 0xb5, 0xd1, 0xea, 0xb9, 0x21, 0xe1, 0xc4, 0x49, 0xeb, 0xea, 0xa0,
    -	0x5f, 0x2d, 0xc9, 0x30, 0x28, 0xc6, 0x47, 0xfb, 0x7d, 0x0e, 0x4e, 0x6f, 0xb9, 0xd6, 0x21, 0x76,
    -	0x87, 0x8c, 0xf6, 0x63, 0x05, 0x56, 0x0f, 0xb1, 0x6d, 0x3a, 0x2e, 0xc2, 0x77, 0x7b, 0x98, 0xd0,
    -	0x1d, 0xc3, 0x35, 0x3a, 0x98, 0x62, 0xd7, 0x17, 0xef, 0x5c, 0x44, 0xbc, 0xc0, 0x49, 0xea, 0xdd,
    -	0x3b, 0xfb, 0x75, 0xe1, 0x24, 0x75, 0x64, 0xbc, 0xff, 0xfa, 0x3d, 0x8a, 0x6d, 0x62, 0x39, 0xb6,
    -	0x5e, 0x15, 0xda, 0x59, 0xdd, 0x4d, 0xa6, 0x8a, 0xd2, 0xd8, 0x79, 0xa2, 0xac, 0x18, 0x49, 0x9a,
    -	0x13, 0x46, 0xbd, 0x30, 0x5a, 0x4f, 0x89, 0x4a, 0xd7, 0x9f, 0x17, 0xe2, 0x24, 0xdb, 0x04, 0x25,
    -	0x33, 0xd4, 0x7e, 0x91, 0x83, 0x12, 0x57, 0x98, 0x10, 0x93, 0xa8, 0x9b, 0x00, 0x26, 0x5b, 0xf1,
    -	0x74, 0xcd, 0x54, 0x33, 0xa3, 0xab, 0x82, 0x38, 0x6c, 0x05, 0x3b, 0x28, 0x02, 0xa5, 0x12, 0x58,
    -	0xe4, 0x87, 0x8d, 0x28, 0x35, 0x37, 0x89, 0x52, 0xcb, 0x82, 0xd1, 0xe2, 0x6e, 0x8c, 0x1c, 0x1a,
    -	0x62, 0xa0, 0x7e, 0x13, 0x8a, 0xae, 0x10, 0xba, 0x9c, 0x67, 0xf1, 0x77, 0x6e, 0xbc, 0xf8, 0x13,
    -	0x47, 0xd5, 0x17, 0x05, 0xb3, 0xa2, 0x7f, 0x76, 0x14, 0x10, 0xd4, 0x74, 0xa8, 0x8c, 0xf6, 0x47,
    -	0xb5, 0x06, 0x05, 0x3b, 0xd4, 0xd0, 0x9c, 0xa0, 0x55, 0x60, 0xba, 0x61, 0x3b, 0xda, 0x5f, 0x14,
    -	0x58, 0x8d, 0x11, 0xa1, 0xd4, 0xb5, 0xf6, 0x7a, 0x14, 0x1f, 0x8d, 0xed, 0x79, 0x49, 0xc9, 0xf0,
    -	0xe1, 0x77, 0x8d, 0x76, 0x0f, 0x0b, 0x95, 0xbe, 0x9a, 0x29, 0x8c, 0x24, 0x0a, 0xfa, 0xe7, 0x04,
    -	0xa3, 0xb5, 0x51, 0x50, 0x28, 0xc6, 0x57, 0xfb, 0x4f, 0x1e, 0x46, 0x22, 0xa8, 0xdf, 0x86, 0xe2,
    -	0xdd, 0x9e, 0x61, 0x53, 0x8b, 0xde, 0x2f, 0x9f, 0x64, 0x42, 0xd6, 0x53, 0xed, 0x2e, 0x49, 0xfd,
    -	0x96, 0xc0, 0xd2, 0x4f, 0x0d, 0xfa, 0xd5, 0x79, 0xff, 0x8b, 0x4b, 0x11, 0x90, 0x54, 0x5f, 0x80,
    -	0xc2, 0x9e, 0xe3, 0xf0, 0xf0, 0x28, 0xea, 0xf3, 0x5e, 0x4a, 0xd2, 0x1d, 0xa7, 0xcd, 0xc1, 0xd8,
    -	0x96, 0x5a, 0x81, 0xbc, 0x65, 0xd3, 0xf2, 0x74, 0x4d, 0x59, 0xcf, 0xeb, 0x73, 0x9e, 0x51, 0xb7,
    -	0x6d, 0xca, 0x01, 0xbc, 0x0d, 0xb5, 0x05, 0x45, 0xcb, 0xa6, 0xcd, 0xb6, 0xd5, 0xc2, 0xe5, 0x22,
    -	0x93, 0xf0, 0xa5, 0x2c, 0x6a, 0xdc, 0x16, 0xb8, 0x5c, 0x4e, 0xff, 0x4b, 0xc8, 0xe9, 0x13, 0x56,
    -	0xbf, 0x00, 0x27, 0x09, 0x75, 0x2d, 0x7b, 0xbf, 0x7c, 0x82, 0x99, 0x75, 0x61, 0xd0, 0xaf, 0xce,
    -	0x36, 0xd9, 0x0a, 0x07, 0x15, 0xdb, 0xaa, 0x03, 0xb3, 0xfc, 0x17, 0x17, 0x68, 0x86, 0x09, 0xf4,
    -	0x4a, 0x16, 0x81, 0x9a, 0x21, 0x3a, 0x4f, 0xf1, 0x91, 0x05, 0xce, 0x2b, 0xca, 0x41, 0xfd, 0x22,
    -	0x4c, 0x1f, 0x62, 0xd7, 0x0b, 0xb1, 0x32, 0x30, 0xd1, 0x16, 0x07, 0xfd, 0xea, 0xdc, 0x2e, 0x5f,
    -	0xe2, 0xf0, 0x3e, 0x80, 0xb6, 0x05, 0xcb, 0x32, 0xaf, 0xab, 0x56, 0x9b, 0x62, 0x57, 0xdd, 0x80,
    -	0x22, 0x11, 0x55, 0x45, 0xb8, 0x6d, 0x10, 0x40, 0x7e, 0xb5, 0x41, 0x01, 0x84, 0xf6, 0x1b, 0x05,
    -	0x4e, 0xc7, 0x75, 0x48, 0xa8, 0x61, 0xb7, 0xc6, 0xf1, 0x7d, 0x0b, 0x20, 0x70, 0x41, 0x2f, 0x93,
    -	0x78, 0xc1, 0xfd, 0xf2, 0x44, 0x6e, 0x1f, 0xa6, 0xae, 0x60, 0x89, 0xa0, 0x08, 0x71, 0xed, 0xe2,
    -	0xb0, 0x98, 0xc2, 0x9a, 0x6b, 0x50, 0xb0, 0x6c, 0xca, 0x6b, 0x7b, 0x5e, 0x2f, 0x7a, 0x22, 0x6e,
    -	0xdb, 0x94, 0x20, 0xb6, 0xaa, 0xbd, 0x0e, 0x2b, 0xb1, 0x62, 0xc4, 0x53, 0x47, 0x46, 0x35, 0x3d,
    -	0x18, 0xca, 0x11, 0xc1, 0x0f, 0x15, 0xc3, 0x8c, 0x25, 0x74, 0xe6, 0x77, 0x18, 0x19, 0x9d, 0x96,
    -	0x23, 0x87, 0x85, 0xdc, 0x5f, 0x21, 0x28, 0xa4, 0xac, 0xe9, 0x70, 0x26, 0xd5, 0xb7, 0xd4, 0xcf,
    -	0xc3, 0x34, 0xf7, 0x23, 0x2e, 0xc1, 0x8c, 0x3e, 0x3b, 0xe8, 0x57, 0xa7, 0x39, 0x04, 0x41, 0xfe,
    -	0x9e, 0xf6, 0xc7, 0x1c, 0x2c, 0xef, 0x38, 0x66, 0xb3, 0x75, 0x80, 0xcd, 0x5e, 0xdb, 0xb2, 0xf7,
    -	0xaf, 0x38, 0x36, 0xc5, 0xf7, 0xa8, 0xfa, 0x1e, 0x14, 0xbd, 0x26, 0xce, 0x34, 0xa8, 0x21, 0xca,
    -	0xec, 0x8b, 0xa3, 0x32, 0x03, 0xa9, 0x7b, 0xd0, 0x5e, 0x13, 0xf3, 0xe6, 0xde, 0xf7, 0x70, 0x8b,
    -	0x5e, 0xc7, 0xd4, 0x08, 0x4d, 0x18, 0xae, 0xa1, 0x80, 0xaa, 0xfa, 0x0e, 0x14, 0x48, 0x17, 0xb7,
    -	0x44, 0x72, 0xbc, 0x38, 0x5a, 0x41, 0x49, 0x32, 0x36, 0xbb, 0xb8, 0x15, 0x7a, 0xa1, 0xf7, 0x85,
    -	0x18, 0x45, 0xf5, 0x3d, 0x2f, 0x9c, 0x0d, 0xda, 0x23, 0xac, 0x1f, 0x9a, 0xdd, 0xbc, 0x34, 0x01,
    -	0x6d, 0x86, 0xaf, 0x97, 0x04, 0xf5, 0x93, 0xfc, 0x1b, 0x09, 0xba, 0xda, 0x5f, 0x15, 0x28, 0x27,
    -	0xa1, 0xbd, 0x61, 0x11, 0xaa, 0x7e, 0x6b, 0x48, 0x75, 0xf5, 0xf1, 0x54, 0xe7, 0x61, 0x33, 0xc5,
    -	0x05, 0x8e, 0xe7, 0xaf, 0x44, 0xd4, 0xf6, 0x36, 0x9c, 0xb0, 0x28, 0xee, 0xf8, 0xd1, 0xb5, 0x99,
    -	0xfd, 0x6c, 0xfa, 0xbc, 0x20, 0x7f, 0x62, 0xdb, 0x23, 0x84, 0x38, 0x3d, 0xed, 0xc3, 0x94, 0x33,
    -	0x79, 0x8a, 0x55, 0x2f, 0xc1, 0x1c, 0x77, 0x7d, 0x6c, 0x7a, 0x6d, 0xa7, 0x08, 0x90, 0x65, 0x41,
    -	0x68, 0xae, 0x19, 0xd9, 0x43, 0x12, 0xa4, 0xfa, 0x2a, 0x94, 0xba, 0x0e, 0xc5, 0x36, 0xb5, 0x8c,
    -	0xb6, 0xdf, 0x01, 0x7b, 0xfe, 0xc8, 0xda, 0xc2, 0x1d, 0x69, 0x07, 0xc5, 0x20, 0xb5, 0x5f, 0x29,
    -	0x70, 0x36, 0xdd, 0x3a, 0xea, 0xf7, 0xa1, 0xe4, 0x9f, 0xf8, 0x4a, 0xdb, 0xb0, 0x3a, 0x7e, 0xb0,
    -	0x7d, 0x79, 0xbc, 0x76, 0x82, 0xe1, 0x84, 0xb4, 0x85, 0xc9, 0x4f, 0x8b, 0x33, 0x95, 0x24, 0x30,
    -	0x82, 0x62, 0xac, 0xb4, 0x5f, 0xe7, 0x60, 0x5e, 0x02, 0x39, 0x86, 0x90, 0x79, 0x4b, 0x0a, 0x99,
    -	0x46, 0x96, 0x63, 0xa6, 0xc5, 0xca, 0xad, 0x58, 0xac, 0x9c, 0xcf, 0x42, 0x74, 0x74, 0x90, 0x0c,
    -	0x14, 0xa8, 0x48, 0xf0, 0x57, 0x1c, 0x9b, 0xf4, 0x3a, 0x5e, 0xcb, 0x7a, 0x1b, 0xbb, 0xd8, 0xab,
    -	0x28, 0x1b, 0x50, 0x34, 0xba, 0xd6, 0x35, 0xd7, 0xe9, 0x75, 0xe3, 0x39, 0xf7, 0xf2, 0xce, 0x36,
    -	0x5b, 0x47, 0x01, 0x84, 0x07, 0xed, 0x4b, 0xc4, 0xa4, 0x9d, 0x89, 0x76, 0x82, 0xa2, 0x45, 0x0c,
    -	0x20, 0x82, 0x6a, 0x55, 0x48, 0xad, 0x56, 0x3a, 0xe4, 0x7b, 0x96, 0x29, 0x6a, 0xfe, 0x8b, 0x02,
    -	0x20, 0x7f, 0x73, 0x7b, 0xeb, 0xd3, 0x7e, 0xf5, 0x85, 0xb4, 0x8b, 0x27, 0xbd, 0xdf, 0xc5, 0xa4,
    -	0x7e, 0x73, 0x7b, 0x0b, 0x79, 0xc8, 0xda, 0x47, 0x0a, 0x9c, 0x92, 0x0e, 0x79, 0x0c, 0x29, 0x60,
    -	0x47, 0x4e, 0x01, 0x5f, 0xca, 0x60, 0xb2, 0x94, 0xd8, 0xff, 0x59, 0x1e, 0x56, 0x25, 0xb8, 0x48,
    -	0xbb, 0xfe, 0xe4, 0xdd, 0xfa, 0x7d, 0x98, 0x0f, 0xee, 0xef, 0x57, 0x5d, 0xa7, 0x23, 0xfc, 0xfb,
    -	0xab, 0x19, 0xce, 0x15, 0xb9, 0x70, 0xf8, 0xce, 0xc5, 0x5b, 0xbe, 0x6b, 0x51, 0xc2, 0x48, 0xe6,
    -	0x93, 0xf9, 0xee, 0xac, 0xb6, 0xa1, 0x64, 0x4a, 0xb7, 0xae, 0x72, 0x61, 0x9c, 0x07, 0x04, 0xf9,
    -	0xa6, 0x16, 0xa6, 0x18, 0x79, 0x1d, 0xc5, 0x68, 0x6b, 0xff, 0x50, 0xe0, 0xb9, 0x94, 0x53, 0x1e,
    -	0x83, 0x97, 0xbd, 0x2b, 0x7b, 0xd9, 0xcb, 0x13, 0x59, 0x23, 0xc5, 0xdf, 0x7e, 0xae, 0x40, 0xed,
    -	0x28, 0xfb, 0x65, 0x4c, 0x0e, 0x35, 0x28, 0xdc, 0xb1, 0x6c, 0x93, 0xf9, 0x4e, 0x24, 0xdc, 0xbf,
    -	0x6e, 0xd9, 0x26, 0x62, 0x3b, 0x41, 0x42, 0xc8, 0xa7, 0x5e, 0xfc, 0x1e, 0x28, 0xf0, 0xfc, 0xc8,
    -	0xea, 0x30, 0x46, 0x0b, 0xfc, 0x15, 0x58, 0xe8, 0xd9, 0xa4, 0x67, 0x51, 0xcf, 0x61, 0xa2, 0x05,
    -	0x6f, 0x69, 0xd0, 0xaf, 0x2e, 0xdc, 0x94, 0xb7, 0x50, 0x1c, 0x56, 0xfb, 0x6d, 0x2e, 0x96, 0x4f,
    -	0x58, 0xf9, 0xbd, 0x06, 0xa7, 0x22, 0xe5, 0x87, 0x90, 0xc8, 0x15, 0xff, 0x8c, 0x90, 0x21, 0x8a,
    -	0xc5, 0x01, 0xd0, 0x30, 0x8e, 0x17, 0x6a, 0xdd, 0xa8, 0xaa, 0x3f, 0xcb, 0x50, 0x93, 0x36, 0x90,
    -	0xcc, 0x47, 0xdd, 0x81, 0x52, 0xf8, 0x92, 0x71, 0xdd, 0x6b, 0x21, 0xb8, 0x19, 0xd6, 0xfd, 0x58,
    -	0xb8, 0x2c, 0xed, 0x7e, 0x3a, 0xb4, 0x82, 0x62, 0xf8, 0xda, 0x7f, 0x73, 0xb0, 0x94, 0x50, 0x8e,
    -	0x26, 0x7a, 0x07, 0xf9, 0x0e, 0x40, 0x48, 0x5d, 0xe8, 0xa4, 0x9e, 0xed, 0x35, 0x47, 0x2f, 0xb1,
    -	0xcb, 0x4a, 0xb8, 0x1a, 0xa1, 0xa8, 0x12, 0x98, 0x75, 0x31, 0xc1, 0xee, 0x21, 0x36, 0xaf, 0x3a,
    -	0xae, 0x78, 0xf5, 0x78, 0x2d, 0x83, 0xd2, 0x87, 0x4a, 0xa7, 0xbe, 0x24, 0x8e, 0x34, 0x8b, 0x42,
    -	0xc2, 0x28, 0xca, 0x45, 0x6d, 0xc2, 0x8a, 0x89, 0xa3, 0xcf, 0x47, 0x2c, 0xad, 0x60, 0x93, 0x55,
    -	0xc4, 0x62, 0xf8, 0xf0, 0xb4, 0x95, 0x04, 0x84, 0x92, 0x71, 0xb5, 0xbf, 0x2b, 0xb0, 0x22, 0x49,
    -	0xf6, 0x0d, 0xdc, 0xe9, 0xb6, 0x0d, 0x8a, 0x8f, 0xa1, 0x4e, 0xdc, 0x92, 0xda, 0x9f, 0x57, 0x32,
    -	0xa8, 0xcf, 0x17, 0x32, 0xad, 0x0d, 0xd2, 0xfe, 0xa6, 0xc0, 0x99, 0x44, 0x8c, 0x63, 0x48, 0xb4,
    -	0xef, 0xc8, 0x89, 0xf6, 0xc2, 0x04, 0xe7, 0x4a, 0x49, 0xb3, 0x8f, 0xd2, 0x4e, 0xd5, 0xe4, 0xd7,
    -	0xa4, 0x67, 0xaf, 0x5f, 0xd5, 0x3e, 0xce, 0x4b, 0x6d, 0x37, 0x39, 0x8e, 0xfe, 0x44, 0xce, 0x28,
    -	0xb9, 0xb1, 0x32, 0xca, 0x50, 0xa2, 0xcd, 0x67, 0x4c, 0xb4, 0x84, 0x4c, 0x96, 0x68, 0x6f, 0xc1,
    -	0xbc, 0x5c, 0x7d, 0x0a, 0x63, 0x0e, 0x1c, 0x18, 0xe9, 0xa6, 0x54, 0x9d, 0x64, 0x4a, 0xea, 0x1b,
    -	0xb0, 0x4c, 0xa8, 0xdb, 0x6b, 0xd1, 0x9e, 0x8b, 0xcd, 0xc8, 0x8b, 0xf1, 0x09, 0x96, 0x4f, 0xca,
    -	0x83, 0x7e, 0x75, 0xb9, 0x99, 0xb0, 0x8f, 0x12, 0xb1, 0xe2, 0x9d, 0x33, 0x21, 0x4f, 0x73, 0xe7,
    -	0x4c, 0xd2, 0x3a, 0x99, 0x8f, 0xe4, 0xce, 0x39, 0x6a, 0xb5, 0x67, 0xa1, 0x73, 0x1e, 0xe1, 0x65,
    -	0x23, 0x3b, 0x67, 0x9a, 0x30, 0x38, 0xe0, 0x55, 0xed, 0x88, 0xb2, 0x19, 0x9f, 0x0f, 0x64, 0x9a,
    -	0x1c, 0xbc, 0x0d, 0xd3, 0xb7, 0xd9, 0x9b, 0xe6, 0x98, 0x7d, 0xb7, 0x7f, 0x50, 0xfe, 0x10, 0xaa,
    -	0x2f, 0x08, 0x56, 0xd3, 0xfc, 0x9b, 0x20, 0x9f, 0x5a, 0xbc, 0xd3, 0x8e, 0x6a, 0xe5, 0x69, 0xee,
    -	0xb4, 0xa3, 0x72, 0xa6, 0xf8, 0xe7, 0x9f, 0xe5, 0x4e, 0x3b, 0xd1, 0xde, 0xc7, 0xdf, 0x69, 0x7b,
    -	0x37, 0x2f, 0xef, 0x2f, 0xe9, 0x1a, 0x2d, 0xff, 0x86, 0x1e, 0xdc, 0xbc, 0x6e, 0xf8, 0x1b, 0x28,
    -	0x84, 0xd1, 0x3e, 0x56, 0xa0, 0x24, 0x9b, 0x73, 0xa2, 0x46, 0xef, 0x81, 0x02, 0x4b, 0xae, 0x44,
    -	0x26, 0x3a, 0xc0, 0x3b, 0x9f, 0xc5, 0x9d, 0xf8, 0xf8, 0xee, 0x39, 0xc1, 0x70, 0x29, 0x61, 0x13,
    -	0x25, 0xb1, 0xd2, 0x7e, 0xa8, 0x40, 0x12, 0xb0, 0x6a, 0xa7, 0x4c, 0x5f, 0x37, 0xb3, 0x3c, 0x1d,
    -	0x0b, 0x4f, 0x1f, 0x67, 0xe6, 0xfa, 0xcf, 0x88, 0x46, 0xf9, 0xc0, 0x7a, 0x22, 0x8d, 0xd6, 0xa0,
    -	0xc0, 0xc2, 0x22, 0xe6, 0x0d, 0x5b, 0x06, 0x35, 0x10, 0xdb, 0x51, 0x5d, 0x28, 0x85, 0x05, 0xc0,
    -	0x5b, 0x67, 0x05, 0xe3, 0xc8, 0x27, 0xdf, 0xb0, 0x94, 0xc4, 0xe6, 0xef, 0xec, 0x70, 0x4d, 0x89,
    -	0x22, 0x8a, 0x71, 0xd0, 0x3e, 0x50, 0xc2, 0x36, 0x81, 0xab, 0xf7, 0x6e, 0x8a, 0x7a, 0x33, 0x8d,
    -	0x27, 0x82, 0x1f, 0x63, 0x69, 0xf8, 0x27, 0x39, 0x58, 0x88, 0xcd, 0x2e, 0x13, 0x27, 0xae, 0xca,
    -	0x93, 0x9e, 0xb8, 0xfe, 0x40, 0x81, 0x65, 0x57, 0x16, 0x24, 0xea, 0xf6, 0x9b, 0x99, 0xc6, 0xaf,
    -	0xdc, 0xef, 0xd7, 0x04, 0xfb, 0xe5, 0xa4, 0x5d, 0x94, 0xc8, 0x4d, 0xfb, 0x91, 0x02, 0x89, 0xe0,
    -	0xaa, 0x93, 0x62, 0x9b, 0x0b, 0xd9, 0x6c, 0xc3, 0xa7, 0xc3, 0xe3, 0x58, 0xe6, 0x4f, 0x91, 0xc7,
    -	0x5b, 0x3e, 0x2f, 0x79, 0xf2, 0xb5, 0x7a, 0x03, 0x8a, 0xb6, 0x63, 0xe2, 0x48, 0x0f, 0x19, 0x24,
    -	0xd9, 0x1b, 0x62, 0x1d, 0x05, 0x10, 0xb1, 0x50, 0xcc, 0x8f, 0x15, 0x8a, 0x07, 0x30, 0xef, 0x46,
    -	0x7d, 0x5e, 0xb4, 0x7e, 0x63, 0x76, 0x39, 0xdc, 0xae, 0x2b, 0x82, 0x87, 0x1c, 0x3d, 0x48, 0x26,
    -	0x2c, 0xf5, 0x6e, 0x4c, 0x7f, 0x4f, 0x6d, 0xef, 0xc6, 0x27, 0xad, 0xc9, 0xb5, 0xf1, 0x0f, 0x79,
    -	0x28, 0xa7, 0x65, 0x19, 0xf5, 0x03, 0x05, 0x56, 0x78, 0x20, 0xc5, 0xca, 0xe6, 0x64, 0xe1, 0x1a,
    -	0xdc, 0xb6, 0x77, 0x93, 0x68, 0xa2, 0x64, 0x56, 0xb2, 0x10, 0xd1, 0xa7, 0x97, 0xc9, 0xfe, 0x4b,
    -	0x63, 0x58, 0x08, 0xe9, 0x39, 0x27, 0x99, 0x95, 0xe4, 0xb8, 0x85, 0x23, 0x1d, 0xf7, 0xbb, 0x30,
    -	0xed, 0xb2, 0x07, 0x11, 0xef, 0x5e, 0x30, 0xc6, 0xe8, 0x33, 0xf9, 0xdf, 0x7e, 0xc2, 0x5e, 0x8d,
    -	0x7f, 0x13, 0xe4, 0x53, 0xd5, 0x7e, 0xa7, 0xc0, 0x50, 0xce, 0x9b, 0xa8, 0x72, 0x19, 0x00, 0xdd,
    -	0xff, 0x53, 0xa1, 0x01, 0x8b, 0x88, 0x16, 0x23, 0x44, 0x75, 0xfd, 0xe1, 0xe3, 0xca, 0xd4, 0xa3,
    -	0xc7, 0x95, 0xa9, 0x4f, 0x1e, 0x57, 0xa6, 0x1e, 0x0c, 0x2a, 0xca, 0xc3, 0x41, 0x45, 0x79, 0x34,
    -	0xa8, 0x28, 0x9f, 0x0c, 0x2a, 0xca, 0xbf, 0x06, 0x15, 0xe5, 0xc3, 0x7f, 0x57, 0xa6, 0xde, 0x5d,
    -	0x1b, 0xf5, 0x0f, 0x82, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x94, 0xb7, 0xe5, 0x3f, 0x28,
    -	0x00, 0x00,
    -}
    -
    -func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i--
    -	if m.Shareable {
    -		dAtA[i] = 1
    -	} else {
    -		dAtA[i] = 0
    -	}
    -	i--
    -	dAtA[i] = 0x18
    -	if m.AvailableOnNodes != nil {
    -		{
    -			size, err := m.AvailableOnNodes.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	if len(m.ResourceHandles) > 0 {
    -		for iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0xa
    -		}
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *AllocationResultModel) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *AllocationResultModel) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *AllocationResultModel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		{
    -			size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0xa
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *DriverAllocationResult) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *DriverAllocationResult) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *DriverAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.AllocationResultModel.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.VendorRequestParameters.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *DriverRequests) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *DriverRequests) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *DriverRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Requests) > 0 {
    -		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x1a
    -		}
    -	}
    -	{
    -		size, err := m.VendorParameters.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.DriverName)
    -	copy(dAtA[i:], m.DriverName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesAllocationResult) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesAllocationResult) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesAttribute) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesAttribute) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.NamedResourcesAttributeValue.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesAttributeValue) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesAttributeValue) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesAttributeValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.VersionValue != nil {
    -		i -= len(*m.VersionValue)
    -		copy(dAtA[i:], *m.VersionValue)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue)))
    -		i--
    -		dAtA[i] = 0x52
    -	}
    -	if m.StringSliceValue != nil {
    -		{
    -			size, err := m.StringSliceValue.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x4a
    -	}
    -	if m.IntSliceValue != nil {
    -		{
    -			size, err := m.IntSliceValue.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x42
    -	}
    -	if m.IntValue != nil {
    -		i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue))
    -		i--
    -		dAtA[i] = 0x38
    -	}
    -	if m.QuantityValue != nil {
    -		{
    -			size, err := m.QuantityValue.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x32
    -	}
    -	if m.StringValue != nil {
    -		i -= len(*m.StringValue)
    -		copy(dAtA[i:], *m.StringValue)
    -		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue)))
    -		i--
    -		dAtA[i] = 0x2a
    -	}
    -	if m.BoolValue != nil {
    -		i--
    -		if *m.BoolValue {
    -			dAtA[i] = 1
    -		} else {
    -			dAtA[i] = 0
    -		}
    -		i--
    -		dAtA[i] = 0x10
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesFilter) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesFilter) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.Selector)
    -	copy(dAtA[i:], m.Selector)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesInstance) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesInstance) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Attributes) > 0 {
    -		for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesIntSlice) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesIntSlice) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesIntSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Ints) > 0 {
    -		for iNdEx := len(m.Ints) - 1; iNdEx >= 0; iNdEx-- {
    -			i = encodeVarintGenerated(dAtA, i, uint64(m.Ints[iNdEx]))
    -			i--
    -			dAtA[i] = 0x8
    -		}
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesRequest) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesRequest) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.Selector)
    -	copy(dAtA[i:], m.Selector)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesResources) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesResources) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Instances) > 0 {
    -		for iNdEx := len(m.Instances) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Instances[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0xa
    -		}
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *NamedResourcesStringSlice) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *NamedResourcesStringSlice) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *NamedResourcesStringSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Strings) > 0 {
    -		for iNdEx := len(m.Strings) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.Strings[iNdEx])
    -			copy(dAtA[i:], m.Strings[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strings[iNdEx])))
    -			i--
    -			dAtA[i] = 0xa
    -		}
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x1a
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.PotentialNodes) > 0 {
    -		for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.PotentialNodes[iNdEx])
    -			copy(dAtA[i:], m.PotentialNodes[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx])))
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	i -= len(m.SelectedNode)
    -	copy(dAtA[i:], m.SelectedNode)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.ResourceClaims) > 0 {
    -		for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0xa
    -		}
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x1a
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.UID)
    -	copy(dAtA[i:], m.UID)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
    -	i--
    -	dAtA[i] = 0x2a
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0x22
    -	i -= len(m.Resource)
    -	copy(dAtA[i:], m.Resource)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
    -	i--
    -	dAtA[i] = 0x1a
    -	i -= len(m.APIGroup)
    -	copy(dAtA[i:], m.APIGroup)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimParameters) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimParameters) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.DriverRequests) > 0 {
    -		for iNdEx := len(m.DriverRequests) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.DriverRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x22
    -		}
    -	}
    -	i--
    -	if m.Shareable {
    -		dAtA[i] = 1
    -	} else {
    -		dAtA[i] = 0
    -	}
    -	i--
    -	dAtA[i] = 0x18
    -	if m.GeneratedFrom != nil {
    -		{
    -			size, err := m.GeneratedFrom.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimParametersList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimParametersList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimParametersList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimParametersReference) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimParametersReference) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0x1a
    -	i -= len(m.Kind)
    -	copy(dAtA[i:], m.Kind)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.APIGroup)
    -	copy(dAtA[i:], m.APIGroup)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.UnsuitableNodes) > 0 {
    -		for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- {
    -			i -= len(m.UnsuitableNodes[iNdEx])
    -			copy(dAtA[i:], m.UnsuitableNodes[iNdEx])
    -			i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx])))
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.AllocationMode)
    -	copy(dAtA[i:], m.AllocationMode)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
    -	i--
    -	dAtA[i] = 0x1a
    -	if m.ParametersRef != nil {
    -		{
    -			size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	i -= len(m.ResourceClassName)
    -	copy(dAtA[i:], m.ResourceClassName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClassName)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i--
    -	if m.DeallocationRequested {
    -		dAtA[i] = 1
    -	} else {
    -		dAtA[i] = 0
    -	}
    -	i--
    -	dAtA[i] = 0x20
    -	if len(m.ReservedFor) > 0 {
    -		for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x1a
    -		}
    -	}
    -	if m.Allocation != nil {
    -		{
    -			size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	i -= len(m.DriverName)
    -	copy(dAtA[i:], m.DriverName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClass) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClass) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.StructuredParameters != nil {
    -		i--
    -		if *m.StructuredParameters {
    -			dAtA[i] = 1
    -		} else {
    -			dAtA[i] = 0
    -		}
    -		i--
    -		dAtA[i] = 0x28
    -	}
    -	if m.SuitableNodes != nil {
    -		{
    -			size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x22
    -	}
    -	if m.ParametersRef != nil {
    -		{
    -			size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x1a
    -	}
    -	i -= len(m.DriverName)
    -	copy(dAtA[i:], m.DriverName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClassList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClassList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClassParameters) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClassParameters) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClassParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Filters) > 0 {
    -		for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Filters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x22
    -		}
    -	}
    -	if len(m.VendorParameters) > 0 {
    -		for iNdEx := len(m.VendorParameters) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.VendorParameters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x1a
    -		}
    -	}
    -	if m.GeneratedFrom != nil {
    -		{
    -			size, err := m.GeneratedFrom.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x12
    -	}
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClassParametersList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClassParametersList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClassParametersList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceClassParametersReference) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceClassParametersReference) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	i -= len(m.Namespace)
    -	copy(dAtA[i:], m.Namespace)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
    -	i--
    -	dAtA[i] = 0x22
    -	i -= len(m.Name)
    -	copy(dAtA[i:], m.Name)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    -	i--
    -	dAtA[i] = 0x1a
    -	i -= len(m.Kind)
    -	copy(dAtA[i:], m.Kind)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.APIGroup)
    -	copy(dAtA[i:], m.APIGroup)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceFilter) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceFilter) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.ResourceFilterModel.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.DriverName)
    -	copy(dAtA[i:], m.DriverName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceFilterModel) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceFilterModel) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceFilterModel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		{
    -			size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0xa
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceHandle) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.StructuredData != nil {
    -		{
    -			size, err := m.StructuredData.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0x2a
    -	}
    -	i -= len(m.Data)
    -	copy(dAtA[i:], m.Data)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data)))
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.DriverName)
    -	copy(dAtA[i:], m.DriverName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceModel) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceModel) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceModel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		{
    -			size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0xa
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceRequest) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceRequest) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.ResourceRequestModel.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.VendorParameters.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceRequestModel) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceRequestModel) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceRequestModel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		{
    -			size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i])
    -			if err != nil {
    -				return 0, err
    -			}
    -			i -= size
    -			i = encodeVarintGenerated(dAtA, i, uint64(size))
    -		}
    -		i--
    -		dAtA[i] = 0xa
    -	}
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.ResourceModel.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x22
    -	i -= len(m.DriverName)
    -	copy(dAtA[i:], m.DriverName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    -	i--
    -	dAtA[i] = 0x1a
    -	i -= len(m.NodeName)
    -	copy(dAtA[i:], m.NodeName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Items) > 0 {
    -		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x12
    -		}
    -	}
    -	{
    -		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *StructuredResourceHandle) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *StructuredResourceHandle) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *StructuredResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	if len(m.Results) > 0 {
    -		for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
    -			{
    -				size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    -				if err != nil {
    -					return 0, err
    -				}
    -				i -= size
    -				i = encodeVarintGenerated(dAtA, i, uint64(size))
    -			}
    -			i--
    -			dAtA[i] = 0x2a
    -		}
    -	}
    -	i -= len(m.NodeName)
    -	copy(dAtA[i:], m.NodeName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
    -	i--
    -	dAtA[i] = 0x22
    -	{
    -		size, err := m.VendorClaimParameters.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	{
    -		size, err := m.VendorClassParameters.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func (m *VendorParameters) Marshal() (dAtA []byte, err error) {
    -	size := m.Size()
    -	dAtA = make([]byte, size)
    -	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    -	if err != nil {
    -		return nil, err
    -	}
    -	return dAtA[:n], nil
    -}
    -
    -func (m *VendorParameters) MarshalTo(dAtA []byte) (int, error) {
    -	size := m.Size()
    -	return m.MarshalToSizedBuffer(dAtA[:size])
    -}
    -
    -func (m *VendorParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    -	i := len(dAtA)
    -	_ = i
    -	var l int
    -	_ = l
    -	{
    -		size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
    -		if err != nil {
    -			return 0, err
    -		}
    -		i -= size
    -		i = encodeVarintGenerated(dAtA, i, uint64(size))
    -	}
    -	i--
    -	dAtA[i] = 0x12
    -	i -= len(m.DriverName)
    -	copy(dAtA[i:], m.DriverName)
    -	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    -	i--
    -	dAtA[i] = 0xa
    -	return len(dAtA) - i, nil
    -}
    -
    -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    -	offset -= sovGenerated(v)
    -	base := offset
    -	for v >= 1<<7 {
    -		dAtA[offset] = uint8(v&0x7f | 0x80)
    -		v >>= 7
    -		offset++
    -	}
    -	dAtA[offset] = uint8(v)
    -	return base
    -}
    -func (m *AllocationResult) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.ResourceHandles) > 0 {
    -		for _, e := range m.ResourceHandles {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	if m.AvailableOnNodes != nil {
    -		l = m.AvailableOnNodes.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	n += 2
    -	return n
    -}
    -
    -func (m *AllocationResultModel) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		l = m.NamedResources.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
    -func (m *DriverAllocationResult) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.VendorRequestParameters.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.AllocationResultModel.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *DriverRequests) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.DriverName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.VendorParameters.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Requests) > 0 {
    -		for _, e := range m.Requests {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *NamedResourcesAllocationResult) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *NamedResourcesAttribute) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.NamedResourcesAttributeValue.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *NamedResourcesAttributeValue) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if m.BoolValue != nil {
    -		n += 2
    -	}
    -	if m.StringValue != nil {
    -		l = len(*m.StringValue)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.QuantityValue != nil {
    -		l = m.QuantityValue.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.IntValue != nil {
    -		n += 1 + sovGenerated(uint64(*m.IntValue))
    -	}
    -	if m.IntSliceValue != nil {
    -		l = m.IntSliceValue.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.StringSliceValue != nil {
    -		l = m.StringSliceValue.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.VersionValue != nil {
    -		l = len(*m.VersionValue)
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
    -func (m *NamedResourcesFilter) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Selector)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *NamedResourcesInstance) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Attributes) > 0 {
    -		for _, e := range m.Attributes {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *NamedResourcesIntSlice) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.Ints) > 0 {
    -		for _, e := range m.Ints {
    -			n += 1 + sovGenerated(uint64(e))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *NamedResourcesRequest) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Selector)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *NamedResourcesResources) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.Instances) > 0 {
    -		for _, e := range m.Instances {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *NamedResourcesStringSlice) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.Strings) > 0 {
    -		for _, s := range m.Strings {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *PodSchedulingContext) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Status.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *PodSchedulingContextList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *PodSchedulingContextSpec) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.SelectedNode)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.PotentialNodes) > 0 {
    -		for _, s := range m.PotentialNodes {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *PodSchedulingContextStatus) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if len(m.ResourceClaims) > 0 {
    -		for _, e := range m.ResourceClaims {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClaim) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Status.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceClaimConsumerReference) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.APIGroup)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Resource)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.UID)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceClaimList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClaimParameters) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.GeneratedFrom != nil {
    -		l = m.GeneratedFrom.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	n += 2
    -	if len(m.DriverRequests) > 0 {
    -		for _, e := range m.DriverRequests {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClaimParametersList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClaimParametersReference) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.APIGroup)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Kind)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceClaimSchedulingStatus) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.UnsuitableNodes) > 0 {
    -		for _, s := range m.UnsuitableNodes {
    -			l = len(s)
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClaimSpec) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.ResourceClassName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.ParametersRef != nil {
    -		l = m.ParametersRef.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	l = len(m.AllocationMode)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceClaimStatus) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.DriverName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.Allocation != nil {
    -		l = m.Allocation.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if len(m.ReservedFor) > 0 {
    -		for _, e := range m.ReservedFor {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	n += 2
    -	return n
    -}
    -
    -func (m *ResourceClaimTemplate) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceClaimTemplateList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClaimTemplateSpec) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Spec.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceClass) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.DriverName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.ParametersRef != nil {
    -		l = m.ParametersRef.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.SuitableNodes != nil {
    -		l = m.SuitableNodes.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if m.StructuredParameters != nil {
    -		n += 2
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClassList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClassParameters) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.GeneratedFrom != nil {
    -		l = m.GeneratedFrom.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	if len(m.VendorParameters) > 0 {
    -		for _, e := range m.VendorParameters {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	if len(m.Filters) > 0 {
    -		for _, e := range m.Filters {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClassParametersList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *ResourceClassParametersReference) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.APIGroup)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Kind)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Name)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Namespace)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceFilter) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.DriverName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.ResourceFilterModel.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceFilterModel) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		l = m.NamedResources.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
    -func (m *ResourceHandle) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.DriverName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.Data)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if m.StructuredData != nil {
    -		l = m.StructuredData.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
    -func (m *ResourceModel) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		l = m.NamedResources.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
    -func (m *ResourceRequest) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.VendorParameters.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.ResourceRequestModel.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceRequestModel) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	if m.NamedResources != nil {
    -		l = m.NamedResources.Size()
    -		n += 1 + l + sovGenerated(uint64(l))
    -	}
    -	return n
    -}
    -
    -func (m *ResourceSlice) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ObjectMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.NodeName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.DriverName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.ResourceModel.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func (m *ResourceSliceList) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.ListMeta.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Items) > 0 {
    -		for _, e := range m.Items {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *StructuredResourceHandle) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = m.VendorClassParameters.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.VendorClaimParameters.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = len(m.NodeName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	if len(m.Results) > 0 {
    -		for _, e := range m.Results {
    -			l = e.Size()
    -			n += 1 + l + sovGenerated(uint64(l))
    -		}
    -	}
    -	return n
    -}
    -
    -func (m *VendorParameters) Size() (n int) {
    -	if m == nil {
    -		return 0
    -	}
    -	var l int
    -	_ = l
    -	l = len(m.DriverName)
    -	n += 1 + l + sovGenerated(uint64(l))
    -	l = m.Parameters.Size()
    -	n += 1 + l + sovGenerated(uint64(l))
    -	return n
    -}
    -
    -func sovGenerated(x uint64) (n int) {
    -	return (math_bits.Len64(x|1) + 6) / 7
    -}
    -func sozGenerated(x uint64) (n int) {
    -	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    -}
    -func (this *AllocationResult) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForResourceHandles := "[]ResourceHandle{"
    -	for _, f := range this.ResourceHandles {
    -		repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForResourceHandles += "}"
    -	s := strings.Join([]string{`&AllocationResult{`,
    -		`ResourceHandles:` + repeatedStringForResourceHandles + `,`,
    -		`AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`,
    -		`Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *AllocationResultModel) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&AllocationResultModel{`,
    -		`NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesAllocationResult", "NamedResourcesAllocationResult", 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *DriverAllocationResult) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&DriverAllocationResult{`,
    -		`VendorRequestParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorRequestParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    -		`AllocationResultModel:` + strings.Replace(strings.Replace(this.AllocationResultModel.String(), "AllocationResultModel", "AllocationResultModel", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *DriverRequests) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForRequests := "[]ResourceRequest{"
    -	for _, f := range this.Requests {
    -		repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "ResourceRequest", "ResourceRequest", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForRequests += "}"
    -	s := strings.Join([]string{`&DriverRequests{`,
    -		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    -		`VendorParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    -		`Requests:` + repeatedStringForRequests + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesAllocationResult) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&NamedResourcesAllocationResult{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesAttribute) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&NamedResourcesAttribute{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`NamedResourcesAttributeValue:` + strings.Replace(strings.Replace(this.NamedResourcesAttributeValue.String(), "NamedResourcesAttributeValue", "NamedResourcesAttributeValue", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesAttributeValue) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&NamedResourcesAttributeValue{`,
    -		`BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`,
    -		`StringValue:` + valueToStringGenerated(this.StringValue) + `,`,
    -		`QuantityValue:` + strings.Replace(fmt.Sprintf("%v", this.QuantityValue), "Quantity", "resource.Quantity", 1) + `,`,
    -		`IntValue:` + valueToStringGenerated(this.IntValue) + `,`,
    -		`IntSliceValue:` + strings.Replace(this.IntSliceValue.String(), "NamedResourcesIntSlice", "NamedResourcesIntSlice", 1) + `,`,
    -		`StringSliceValue:` + strings.Replace(this.StringSliceValue.String(), "NamedResourcesStringSlice", "NamedResourcesStringSlice", 1) + `,`,
    -		`VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesFilter) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&NamedResourcesFilter{`,
    -		`Selector:` + fmt.Sprintf("%v", this.Selector) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesInstance) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForAttributes := "[]NamedResourcesAttribute{"
    -	for _, f := range this.Attributes {
    -		repeatedStringForAttributes += strings.Replace(strings.Replace(f.String(), "NamedResourcesAttribute", "NamedResourcesAttribute", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForAttributes += "}"
    -	s := strings.Join([]string{`&NamedResourcesInstance{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`Attributes:` + repeatedStringForAttributes + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesIntSlice) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&NamedResourcesIntSlice{`,
    -		`Ints:` + fmt.Sprintf("%v", this.Ints) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesRequest) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&NamedResourcesRequest{`,
    -		`Selector:` + fmt.Sprintf("%v", this.Selector) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesResources) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForInstances := "[]NamedResourcesInstance{"
    -	for _, f := range this.Instances {
    -		repeatedStringForInstances += strings.Replace(strings.Replace(f.String(), "NamedResourcesInstance", "NamedResourcesInstance", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForInstances += "}"
    -	s := strings.Join([]string{`&NamedResourcesResources{`,
    -		`Instances:` + repeatedStringForInstances + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *NamedResourcesStringSlice) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&NamedResourcesStringSlice{`,
    -		`Strings:` + fmt.Sprintf("%v", this.Strings) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *PodSchedulingContext) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&PodSchedulingContext{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`,
    -		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *PodSchedulingContextList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]PodSchedulingContext{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&PodSchedulingContextList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *PodSchedulingContextSpec) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&PodSchedulingContextSpec{`,
    -		`SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`,
    -		`PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *PodSchedulingContextStatus) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{"
    -	for _, f := range this.ResourceClaims {
    -		repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForResourceClaims += "}"
    -	s := strings.Join([]string{`&PodSchedulingContextStatus{`,
    -		`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaim) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClaim{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
    -		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimConsumerReference) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClaimConsumerReference{`,
    -		`APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
    -		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]ResourceClaim{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ResourceClaimList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimParameters) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForDriverRequests := "[]DriverRequests{"
    -	for _, f := range this.DriverRequests {
    -		repeatedStringForDriverRequests += strings.Replace(strings.Replace(f.String(), "DriverRequests", "DriverRequests", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForDriverRequests += "}"
    -	s := strings.Join([]string{`&ResourceClaimParameters{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`GeneratedFrom:` + strings.Replace(this.GeneratedFrom.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`,
    -		`Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`,
    -		`DriverRequests:` + repeatedStringForDriverRequests + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimParametersList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]ResourceClaimParameters{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimParameters", "ResourceClaimParameters", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ResourceClaimParametersList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimParametersReference) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClaimParametersReference{`,
    -		`APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
    -		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimSchedulingStatus) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimSpec) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClaimSpec{`,
    -		`ResourceClassName:` + fmt.Sprintf("%v", this.ResourceClassName) + `,`,
    -		`ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`,
    -		`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimStatus) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{"
    -	for _, f := range this.ReservedFor {
    -		repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForReservedFor += "}"
    -	s := strings.Join([]string{`&ResourceClaimStatus{`,
    -		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    -		`Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`,
    -		`ReservedFor:` + repeatedStringForReservedFor + `,`,
    -		`DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimTemplate) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClaimTemplate{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimTemplateList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]ResourceClaimTemplate{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ResourceClaimTemplateList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClaimTemplateSpec) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClaimTemplateSpec{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClass) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClass{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    -		`ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`,
    -		`SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`,
    -		`StructuredParameters:` + valueToStringGenerated(this.StructuredParameters) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClassList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]ResourceClass{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClass", "ResourceClass", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ResourceClassList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClassParameters) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForVendorParameters := "[]VendorParameters{"
    -	for _, f := range this.VendorParameters {
    -		repeatedStringForVendorParameters += strings.Replace(strings.Replace(f.String(), "VendorParameters", "VendorParameters", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForVendorParameters += "}"
    -	repeatedStringForFilters := "[]ResourceFilter{"
    -	for _, f := range this.Filters {
    -		repeatedStringForFilters += strings.Replace(strings.Replace(f.String(), "ResourceFilter", "ResourceFilter", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForFilters += "}"
    -	s := strings.Join([]string{`&ResourceClassParameters{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`GeneratedFrom:` + strings.Replace(this.GeneratedFrom.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`,
    -		`VendorParameters:` + repeatedStringForVendorParameters + `,`,
    -		`Filters:` + repeatedStringForFilters + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClassParametersList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]ResourceClassParameters{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClassParameters", "ResourceClassParameters", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ResourceClassParametersList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceClassParametersReference) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceClassParametersReference{`,
    -		`APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
    -		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
    -		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    -		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceFilter) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceFilter{`,
    -		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    -		`ResourceFilterModel:` + strings.Replace(strings.Replace(this.ResourceFilterModel.String(), "ResourceFilterModel", "ResourceFilterModel", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceFilterModel) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceFilterModel{`,
    -		`NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesFilter", "NamedResourcesFilter", 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceHandle) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceHandle{`,
    -		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    -		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
    -		`StructuredData:` + strings.Replace(this.StructuredData.String(), "StructuredResourceHandle", "StructuredResourceHandle", 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceModel) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceModel{`,
    -		`NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesResources", "NamedResourcesResources", 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceRequest) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceRequest{`,
    -		`VendorParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    -		`ResourceRequestModel:` + strings.Replace(strings.Replace(this.ResourceRequestModel.String(), "ResourceRequestModel", "ResourceRequestModel", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceRequestModel) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceRequestModel{`,
    -		`NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesRequest", "NamedResourcesRequest", 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceSlice) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&ResourceSlice{`,
    -		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    -		`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
    -		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    -		`ResourceModel:` + strings.Replace(strings.Replace(this.ResourceModel.String(), "ResourceModel", "ResourceModel", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *ResourceSliceList) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForItems := "[]ResourceSlice{"
    -	for _, f := range this.Items {
    -		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForItems += "}"
    -	s := strings.Join([]string{`&ResourceSliceList{`,
    -		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    -		`Items:` + repeatedStringForItems + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *StructuredResourceHandle) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	repeatedStringForResults := "[]DriverAllocationResult{"
    -	for _, f := range this.Results {
    -		repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DriverAllocationResult", "DriverAllocationResult", 1), `&`, ``, 1) + ","
    -	}
    -	repeatedStringForResults += "}"
    -	s := strings.Join([]string{`&StructuredResourceHandle{`,
    -		`VendorClassParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorClassParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    -		`VendorClaimParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorClaimParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    -		`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
    -		`Results:` + repeatedStringForResults + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func (this *VendorParameters) String() string {
    -	if this == nil {
    -		return "nil"
    -	}
    -	s := strings.Join([]string{`&VendorParameters{`,
    -		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    -		`Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    -		`}`,
    -	}, "")
    -	return s
    -}
    -func valueToStringGenerated(v interface{}) string {
    -	rv := reflect.ValueOf(v)
    -	if rv.IsNil() {
    -		return "nil"
    -	}
    -	pv := reflect.Indirect(rv).Interface()
    -	return fmt.Sprintf("*%v", pv)
    -}
    -func (m *AllocationResult) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{})
    -			if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field AvailableOnNodes", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.AvailableOnNodes == nil {
    -				m.AvailableOnNodes = &v1.NodeSelector{}
    -			}
    -			if err := m.AvailableOnNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.Shareable = bool(v != 0)
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *AllocationResultModel) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: AllocationResultModel: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: AllocationResultModel: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.NamedResources == nil {
    -				m.NamedResources = &NamedResourcesAllocationResult{}
    -			}
    -			if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *DriverAllocationResult) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: DriverAllocationResult: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: DriverAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field VendorRequestParameters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.VendorRequestParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field AllocationResultModel", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.AllocationResultModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *DriverRequests) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: DriverRequests: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: DriverRequests: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.VendorParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Requests = append(m.Requests, ResourceRequest{})
    -			if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesAllocationResult) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesAllocationResult: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesAttribute) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesAttribute: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NamedResourcesAttributeValue", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.NamedResourcesAttributeValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesAttributeValue) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesAttributeValue: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesAttributeValue: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 2:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			b := bool(v != 0)
    -			m.BoolValue = &b
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.StringValue = &s
    -			iNdEx = postIndex
    -		case 6:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field QuantityValue", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.QuantityValue == nil {
    -				m.QuantityValue = &resource.Quantity{}
    -			}
    -			if err := m.QuantityValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 7:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
    -			}
    -			var v int64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.IntValue = &v
    -		case 8:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field IntSliceValue", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.IntSliceValue == nil {
    -				m.IntSliceValue = &NamedResourcesIntSlice{}
    -			}
    -			if err := m.IntSliceValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 9:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field StringSliceValue", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.StringSliceValue == nil {
    -				m.StringSliceValue = &NamedResourcesStringSlice{}
    -			}
    -			if err := m.StringSliceValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 10:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			s := string(dAtA[iNdEx:postIndex])
    -			m.VersionValue = &s
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesFilter) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesFilter: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesFilter: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Selector = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesInstance) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesInstance: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesInstance: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Attributes = append(m.Attributes, NamedResourcesAttribute{})
    -			if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesIntSlice) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesIntSlice: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesIntSlice: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType == 0 {
    -				var v int64
    -				for shift := uint(0); ; shift += 7 {
    -					if shift >= 64 {
    -						return ErrIntOverflowGenerated
    -					}
    -					if iNdEx >= l {
    -						return io.ErrUnexpectedEOF
    -					}
    -					b := dAtA[iNdEx]
    -					iNdEx++
    -					v |= int64(b&0x7F) << shift
    -					if b < 0x80 {
    -						break
    -					}
    -				}
    -				m.Ints = append(m.Ints, v)
    -			} else if wireType == 2 {
    -				var packedLen int
    -				for shift := uint(0); ; shift += 7 {
    -					if shift >= 64 {
    -						return ErrIntOverflowGenerated
    -					}
    -					if iNdEx >= l {
    -						return io.ErrUnexpectedEOF
    -					}
    -					b := dAtA[iNdEx]
    -					iNdEx++
    -					packedLen |= int(b&0x7F) << shift
    -					if b < 0x80 {
    -						break
    -					}
    -				}
    -				if packedLen < 0 {
    -					return ErrInvalidLengthGenerated
    -				}
    -				postIndex := iNdEx + packedLen
    -				if postIndex < 0 {
    -					return ErrInvalidLengthGenerated
    -				}
    -				if postIndex > l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				var elementCount int
    -				var count int
    -				for _, integer := range dAtA[iNdEx:postIndex] {
    -					if integer < 128 {
    -						count++
    -					}
    -				}
    -				elementCount = count
    -				if elementCount != 0 && len(m.Ints) == 0 {
    -					m.Ints = make([]int64, 0, elementCount)
    -				}
    -				for iNdEx < postIndex {
    -					var v int64
    -					for shift := uint(0); ; shift += 7 {
    -						if shift >= 64 {
    -							return ErrIntOverflowGenerated
    -						}
    -						if iNdEx >= l {
    -							return io.ErrUnexpectedEOF
    -						}
    -						b := dAtA[iNdEx]
    -						iNdEx++
    -						v |= int64(b&0x7F) << shift
    -						if b < 0x80 {
    -							break
    -						}
    -					}
    -					m.Ints = append(m.Ints, v)
    -				}
    -			} else {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Ints", wireType)
    -			}
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesRequest) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesRequest: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Selector = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesResources) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesResources: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesResources: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Instances = append(m.Instances, NamedResourcesInstance{})
    -			if err := m.Instances[len(m.Instances)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *NamedResourcesStringSlice) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: NamedResourcesStringSlice: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: NamedResourcesStringSlice: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Strings", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Strings = append(m.Strings, string(dAtA[iNdEx:postIndex]))
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, PodSchedulingContext{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.SelectedNode = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex]))
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{})
    -			if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.APIGroup = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Resource = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, ResourceClaim{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimParameters: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimParameters: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field GeneratedFrom", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.GeneratedFrom == nil {
    -				m.GeneratedFrom = &ResourceClaimParametersReference{}
    -			}
    -			if err := m.GeneratedFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.Shareable = bool(v != 0)
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverRequests", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverRequests = append(m.DriverRequests, DriverRequests{})
    -			if err := m.DriverRequests[len(m.DriverRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimParametersList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimParametersList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimParametersList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, ResourceClaimParameters{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimParametersReference: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.APIGroup = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Kind = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex]))
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClassName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.ResourceClassName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.ParametersRef == nil {
    -				m.ParametersRef = &ResourceClaimParametersReference{}
    -			}
    -			if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.AllocationMode = AllocationMode(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.Allocation == nil {
    -				m.Allocation = &AllocationResult{}
    -			}
    -			if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{})
    -			if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			m.DeallocationRequested = bool(v != 0)
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, ResourceClaimTemplate{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClass) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClass: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClass: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.ParametersRef == nil {
    -				m.ParametersRef = &ResourceClassParametersReference{}
    -			}
    -			if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.SuitableNodes == nil {
    -				m.SuitableNodes = &v1.NodeSelector{}
    -			}
    -			if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 5:
    -			if wireType != 0 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field StructuredParameters", wireType)
    -			}
    -			var v int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				v |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			b := bool(v != 0)
    -			m.StructuredParameters = &b
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClassList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClassList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, ResourceClass{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClassParameters: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClassParameters: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field GeneratedFrom", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.GeneratedFrom == nil {
    -				m.GeneratedFrom = &ResourceClassParametersReference{}
    -			}
    -			if err := m.GeneratedFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.VendorParameters = append(m.VendorParameters, VendorParameters{})
    -			if err := m.VendorParameters[len(m.VendorParameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Filters = append(m.Filters, ResourceFilter{})
    -			if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClassParametersList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClassParametersList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClassParametersList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, ResourceClassParameters{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceClassParametersReference: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.APIGroup = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Kind = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Name = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Namespace = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceFilter) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceFilter: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceFilter: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceFilterModel", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ResourceFilterModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceFilterModel) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceFilterModel: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceFilterModel: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.NamedResources == nil {
    -				m.NamedResources = &NamedResourcesFilter{}
    -			}
    -			if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceHandle) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Data = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field StructuredData", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.StructuredData == nil {
    -				m.StructuredData = &StructuredResourceHandle{}
    -			}
    -			if err := m.StructuredData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceModel) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceModel: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceModel: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.NamedResources == nil {
    -				m.NamedResources = &NamedResourcesResources{}
    -			}
    -			if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceRequest) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceRequest: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.VendorParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceRequestModel", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ResourceRequestModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceRequestModel) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceRequestModel: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceRequestModel: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if m.NamedResources == nil {
    -				m.NamedResources = &NamedResourcesRequest{}
    -			}
    -			if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.NodeName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 3:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ResourceModel", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ResourceModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Items = append(m.Items, ResourceSlice{})
    -			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: StructuredResourceHandle: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: StructuredResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field VendorClassParameters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.VendorClassParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field VendorClaimParameters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.VendorClaimParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		case 4:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.NodeName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 5:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.Results = append(m.Results, DriverAllocationResult{})
    -			if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func (m *VendorParameters) Unmarshal(dAtA []byte) error {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	for iNdEx < l {
    -		preIndex := iNdEx
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= uint64(b&0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		fieldNum := int32(wire >> 3)
    -		wireType := int(wire & 0x7)
    -		if wireType == 4 {
    -			return fmt.Errorf("proto: VendorParameters: wiretype end group for non-group")
    -		}
    -		if fieldNum <= 0 {
    -			return fmt.Errorf("proto: VendorParameters: illegal tag %d (wire type %d)", fieldNum, wire)
    -		}
    -		switch fieldNum {
    -		case 1:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    -			}
    -			var stringLen uint64
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				stringLen |= uint64(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			intStringLen := int(stringLen)
    -			if intStringLen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + intStringLen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			m.DriverName = string(dAtA[iNdEx:postIndex])
    -			iNdEx = postIndex
    -		case 2:
    -			if wireType != 2 {
    -				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
    -			}
    -			var msglen int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				msglen |= int(b&0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if msglen < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			postIndex := iNdEx + msglen
    -			if postIndex < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if postIndex > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    -				return err
    -			}
    -			iNdEx = postIndex
    -		default:
    -			iNdEx = preIndex
    -			skippy, err := skipGenerated(dAtA[iNdEx:])
    -			if err != nil {
    -				return err
    -			}
    -			if (skippy < 0) || (iNdEx+skippy) < 0 {
    -				return ErrInvalidLengthGenerated
    -			}
    -			if (iNdEx + skippy) > l {
    -				return io.ErrUnexpectedEOF
    -			}
    -			iNdEx += skippy
    -		}
    -	}
    -
    -	if iNdEx > l {
    -		return io.ErrUnexpectedEOF
    -	}
    -	return nil
    -}
    -func skipGenerated(dAtA []byte) (n int, err error) {
    -	l := len(dAtA)
    -	iNdEx := 0
    -	depth := 0
    -	for iNdEx < l {
    -		var wire uint64
    -		for shift := uint(0); ; shift += 7 {
    -			if shift >= 64 {
    -				return 0, ErrIntOverflowGenerated
    -			}
    -			if iNdEx >= l {
    -				return 0, io.ErrUnexpectedEOF
    -			}
    -			b := dAtA[iNdEx]
    -			iNdEx++
    -			wire |= (uint64(b) & 0x7F) << shift
    -			if b < 0x80 {
    -				break
    -			}
    -		}
    -		wireType := int(wire & 0x7)
    -		switch wireType {
    -		case 0:
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return 0, ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return 0, io.ErrUnexpectedEOF
    -				}
    -				iNdEx++
    -				if dAtA[iNdEx-1] < 0x80 {
    -					break
    -				}
    -			}
    -		case 1:
    -			iNdEx += 8
    -		case 2:
    -			var length int
    -			for shift := uint(0); ; shift += 7 {
    -				if shift >= 64 {
    -					return 0, ErrIntOverflowGenerated
    -				}
    -				if iNdEx >= l {
    -					return 0, io.ErrUnexpectedEOF
    -				}
    -				b := dAtA[iNdEx]
    -				iNdEx++
    -				length |= (int(b) & 0x7F) << shift
    -				if b < 0x80 {
    -					break
    -				}
    -			}
    -			if length < 0 {
    -				return 0, ErrInvalidLengthGenerated
    -			}
    -			iNdEx += length
    -		case 3:
    -			depth++
    -		case 4:
    -			if depth == 0 {
    -				return 0, ErrUnexpectedEndOfGroupGenerated
    -			}
    -			depth--
    -		case 5:
    -			iNdEx += 4
    -		default:
    -			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
    -		}
    -		if iNdEx < 0 {
    -			return 0, ErrInvalidLengthGenerated
    -		}
    -		if depth == 0 {
    -			return iNdEx, nil
    -		}
    -	}
    -	return 0, io.ErrUnexpectedEOF
    -}
    -
    -var (
    -	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
    -	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
    -	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
    -)
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto
    deleted file mode 100644
    index 4a6a5bab6c..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/generated.proto
    +++ /dev/null
    @@ -1,749 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -
    -// This file was autogenerated by go-to-protobuf. Do not edit it manually!
    -
    -syntax = "proto2";
    -
    -package k8s.io.api.resource.v1alpha2;
    -
    -import "k8s.io/api/core/v1/generated.proto";
    -import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
    -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
    -import "k8s.io/apimachinery/pkg/runtime/generated.proto";
    -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    -
    -// Package-wide variables from generator "generated".
    -option go_package = "k8s.io/api/resource/v1alpha2";
    -
    -// AllocationResult contains attributes of an allocated resource.
    -message AllocationResult {
    -  // ResourceHandles contain the state associated with an allocation that
    -  // should be maintained throughout the lifetime of a claim. Each
    -  // ResourceHandle contains data that should be passed to a specific kubelet
    -  // plugin once it lands on a node. This data is returned by the driver
    -  // after a successful allocation and is opaque to Kubernetes. Driver
    -  // documentation may explain to users how to interpret this data if needed.
    -  //
    -  // Setting this field is optional. It has a maximum size of 32 entries.
    -  // If null (or empty), it is assumed this allocation will be processed by a
    -  // single kubelet plugin with no ResourceHandle data attached. The name of
    -  // the kubelet plugin invoked will match the DriverName set in the
    -  // ResourceClaimStatus this AllocationResult is embedded in.
    -  //
    -  // +listType=atomic
    -  // +optional
    -  repeated ResourceHandle resourceHandles = 1;
    -
    -  // This field will get set by the resource driver after it has allocated
    -  // the resource to inform the scheduler where it can schedule Pods using
    -  // the ResourceClaim.
    -  //
    -  // Setting this field is optional. If null, the resource is available
    -  // everywhere.
    -  // +optional
    -  optional k8s.io.api.core.v1.NodeSelector availableOnNodes = 2;
    -
    -  // Shareable determines whether the resource supports more
    -  // than one consumer at a time.
    -  // +optional
    -  optional bool shareable = 3;
    -}
    -
    -// AllocationResultModel must have one and only one field set.
    -message AllocationResultModel {
    -  // NamedResources describes the allocation result when using the named resources model.
    -  //
    -  // +optional
    -  optional NamedResourcesAllocationResult namedResources = 1;
    -}
    -
    -// DriverAllocationResult contains vendor parameters and the allocation result for
    -// one request.
    -message DriverAllocationResult {
    -  // VendorRequestParameters are the per-request configuration parameters
    -  // from the time that the claim was allocated.
    -  //
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorRequestParameters = 1;
    -
    -  optional AllocationResultModel allocationResultModel = 2;
    -}
    -
    -// DriverRequests describes all resources that are needed from one particular driver.
    -message DriverRequests {
    -  // DriverName is the name used by the DRA driver kubelet plugin.
    -  optional string driverName = 1;
    -
    -  // VendorParameters are arbitrary setup parameters for all requests of the
    -  // claim. They are ignored while allocating the claim.
    -  //
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 2;
    -
    -  // Requests describes all resources that are needed from the driver.
    -  // +listType=atomic
    -  repeated ResourceRequest requests = 3;
    -}
    -
    -// NamedResourcesAllocationResult is used in AllocationResultModel.
    -message NamedResourcesAllocationResult {
    -  // Name is the name of the selected resource instance.
    -  optional string name = 1;
    -}
    -
    -// NamedResourcesAttribute is a combination of an attribute name and its value.
    -message NamedResourcesAttribute {
    -  // Name is unique identifier among all resource instances managed by
    -  // the driver on the node. It must be a DNS subdomain.
    -  optional string name = 1;
    -
    -  optional NamedResourcesAttributeValue attributeValue = 2;
    -}
    -
    -// NamedResourcesAttributeValue must have one and only one field set.
    -message NamedResourcesAttributeValue {
    -  // QuantityValue is a quantity.
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity quantity = 6;
    -
    -  // BoolValue is a true/false value.
    -  optional bool bool = 2;
    -
    -  // IntValue is a 64-bit integer.
    -  optional int64 int = 7;
    -
    -  // IntSliceValue is an array of 64-bit integers.
    -  optional NamedResourcesIntSlice intSlice = 8;
    -
    -  // StringValue is a string.
    -  optional string string = 5;
    -
    -  // StringSliceValue is an array of strings.
    -  optional NamedResourcesStringSlice stringSlice = 9;
    -
    -  // VersionValue is a semantic version according to semver.org spec 2.0.0.
    -  optional string version = 10;
    -}
    -
    -// NamedResourcesFilter is used in ResourceFilterModel.
    -message NamedResourcesFilter {
    -  // Selector is a CEL expression which must evaluate to true if a
    -  // resource instance is suitable. The language is as defined in
    -  // https://kubernetes.io/docs/reference/using-api/cel/
    -  //
-  // In addition, for each type in NamedResourcesAttributeValue there is a map that
    -  // resolves to the corresponding value of the instance under evaluation.
    -  // For example:
    -  //
    -  //    attributes.quantity["a"].isGreaterThan(quantity("0")) &&
    -  //    attributes.stringslice["b"].isSorted()
    -  optional string selector = 1;
    -}
    -
    -// NamedResourcesInstance represents one individual hardware instance that can be selected based
    -// on its attributes.
    -message NamedResourcesInstance {
    -  // Name is unique identifier among all resource instances managed by
    -  // the driver on the node. It must be a DNS subdomain.
    -  optional string name = 1;
    -
    -  // Attributes defines the attributes of this resource instance.
    -  // The name of each attribute must be unique.
    -  //
    -  // +listType=atomic
    -  // +optional
    -  repeated NamedResourcesAttribute attributes = 2;
    -}
    -
    -// NamedResourcesIntSlice contains a slice of 64-bit integers.
    -message NamedResourcesIntSlice {
    -  // Ints is the slice of 64-bit integers.
    -  //
    -  // +listType=atomic
    -  repeated int64 ints = 1;
    -}
    -
    -// NamedResourcesRequest is used in ResourceRequestModel.
    -message NamedResourcesRequest {
    -  // Selector is a CEL expression which must evaluate to true if a
    -  // resource instance is suitable. The language is as defined in
    -  // https://kubernetes.io/docs/reference/using-api/cel/
    -  //
-  // In addition, for each type in NamedResourcesAttributeValue there is a map that
    -  // resolves to the corresponding value of the instance under evaluation.
    -  // For example:
    -  //
    -  //    attributes.quantity["a"].isGreaterThan(quantity("0")) &&
    -  //    attributes.stringslice["b"].isSorted()
    -  optional string selector = 1;
    -}
    -
    -// NamedResourcesResources is used in ResourceModel.
    -message NamedResourcesResources {
    -  // The list of all individual resources instances currently available.
    -  //
    -  // +listType=atomic
    -  repeated NamedResourcesInstance instances = 1;
    -}
    -
    -// NamedResourcesStringSlice contains a slice of strings.
    -message NamedResourcesStringSlice {
    -  // Strings is the slice of strings.
    -  //
    -  // +listType=atomic
    -  repeated string strings = 1;
    -}
    -
    -// PodSchedulingContext objects hold information that is needed to schedule
    -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
    -// mode.
    -//
    -// This is an alpha type and requires enabling the DynamicResourceAllocation
    -// feature gate.
    -message PodSchedulingContext {
    -  // Standard object metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // Spec describes where resources for the Pod are needed.
    -  optional PodSchedulingContextSpec spec = 2;
    -
    -  // Status describes where resources for the Pod can be allocated.
    -  // +optional
    -  optional PodSchedulingContextStatus status = 3;
    -}
    -
    -// PodSchedulingContextList is a collection of Pod scheduling objects.
    -message PodSchedulingContextList {
    -  // Standard list metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // Items is the list of PodSchedulingContext objects.
    -  repeated PodSchedulingContext items = 2;
    -}
    -
    -// PodSchedulingContextSpec describes where resources for the Pod are needed.
    -message PodSchedulingContextSpec {
    -  // SelectedNode is the node for which allocation of ResourceClaims that
    -  // are referenced by the Pod and that use "WaitForFirstConsumer"
    -  // allocation is to be attempted.
    -  // +optional
    -  optional string selectedNode = 1;
    -
    -  // PotentialNodes lists nodes where the Pod might be able to run.
    -  //
    -  // The size of this field is limited to 128. This is large enough for
    -  // many clusters. Larger clusters may need more attempts to find a node
    -  // that suits all pending resources. This may get increased in the
    -  // future, but not reduced.
    -  //
    -  // +listType=atomic
    -  // +optional
    -  repeated string potentialNodes = 2;
    -}
    -
    -// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
    -message PodSchedulingContextStatus {
    -  // ResourceClaims describes resource availability for each
    -  // pod.spec.resourceClaim entry where the corresponding ResourceClaim
    -  // uses "WaitForFirstConsumer" allocation mode.
    -  //
    -  // +listType=map
    -  // +listMapKey=name
    -  // +optional
    -  repeated ResourceClaimSchedulingStatus resourceClaims = 1;
    -}
    -
    -// ResourceClaim describes which resources are needed by a resource consumer.
    -// Its status tracks whether the resource has been allocated and what the
    -// resulting attributes are.
    -//
    -// This is an alpha type and requires enabling the DynamicResourceAllocation
    -// feature gate.
    -message ResourceClaim {
    -  // Standard object metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // Spec describes the desired attributes of a resource that then needs
    -  // to be allocated. It can only be set once when creating the
    -  // ResourceClaim.
    -  optional ResourceClaimSpec spec = 2;
    -
    -  // Status describes whether the resource is available and with which
    -  // attributes.
    -  // +optional
    -  optional ResourceClaimStatus status = 3;
    -}
    -
    -// ResourceClaimConsumerReference contains enough information to let you
    -// locate the consumer of a ResourceClaim. The user must be a resource in the same
    -// namespace as the ResourceClaim.
    -message ResourceClaimConsumerReference {
    -  // APIGroup is the group for the resource being referenced. It is
    -  // empty for the core API. This matches the group in the APIVersion
    -  // that is used when creating the resources.
    -  // +optional
    -  optional string apiGroup = 1;
    -
    -  // Resource is the type of resource being referenced, for example "pods".
    -  optional string resource = 3;
    -
    -  // Name is the name of resource being referenced.
    -  optional string name = 4;
    -
    -  // UID identifies exactly one incarnation of the resource.
    -  optional string uid = 5;
    -}
    -
    -// ResourceClaimList is a collection of claims.
    -message ResourceClaimList {
    -  // Standard list metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // Items is the list of resource claims.
    -  repeated ResourceClaim items = 2;
    -}
    -
    -// ResourceClaimParameters defines resource requests for a ResourceClaim in an
    -// in-tree format understood by Kubernetes.
    -message ResourceClaimParameters {
    -  // Standard object metadata
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // If this object was created from some other resource, then this links
    -  // back to that resource. This field is used to find the in-tree representation
    -  // of the claim parameters when the parameter reference of the claim refers
    -  // to some unknown type.
    -  // +optional
    -  optional ResourceClaimParametersReference generatedFrom = 2;
    -
    -  // Shareable indicates whether the allocated claim is meant to be shareable
    -  // by multiple consumers at the same time.
    -  // +optional
    -  optional bool shareable = 3;
    -
    -  // DriverRequests describes all resources that are needed for the
    -  // allocated claim. A single claim may use resources coming from
    -  // different drivers. For each driver, this array has at most one
    -  // entry which then may have one or more per-driver requests.
    -  //
    -  // May be empty, in which case the claim can always be allocated.
    -  //
    -  // +listType=atomic
    -  repeated DriverRequests driverRequests = 4;
    -}
    -
    -// ResourceClaimParametersList is a collection of ResourceClaimParameters.
    -message ResourceClaimParametersList {
    -  // Standard list metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // Items is the list of node resource capacity objects.
    -  repeated ResourceClaimParameters items = 2;
    -}
    -
    -// ResourceClaimParametersReference contains enough information to let you
    -// locate the parameters for a ResourceClaim. The object must be in the same
    -// namespace as the ResourceClaim.
    -message ResourceClaimParametersReference {
    -  // APIGroup is the group for the resource being referenced. It is
    -  // empty for the core API. This matches the group in the APIVersion
    -  // that is used when creating the resources.
    -  // +optional
    -  optional string apiGroup = 1;
    -
    -  // Kind is the type of resource being referenced. This is the same
    -  // value as in the parameter object's metadata, for example "ConfigMap".
    -  optional string kind = 2;
    -
    -  // Name is the name of resource being referenced.
    -  optional string name = 3;
    -}
    -
    -// ResourceClaimSchedulingStatus contains information about one particular
    -// ResourceClaim with "WaitForFirstConsumer" allocation mode.
    -message ResourceClaimSchedulingStatus {
    -  // Name matches the pod.spec.resourceClaims[*].Name field.
    -  // +optional
    -  optional string name = 1;
    -
    -  // UnsuitableNodes lists nodes that the ResourceClaim cannot be
    -  // allocated for.
    -  //
    -  // The size of this field is limited to 128, the same as for
    -  // PodSchedulingSpec.PotentialNodes. This may get increased in the
    -  // future, but not reduced.
    -  //
    -  // +listType=atomic
    -  // +optional
    -  repeated string unsuitableNodes = 2;
    -}
    -
    -// ResourceClaimSpec defines how a resource is to be allocated.
    -message ResourceClaimSpec {
    -  // ResourceClassName references the driver and additional parameters
    -  // via the name of a ResourceClass that was created as part of the
    -  // driver deployment.
    -  optional string resourceClassName = 1;
    -
    -  // ParametersRef references a separate object with arbitrary parameters
    -  // that will be used by the driver when allocating a resource for the
    -  // claim.
    -  //
    -  // The object must be in the same namespace as the ResourceClaim.
    -  // +optional
    -  optional ResourceClaimParametersReference parametersRef = 2;
    -
    -  // Allocation can start immediately or when a Pod wants to use the
    -  // resource. "WaitForFirstConsumer" is the default.
    -  // +optional
    -  optional string allocationMode = 3;
    -}
    -
    -// ResourceClaimStatus tracks whether the resource has been allocated and what
    -// the resulting attributes are.
    -message ResourceClaimStatus {
    -  // DriverName is a copy of the driver name from the ResourceClass at
    -  // the time when allocation started.
    -  // +optional
    -  optional string driverName = 1;
    -
    -  // Allocation is set by the resource driver once a resource or set of
    -  // resources has been allocated successfully. If this is not specified, the
    -  // resources have not been allocated yet.
    -  // +optional
    -  optional AllocationResult allocation = 2;
    -
    -  // ReservedFor indicates which entities are currently allowed to use
    -  // the claim. A Pod which references a ResourceClaim which is not
    -  // reserved for that Pod will not be started.
    -  //
    -  // There can be at most 32 such reservations. This may get increased in
    -  // the future, but not reduced.
    -  //
    -  // +listType=map
    -  // +listMapKey=uid
    -  // +patchStrategy=merge
    -  // +patchMergeKey=uid
    -  // +optional
    -  repeated ResourceClaimConsumerReference reservedFor = 3;
    -
    -  // DeallocationRequested indicates that a ResourceClaim is to be
    -  // deallocated.
    -  //
    -  // The driver then must deallocate this claim and reset the field
    -  // together with clearing the Allocation field.
    -  //
    -  // While DeallocationRequested is set, no new consumers may be added to
    -  // ReservedFor.
    -  // +optional
    -  optional bool deallocationRequested = 4;
    -}
    -
    -// ResourceClaimTemplate is used to produce ResourceClaim objects.
    -message ResourceClaimTemplate {
    -  // Standard object metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // Describes the ResourceClaim that is to be generated.
    -  //
    -  // This field is immutable. A ResourceClaim will get created by the
    -  // control plane for a Pod when needed and then not get updated
    -  // anymore.
    -  optional ResourceClaimTemplateSpec spec = 2;
    -}
    -
    -// ResourceClaimTemplateList is a collection of claim templates.
    -message ResourceClaimTemplateList {
    -  // Standard list metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // Items is the list of resource claim templates.
    -  repeated ResourceClaimTemplate items = 2;
    -}
    -
    -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
    -message ResourceClaimTemplateSpec {
    -  // ObjectMeta may contain labels and annotations that will be copied into the PVC
    -  // when creating it. No other fields are allowed and will be rejected during
    -  // validation.
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // Spec for the ResourceClaim. The entire content is copied unchanged
    -  // into the ResourceClaim that gets created from this template. The
    -  // same fields as in a ResourceClaim are also valid here.
    -  optional ResourceClaimSpec spec = 2;
    -}
    -
    -// ResourceClass is used by administrators to influence how resources
    -// are allocated.
    -//
    -// This is an alpha type and requires enabling the DynamicResourceAllocation
    -// feature gate.
    -message ResourceClass {
    -  // Standard object metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // DriverName defines the name of the dynamic resource driver that is
    -  // used for allocation of a ResourceClaim that uses this class.
    -  //
    -  // Resource drivers have a unique name in forward domain order
    -  // (acme.example.com).
    -  optional string driverName = 2;
    -
    -  // ParametersRef references an arbitrary separate object that may hold
    -  // parameters that will be used by the driver when allocating a
    -  // resource that uses this class. A dynamic resource driver can
-  // distinguish between parameters stored here and those stored in
    -  // ResourceClaimSpec.
    -  // +optional
    -  optional ResourceClassParametersReference parametersRef = 3;
    -
    -  // Only nodes matching the selector will be considered by the scheduler
    -  // when trying to find a Node that fits a Pod when that Pod uses
    -  // a ResourceClaim that has not been allocated yet.
    -  //
    -  // Setting this field is optional. If null, all nodes are candidates.
    -  // +optional
    -  optional k8s.io.api.core.v1.NodeSelector suitableNodes = 4;
    -
    -  // If and only if allocation of claims using this class is handled
    -  // via structured parameters, then StructuredParameters must be set to true.
    -  // +optional
    -  optional bool structuredParameters = 5;
    -}
    -
    -// ResourceClassList is a collection of classes.
    -message ResourceClassList {
    -  // Standard list metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // Items is the list of resource classes.
    -  repeated ResourceClass items = 2;
    -}
    -
    -// ResourceClassParameters defines resource requests for a ResourceClass in an
    -// in-tree format understood by Kubernetes.
    -message ResourceClassParameters {
    -  // Standard object metadata
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // If this object was created from some other resource, then this links
    -  // back to that resource. This field is used to find the in-tree representation
    -  // of the class parameters when the parameter reference of the class refers
    -  // to some unknown type.
    -  // +optional
    -  optional ResourceClassParametersReference generatedFrom = 2;
    -
    -  // VendorParameters are arbitrary setup parameters for all claims using
    -  // this class. They are ignored while allocating the claim. There must
    -  // not be more than one entry per driver.
    -  //
    -  // +listType=atomic
    -  // +optional
    -  repeated VendorParameters vendorParameters = 3;
    -
-  // Filters describes additional constraints that must be met when using the class.
    -  //
    -  // +listType=atomic
    -  repeated ResourceFilter filters = 4;
    -}
    -
    -// ResourceClassParametersList is a collection of ResourceClassParameters.
    -message ResourceClassParametersList {
    -  // Standard list metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // Items is the list of node resource capacity objects.
    -  repeated ResourceClassParameters items = 2;
    -}
    -
    -// ResourceClassParametersReference contains enough information to let you
    -// locate the parameters for a ResourceClass.
    -message ResourceClassParametersReference {
    -  // APIGroup is the group for the resource being referenced. It is
    -  // empty for the core API. This matches the group in the APIVersion
    -  // that is used when creating the resources.
    -  // +optional
    -  optional string apiGroup = 1;
    -
    -  // Kind is the type of resource being referenced. This is the same
    -  // value as in the parameter object's metadata.
    -  optional string kind = 2;
    -
    -  // Name is the name of resource being referenced.
    -  optional string name = 3;
    -
    -  // Namespace that contains the referenced resource. Must be empty
    -  // for cluster-scoped resources and non-empty for namespaced
    -  // resources.
    -  // +optional
    -  optional string namespace = 4;
    -}
    -
    -// ResourceFilter is a filter for resources from one particular driver.
    -message ResourceFilter {
    -  // DriverName is the name used by the DRA driver kubelet plugin.
    -  optional string driverName = 1;
    -
    -  optional ResourceFilterModel resourceFilterModel = 2;
    -}
    -
    -// ResourceFilterModel must have one and only one field set.
    -message ResourceFilterModel {
    -  // NamedResources describes a resource filter using the named resources model.
    -  //
    -  // +optional
    -  optional NamedResourcesFilter namedResources = 1;
    -}
    -
    -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.
    -message ResourceHandle {
    -  // DriverName specifies the name of the resource driver whose kubelet
    -  // plugin should be invoked to process this ResourceHandle's data once it
    -  // lands on a node. This may differ from the DriverName set in
    -  // ResourceClaimStatus this ResourceHandle is embedded in.
    -  optional string driverName = 1;
    -
    -  // Data contains the opaque data associated with this ResourceHandle. It is
    -  // set by the controller component of the resource driver whose name
    -  // matches the DriverName set in the ResourceClaimStatus this
    -  // ResourceHandle is embedded in. It is set at allocation time and is
    -  // intended for processing by the kubelet plugin whose name matches
    -  // the DriverName set in this ResourceHandle.
    -  //
    -  // The maximum size of this field is 16KiB. This may get increased in the
    -  // future, but not reduced.
    -  // +optional
    -  optional string data = 2;
    -
    -  // If StructuredData is set, then it needs to be used instead of Data.
    -  //
    -  // +optional
    -  optional StructuredResourceHandle structuredData = 5;
    -}
    -
    -// ResourceModel must have one and only one field set.
    -message ResourceModel {
    -  // NamedResources describes available resources using the named resources model.
    -  //
    -  // +optional
    -  optional NamedResourcesResources namedResources = 1;
    -}
    -
    -// ResourceRequest is a request for resources from one particular driver.
    -message ResourceRequest {
    -  // VendorParameters are arbitrary setup parameters for the requested
    -  // resource. They are ignored while allocating a claim.
    -  //
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 1;
    -
    -  optional ResourceRequestModel resourceRequestModel = 2;
    -}
    -
    -// ResourceRequestModel must have one and only one field set.
    -message ResourceRequestModel {
    -  // NamedResources describes a request for resources with the named resources model.
    -  //
    -  // +optional
    -  optional NamedResourcesRequest namedResources = 1;
    -}
    -
    -// ResourceSlice provides information about available
    -// resources on individual nodes.
    -message ResourceSlice {
    -  // Standard object metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    -
    -  // NodeName identifies the node which provides the resources
    -  // if they are local to a node.
    -  //
    -  // A field selector can be used to list only ResourceSlice
    -  // objects with a certain node name.
    -  //
    -  // +optional
    -  optional string nodeName = 2;
    -
    -  // DriverName identifies the DRA driver providing the capacity information.
    -  // A field selector can be used to list only ResourceSlice
    -  // objects with a certain driver name.
    -  optional string driverName = 3;
    -
    -  optional ResourceModel resourceModel = 4;
    -}
    -
    -// ResourceSliceList is a collection of ResourceSlices.
    -message ResourceSliceList {
    -  // Standard list metadata
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    -
    -  // Items is the list of node resource capacity objects.
    -  repeated ResourceSlice items = 2;
    -}
    -
    -// StructuredResourceHandle is the in-tree representation of the allocation result.
    -message StructuredResourceHandle {
    -  // VendorClassParameters are the per-claim configuration parameters
    -  // from the resource class at the time that the claim was allocated.
    -  //
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClassParameters = 1;
    -
    -  // VendorClaimParameters are the per-claim configuration parameters
    -  // from the resource claim parameters at the time that the claim was
    -  // allocated.
    -  //
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClaimParameters = 2;
    -
    -  // NodeName is the name of the node providing the necessary resources
    -  // if the resources are local to a node.
    -  //
    -  // +optional
    -  optional string nodeName = 4;
    -
    -  // Results lists all allocated driver resources.
    -  //
    -  // +listType=atomic
    -  repeated DriverAllocationResult results = 5;
    -}
    -
    -// VendorParameters are opaque parameters for one particular driver.
    -message VendorParameters {
    -  // DriverName is the name used by the DRA driver kubelet plugin.
    -  optional string driverName = 1;
    -
    -  // Parameters can be arbitrary setup parameters. They are ignored while
    -  // allocating a claim.
    -  //
    -  // +optional
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2;
    -}
    -
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/namedresources.go b/vendor/k8s.io/api/resource/v1alpha2/namedresources.go
    deleted file mode 100644
    index b80c5c1432..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/namedresources.go
    +++ /dev/null
    @@ -1,127 +0,0 @@
    -/*
    -Copyright 2023 The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -package v1alpha2
    -
    -import (
    -	"k8s.io/apimachinery/pkg/api/resource"
    -)
    -
    -// NamedResourcesResources is used in ResourceModel.
    -type NamedResourcesResources struct {
    -	// The list of all individual resources instances currently available.
    -	//
    -	// +listType=atomic
    -	Instances []NamedResourcesInstance `json:"instances" protobuf:"bytes,1,name=instances"`
    -}
    -
    -// NamedResourcesInstance represents one individual hardware instance that can be selected based
    -// on its attributes.
    -type NamedResourcesInstance struct {
    -	// Name is unique identifier among all resource instances managed by
    -	// the driver on the node. It must be a DNS subdomain.
    -	Name string `json:"name" protobuf:"bytes,1,name=name"`
    -
    -	// Attributes defines the attributes of this resource instance.
    -	// The name of each attribute must be unique.
    -	//
    -	// +listType=atomic
    -	// +optional
    -	Attributes []NamedResourcesAttribute `json:"attributes,omitempty" protobuf:"bytes,2,opt,name=attributes"`
    -}
    -
    -// NamedResourcesAttribute is a combination of an attribute name and its value.
    -type NamedResourcesAttribute struct {
    -	// Name is unique identifier among all resource instances managed by
    -	// the driver on the node. It must be a DNS subdomain.
    -	Name string `json:"name" protobuf:"bytes,1,name=name"`
    -
    -	NamedResourcesAttributeValue `json:",inline" protobuf:"bytes,2,opt,name=attributeValue"`
    -}
    -
    -// The Go field names below have a Value suffix to avoid a conflict between the
    -// field "String" and the corresponding method. That method is required.
    -// The Kubernetes API is defined without that suffix to keep it more natural.
    -
    -// NamedResourcesAttributeValue must have one and only one field set.
    -type NamedResourcesAttributeValue struct {
    -	// QuantityValue is a quantity.
    -	QuantityValue *resource.Quantity `json:"quantity,omitempty" protobuf:"bytes,6,opt,name=quantity"`
    -	// BoolValue is a true/false value.
    -	BoolValue *bool `json:"bool,omitempty" protobuf:"bytes,2,opt,name=bool"`
    -	// IntValue is a 64-bit integer.
    -	IntValue *int64 `json:"int,omitempty" protobuf:"varint,7,opt,name=int"`
    -	// IntSliceValue is an array of 64-bit integers.
    -	IntSliceValue *NamedResourcesIntSlice `json:"intSlice,omitempty" protobuf:"varint,8,rep,name=intSlice"`
    -	// StringValue is a string.
    -	StringValue *string `json:"string,omitempty" protobuf:"bytes,5,opt,name=string"`
    -	// StringSliceValue is an array of strings.
    -	StringSliceValue *NamedResourcesStringSlice `json:"stringSlice,omitempty" protobuf:"bytes,9,rep,name=stringSlice"`
    -	// VersionValue is a semantic version according to semver.org spec 2.0.0.
    -	VersionValue *string `json:"version,omitempty" protobuf:"bytes,10,opt,name=version"`
    -}
    -
    -// NamedResourcesIntSlice contains a slice of 64-bit integers.
    -type NamedResourcesIntSlice struct {
    -	// Ints is the slice of 64-bit integers.
    -	//
    -	// +listType=atomic
    -	Ints []int64 `json:"ints" protobuf:"bytes,1,opt,name=ints"`
    -}
    -
    -// NamedResourcesStringSlice contains a slice of strings.
    -type NamedResourcesStringSlice struct {
    -	// Strings is the slice of strings.
    -	//
    -	// +listType=atomic
    -	Strings []string `json:"strings" protobuf:"bytes,1,opt,name=strings"`
    -}
    -
    -// NamedResourcesRequest is used in ResourceRequestModel.
    -type NamedResourcesRequest struct {
    -	// Selector is a CEL expression which must evaluate to true if a
    -	// resource instance is suitable. The language is as defined in
    -	// https://kubernetes.io/docs/reference/using-api/cel/
    -	//
-	// In addition, for each type in NamedResourcesAttributeValue there is a map that
    -	// resolves to the corresponding value of the instance under evaluation.
    -	// For example:
    -	//
    -	//    attributes.quantity["a"].isGreaterThan(quantity("0")) &&
    -	//    attributes.stringslice["b"].isSorted()
    -	Selector string `json:"selector" protobuf:"bytes,1,name=selector"`
    -}
    -
    -// NamedResourcesFilter is used in ResourceFilterModel.
    -type NamedResourcesFilter struct {
    -	// Selector is a CEL expression which must evaluate to true if a
    -	// resource instance is suitable. The language is as defined in
    -	// https://kubernetes.io/docs/reference/using-api/cel/
    -	//
-	// In addition, for each type in NamedResourcesAttributeValue there is a map that
    -	// resolves to the corresponding value of the instance under evaluation.
    -	// For example:
    -	//
    -	//    attributes.quantity["a"].isGreaterThan(quantity("0")) &&
    -	//    attributes.stringslice["b"].isSorted()
    -	Selector string `json:"selector" protobuf:"bytes,1,name=selector"`
    -}
    -
    -// NamedResourcesAllocationResult is used in AllocationResultModel.
    -type NamedResourcesAllocationResult struct {
    -	// Name is the name of the selected resource instance.
    -	Name string `json:"name" protobuf:"bytes,1,name=name"`
    -}
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/register.go b/vendor/k8s.io/api/resource/v1alpha2/register.go
    deleted file mode 100644
    index 893fb4c1e5..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/register.go
    +++ /dev/null
    @@ -1,69 +0,0 @@
    -/*
    -Copyright 2022 The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -package v1alpha2
    -
    -import (
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	"k8s.io/apimachinery/pkg/runtime"
    -	"k8s.io/apimachinery/pkg/runtime/schema"
    -)
    -
    -// GroupName is the group name use in this package
    -const GroupName = "resource.k8s.io"
    -
    -// SchemeGroupVersion is group version used to register these objects
    -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"}
    -
    -// Resource takes an unqualified resource and returns a Group qualified GroupResource
    -func Resource(resource string) schema.GroupResource {
    -	return SchemeGroupVersion.WithResource(resource).GroupResource()
    -}
    -
    -var (
    -	// We only register manually written functions here. The registration of the
    -	// generated functions takes place in the generated files. The separation
    -	// makes the code compile even when the generated files are missing.
    -	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
    -	AddToScheme   = SchemeBuilder.AddToScheme
    -)
    -
    -// Adds the list of known types to the given scheme.
    -func addKnownTypes(scheme *runtime.Scheme) error {
    -	scheme.AddKnownTypes(SchemeGroupVersion,
    -		&ResourceClass{},
    -		&ResourceClassList{},
    -		&ResourceClaim{},
    -		&ResourceClaimList{},
    -		&ResourceClaimTemplate{},
    -		&ResourceClaimTemplateList{},
    -		&PodSchedulingContext{},
    -		&PodSchedulingContextList{},
    -		&ResourceSlice{},
    -		&ResourceSliceList{},
    -		&ResourceClaimParameters{},
    -		&ResourceClaimParametersList{},
    -		&ResourceClassParameters{},
    -		&ResourceClassParametersList{},
    -	)
    -
    -	// Add common types
    -	scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{})
    -
    -	// Add the watch version that applies
    -	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    -	return nil
    -}
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go
    deleted file mode 100644
    index 9005144cf6..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/types.go
    +++ /dev/null
    @@ -1,737 +0,0 @@
    -/*
    -Copyright 2022 The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -package v1alpha2
    -
    -import (
    -	v1 "k8s.io/api/core/v1"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	"k8s.io/apimachinery/pkg/runtime"
    -	"k8s.io/apimachinery/pkg/types"
    -)
    -
    -const (
    -	// Finalizer is the finalizer that gets set for claims
    -	// which were allocated through a builtin controller.
    -	Finalizer = "dra.k8s.io/delete-protection"
    -)
    -
    -// +genclient
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// ResourceClaim describes which resources are needed by a resource consumer.
    -// Its status tracks whether the resource has been allocated and what the
    -// resulting attributes are.
    -//
    -// This is an alpha type and requires enabling the DynamicResourceAllocation
    -// feature gate.
    -type ResourceClaim struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object metadata
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Spec describes the desired attributes of a resource that then needs
    -	// to be allocated. It can only be set once when creating the
    -	// ResourceClaim.
    -	Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    -
    -	// Status describes whether the resource is available and with which
    -	// attributes.
    -	// +optional
    -	Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    -}
    -
    -// ResourceClaimSpec defines how a resource is to be allocated.
    -type ResourceClaimSpec struct {
    -	// ResourceClassName references the driver and additional parameters
    -	// via the name of a ResourceClass that was created as part of the
    -	// driver deployment.
    -	ResourceClassName string `json:"resourceClassName" protobuf:"bytes,1,name=resourceClassName"`
    -
    -	// ParametersRef references a separate object with arbitrary parameters
    -	// that will be used by the driver when allocating a resource for the
    -	// claim.
    -	//
    -	// The object must be in the same namespace as the ResourceClaim.
    -	// +optional
    -	ParametersRef *ResourceClaimParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,2,opt,name=parametersRef"`
    -
    -	// Allocation can start immediately or when a Pod wants to use the
    -	// resource. "WaitForFirstConsumer" is the default.
    -	// +optional
    -	AllocationMode AllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"`
    -}
    -
    -// AllocationMode describes whether a ResourceClaim gets allocated immediately
    -// when it gets created (AllocationModeImmediate) or whether allocation is
    -// delayed until it is needed for a Pod
    -// (AllocationModeWaitForFirstConsumer). Other modes might get added in the
    -// future.
    -type AllocationMode string
    -
    -const (
    -	// When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is
    -	// delayed until a Pod gets scheduled that needs the ResourceClaim. The
    -	// scheduler will consider all resource requirements of that Pod and
    -	// trigger allocation for a node that fits the Pod.
    -	AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer"
    -
    -	// When a ResourceClaim has AllocationModeImmediate, allocation starts
    -	// as soon as the ResourceClaim gets created. This is done without
    -	// considering the needs of Pods that will use the ResourceClaim
    -	// because those Pods are not known yet.
    -	AllocationModeImmediate AllocationMode = "Immediate"
    -)
    -
    -// ResourceClaimStatus tracks whether the resource has been allocated and what
    -// the resulting attributes are.
    -type ResourceClaimStatus struct {
    -	// DriverName is a copy of the driver name from the ResourceClass at
    -	// the time when allocation started.
    -	// +optional
    -	DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
    -
    -	// Allocation is set by the resource driver once a resource or set of
    -	// resources has been allocated successfully. If this is not specified, the
    -	// resources have not been allocated yet.
    -	// +optional
    -	Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"`
    -
    -	// ReservedFor indicates which entities are currently allowed to use
    -	// the claim. A Pod which references a ResourceClaim which is not
    -	// reserved for that Pod will not be started.
    -	//
    -	// There can be at most 32 such reservations. This may get increased in
    -	// the future, but not reduced.
    -	//
    -	// +listType=map
    -	// +listMapKey=uid
    -	// +patchStrategy=merge
    -	// +patchMergeKey=uid
    -	// +optional
    -	ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"`
    -
    -	// DeallocationRequested indicates that a ResourceClaim is to be
    -	// deallocated.
    -	//
    -	// The driver then must deallocate this claim and reset the field
    -	// together with clearing the Allocation field.
    -	//
    -	// While DeallocationRequested is set, no new consumers may be added to
    -	// ReservedFor.
    -	// +optional
    -	DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"varint,4,opt,name=deallocationRequested"`
    -}
    -
-// ResourceClaimReservedForMaxSize is the maximum number of entries in
-// claim.status.reservedFor.
    -const ResourceClaimReservedForMaxSize = 32
    -
    -// AllocationResult contains attributes of an allocated resource.
    -type AllocationResult struct {
    -	// ResourceHandles contain the state associated with an allocation that
    -	// should be maintained throughout the lifetime of a claim. Each
    -	// ResourceHandle contains data that should be passed to a specific kubelet
    -	// plugin once it lands on a node. This data is returned by the driver
    -	// after a successful allocation and is opaque to Kubernetes. Driver
    -	// documentation may explain to users how to interpret this data if needed.
    -	//
    -	// Setting this field is optional. It has a maximum size of 32 entries.
    -	// If null (or empty), it is assumed this allocation will be processed by a
    -	// single kubelet plugin with no ResourceHandle data attached. The name of
    -	// the kubelet plugin invoked will match the DriverName set in the
    -	// ResourceClaimStatus this AllocationResult is embedded in.
    -	//
    -	// +listType=atomic
    -	// +optional
    -	ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"`
    -
    -	// This field will get set by the resource driver after it has allocated
    -	// the resource to inform the scheduler where it can schedule Pods using
    -	// the ResourceClaim.
    -	//
    -	// Setting this field is optional. If null, the resource is available
    -	// everywhere.
    -	// +optional
    -	AvailableOnNodes *v1.NodeSelector `json:"availableOnNodes,omitempty" protobuf:"bytes,2,opt,name=availableOnNodes"`
    -
    -	// Shareable determines whether the resource supports more
    -	// than one consumer at a time.
    -	// +optional
    -	Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"`
    -}
    -
    -// AllocationResultResourceHandlesMaxSize represents the maximum number of
    -// entries in allocation.resourceHandles.
    -const AllocationResultResourceHandlesMaxSize = 32
    -
    -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.
    -type ResourceHandle struct {
    -	// DriverName specifies the name of the resource driver whose kubelet
    -	// plugin should be invoked to process this ResourceHandle's data once it
    -	// lands on a node. This may differ from the DriverName set in
    -	// ResourceClaimStatus this ResourceHandle is embedded in.
    -	DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
    -
    -	// Data contains the opaque data associated with this ResourceHandle. It is
    -	// set by the controller component of the resource driver whose name
    -	// matches the DriverName set in the ResourceClaimStatus this
    -	// ResourceHandle is embedded in. It is set at allocation time and is
    -	// intended for processing by the kubelet plugin whose name matches
    -	// the DriverName set in this ResourceHandle.
    -	//
    -	// The maximum size of this field is 16KiB. This may get increased in the
    -	// future, but not reduced.
    -	// +optional
    -	Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
    -
    -	// If StructuredData is set, then it needs to be used instead of Data.
    -	//
    -	// +optional
    -	StructuredData *StructuredResourceHandle `json:"structuredData,omitempty" protobuf:"bytes,5,opt,name=structuredData"`
    -}
    -
    -// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data.
    -const ResourceHandleDataMaxSize = 16 * 1024
    -
    -// StructuredResourceHandle is the in-tree representation of the allocation result.
    -type StructuredResourceHandle struct {
    -	// VendorClassParameters are the per-claim configuration parameters
    -	// from the resource class at the time that the claim was allocated.
    -	//
    -	// +optional
    -	VendorClassParameters runtime.RawExtension `json:"vendorClassParameters,omitempty" protobuf:"bytes,1,opt,name=vendorClassParameters"`
    -
    -	// VendorClaimParameters are the per-claim configuration parameters
    -	// from the resource claim parameters at the time that the claim was
    -	// allocated.
    -	//
    -	// +optional
    -	VendorClaimParameters runtime.RawExtension `json:"vendorClaimParameters,omitempty" protobuf:"bytes,2,opt,name=vendorClaimParameters"`
    -
    -	// NodeName is the name of the node providing the necessary resources
    -	// if the resources are local to a node.
    -	//
    -	// +optional
    -	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,4,name=nodeName"`
    -
    -	// Results lists all allocated driver resources.
    -	//
    -	// +listType=atomic
    -	Results []DriverAllocationResult `json:"results" protobuf:"bytes,5,name=results"`
    -}
    -
    -// DriverAllocationResult contains vendor parameters and the allocation result for
    -// one request.
    -type DriverAllocationResult struct {
    -	// VendorRequestParameters are the per-request configuration parameters
    -	// from the time that the claim was allocated.
    -	//
    -	// +optional
    -	VendorRequestParameters runtime.RawExtension `json:"vendorRequestParameters,omitempty" protobuf:"bytes,1,opt,name=vendorRequestParameters"`
    -
    -	AllocationResultModel `json:",inline" protobuf:"bytes,2,name=allocationResultModel"`
    -}
    -
    -// AllocationResultModel must have one and only one field set.
    -type AllocationResultModel struct {
    -	// NamedResources describes the allocation result when using the named resources model.
    -	//
    -	// +optional
    -	NamedResources *NamedResourcesAllocationResult `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// ResourceClaimList is a collection of claims.
    -type ResourceClaimList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard list metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Items is the list of resource claims.
    -	Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// +genclient
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// PodSchedulingContext objects hold information that is needed to schedule
    -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
    -// mode.
    -//
    -// This is an alpha type and requires enabling the DynamicResourceAllocation
    -// feature gate.
    -type PodSchedulingContext struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object metadata
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Spec describes where resources for the Pod are needed.
    -	Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    -
    -	// Status describes where resources for the Pod can be allocated.
    -	// +optional
    -	Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    -}
    -
    -// PodSchedulingContextSpec describes where resources for the Pod are needed.
    -type PodSchedulingContextSpec struct {
    -	// SelectedNode is the node for which allocation of ResourceClaims that
    -	// are referenced by the Pod and that use "WaitForFirstConsumer"
    -	// allocation is to be attempted.
    -	// +optional
    -	SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"`
    -
    -	// PotentialNodes lists nodes where the Pod might be able to run.
    -	//
    -	// The size of this field is limited to 128. This is large enough for
    -	// many clusters. Larger clusters may need more attempts to find a node
    -	// that suits all pending resources. This may get increased in the
    -	// future, but not reduced.
    -	//
    -	// +listType=atomic
    -	// +optional
    -	PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
    -}
    -
    -// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
    -type PodSchedulingContextStatus struct {
    -	// ResourceClaims describes resource availability for each
    -	// pod.spec.resourceClaim entry where the corresponding ResourceClaim
    -	// uses "WaitForFirstConsumer" allocation mode.
    -	//
    -	// +listType=map
    -	// +listMapKey=name
    -	// +optional
    -	ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"`
    -
    -	// If there ever is a need to support other kinds of resources
    -	// than ResourceClaim, then new fields could get added here
    -	// for those other resources.
    -}
    -
    -// ResourceClaimSchedulingStatus contains information about one particular
    -// ResourceClaim with "WaitForFirstConsumer" allocation mode.
    -type ResourceClaimSchedulingStatus struct {
    -	// Name matches the pod.spec.resourceClaims[*].Name field.
    -	// +optional
    -	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
    -
    -	// UnsuitableNodes lists nodes that the ResourceClaim cannot be
    -	// allocated for.
    -	//
    -	// The size of this field is limited to 128, the same as for
    -	// PodSchedulingSpec.PotentialNodes. This may get increased in the
    -	// future, but not reduced.
    -	//
    -	// +listType=atomic
    -	// +optional
    -	UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"`
    -}
    -
    -// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
    -// node lists that are stored in PodSchedulingContext objects. This limit is part
    -// of the API.
    -const PodSchedulingNodeListMaxSize = 128
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// PodSchedulingContextList is a collection of Pod scheduling objects.
    -type PodSchedulingContextList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard list metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Items is the list of PodSchedulingContext objects.
    -	Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// +genclient
    -// +genclient:nonNamespaced
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// ResourceClass is used by administrators to influence how resources
    -// are allocated.
    -//
    -// This is an alpha type and requires enabling the DynamicResourceAllocation
    -// feature gate.
    -type ResourceClass struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object metadata
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// DriverName defines the name of the dynamic resource driver that is
    -	// used for allocation of a ResourceClaim that uses this class.
    -	//
    -	// Resource drivers have a unique name in forward domain order
    -	// (acme.example.com).
    -	DriverName string `json:"driverName" protobuf:"bytes,2,name=driverName"`
    -
    -	// ParametersRef references an arbitrary separate object that may hold
    -	// parameters that will be used by the driver when allocating a
    -	// resource that uses this class. A dynamic resource driver can
-	// distinguish between parameters stored here and those stored in
    -	// ResourceClaimSpec.
    -	// +optional
    -	ParametersRef *ResourceClassParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,3,opt,name=parametersRef"`
    -
    -	// Only nodes matching the selector will be considered by the scheduler
    -	// when trying to find a Node that fits a Pod when that Pod uses
    -	// a ResourceClaim that has not been allocated yet.
    -	//
    -	// Setting this field is optional. If null, all nodes are candidates.
    -	// +optional
    -	SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,4,opt,name=suitableNodes"`
    -
    -	// If and only if allocation of claims using this class is handled
    -	// via structured parameters, then StructuredParameters must be set to true.
    -	// +optional
    -	StructuredParameters *bool `json:"structuredParameters,omitempty" protobuf:"bytes,5,opt,name=structuredParameters"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// ResourceClassList is a collection of classes.
    -type ResourceClassList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard list metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Items is the list of resource classes.
    -	Items []ResourceClass `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// ResourceClassParametersReference contains enough information to let you
    -// locate the parameters for a ResourceClass.
    -type ResourceClassParametersReference struct {
    -	// APIGroup is the group for the resource being referenced. It is
    -	// empty for the core API. This matches the group in the APIVersion
    -	// that is used when creating the resources.
    -	// +optional
    -	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
    -	// Kind is the type of resource being referenced. This is the same
    -	// value as in the parameter object's metadata.
    -	Kind string `json:"kind" protobuf:"bytes,2,name=kind"`
    -	// Name is the name of resource being referenced.
    -	Name string `json:"name" protobuf:"bytes,3,name=name"`
    -	// Namespace that contains the referenced resource. Must be empty
    -	// for cluster-scoped resources and non-empty for namespaced
    -	// resources.
    -	// +optional
    -	Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
    -}
    -
    -// ResourceClaimParametersReference contains enough information to let you
    -// locate the parameters for a ResourceClaim. The object must be in the same
    -// namespace as the ResourceClaim.
    -type ResourceClaimParametersReference struct {
    -	// APIGroup is the group for the resource being referenced. It is
    -	// empty for the core API. This matches the group in the APIVersion
    -	// that is used when creating the resources.
    -	// +optional
    -	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
    -	// Kind is the type of resource being referenced. This is the same
    -	// value as in the parameter object's metadata, for example "ConfigMap".
    -	Kind string `json:"kind" protobuf:"bytes,2,name=kind"`
    -	// Name is the name of resource being referenced.
    -	Name string `json:"name" protobuf:"bytes,3,name=name"`
    -}
    -
    -// ResourceClaimConsumerReference contains enough information to let you
    -// locate the consumer of a ResourceClaim. The user must be a resource in the same
    -// namespace as the ResourceClaim.
    -type ResourceClaimConsumerReference struct {
    -	// APIGroup is the group for the resource being referenced. It is
    -	// empty for the core API. This matches the group in the APIVersion
    -	// that is used when creating the resources.
    -	// +optional
    -	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
    -	// Resource is the type of resource being referenced, for example "pods".
    -	Resource string `json:"resource" protobuf:"bytes,3,name=resource"`
    -	// Name is the name of resource being referenced.
    -	Name string `json:"name" protobuf:"bytes,4,name=name"`
    -	// UID identifies exactly one incarnation of the resource.
    -	UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"`
    -}
    -
    -// +genclient
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// ResourceClaimTemplate is used to produce ResourceClaim objects.
    -type ResourceClaimTemplate struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object metadata
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Describes the ResourceClaim that is to be generated.
    -	//
    -	// This field is immutable. A ResourceClaim will get created by the
    -	// control plane for a Pod when needed and then not get updated
    -	// anymore.
    -	Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    -}
    -
    -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
    -type ResourceClaimTemplateSpec struct {
    -	// ObjectMeta may contain labels and annotations that will be copied into the PVC
    -	// when creating it. No other fields are allowed and will be rejected during
    -	// validation.
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Spec for the ResourceClaim. The entire content is copied unchanged
    -	// into the ResourceClaim that gets created from this template. The
    -	// same fields as in a ResourceClaim are also valid here.
    -	Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.26
    -
    -// ResourceClaimTemplateList is a collection of claim templates.
    -type ResourceClaimTemplateList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard list metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Items is the list of resource claim templates.
    -	Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// +genclient
    -// +genclient:nonNamespaced
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
    -
    -// ResourceSlice provides information about available
    -// resources on individual nodes.
    -type ResourceSlice struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object metadata
    -	// +optional
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// NodeName identifies the node which provides the resources
    -	// if they are local to a node.
    -	//
    -	// A field selector can be used to list only ResourceSlice
    -	// objects with a certain node name.
    -	//
    -	// +optional
    -	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,2,opt,name=nodeName"`
    -
    -	// DriverName identifies the DRA driver providing the capacity information.
    -	// A field selector can be used to list only ResourceSlice
    -	// objects with a certain driver name.
    -	DriverName string `json:"driverName" protobuf:"bytes,3,name=driverName"`
    -
    -	ResourceModel `json:",inline" protobuf:"bytes,4,name=resourceModel"`
    -}
    -
    -// ResourceModel must have one and only one field set.
    -type ResourceModel struct {
    -	// NamedResources describes available resources using the named resources model.
    -	//
    -	// +optional
    -	NamedResources *NamedResourcesResources `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
    -
    -// ResourceSliceList is a collection of ResourceSlices.
    -type ResourceSliceList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard list metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Items is the list of node resource capacity objects.
    -	Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// +genclient
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
    -
    -// ResourceClaimParameters defines resource requests for a ResourceClaim in an
    -// in-tree format understood by Kubernetes.
    -type ResourceClaimParameters struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object metadata
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// If this object was created from some other resource, then this links
    -	// back to that resource. This field is used to find the in-tree representation
    -	// of the claim parameters when the parameter reference of the claim refers
    -	// to some unknown type.
    -	// +optional
    -	GeneratedFrom *ResourceClaimParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"`
    -
    -	// Shareable indicates whether the allocated claim is meant to be shareable
    -	// by multiple consumers at the same time.
    -	// +optional
    -	Shareable bool `json:"shareable,omitempty" protobuf:"bytes,3,opt,name=shareable"`
    -
    -	// DriverRequests describes all resources that are needed for the
    -	// allocated claim. A single claim may use resources coming from
    -	// different drivers. For each driver, this array has at most one
    -	// entry which then may have one or more per-driver requests.
    -	//
    -	// May be empty, in which case the claim can always be allocated.
    -	//
    -	// +listType=atomic
    -	DriverRequests []DriverRequests `json:"driverRequests,omitempty" protobuf:"bytes,4,opt,name=driverRequests"`
    -}
    -
    -// DriverRequests describes all resources that are needed from one particular driver.
    -type DriverRequests struct {
    -	// DriverName is the name used by the DRA driver kubelet plugin.
    -	DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
    -
    -	// VendorParameters are arbitrary setup parameters for all requests of the
    -	// claim. They are ignored while allocating the claim.
    -	//
    -	// +optional
    -	VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,2,opt,name=vendorParameters"`
    -
    -	// Requests describes all resources that are needed from the driver.
    -	// +listType=atomic
    -	Requests []ResourceRequest `json:"requests,omitempty" protobuf:"bytes,3,opt,name=requests"`
    -}
    -
    -// ResourceRequest is a request for resources from one particular driver.
    -type ResourceRequest struct {
    -	// VendorParameters are arbitrary setup parameters for the requested
    -	// resource. They are ignored while allocating a claim.
    -	//
    -	// +optional
    -	VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,1,opt,name=vendorParameters"`
    -
    -	ResourceRequestModel `json:",inline" protobuf:"bytes,2,name=resourceRequestModel"`
    -}
    -
    -// ResourceRequestModel must have one and only one field set.
    -type ResourceRequestModel struct {
    -	// NamedResources describes a request for resources with the named resources model.
    -	//
    -	// +optional
    -	NamedResources *NamedResourcesRequest `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
    -
    -// ResourceClaimParametersList is a collection of ResourceClaimParameters.
    -type ResourceClaimParametersList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard list metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Items is the list of node resource capacity objects.
    -	Items []ResourceClaimParameters `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// +genclient
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
    -
    -// ResourceClassParameters defines resource requests for a ResourceClass in an
    -// in-tree format understood by Kubernetes.
    -type ResourceClassParameters struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard object metadata
    -	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// If this object was created from some other resource, then this links
    -	// back to that resource. This field is used to find the in-tree representation
    -	// of the class parameters when the parameter reference of the class refers
    -	// to some unknown type.
    -	// +optional
    -	GeneratedFrom *ResourceClassParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"`
    -
    -	// VendorParameters are arbitrary setup parameters for all claims using
    -	// this class. They are ignored while allocating the claim. There must
    -	// not be more than one entry per driver.
    -	//
    -	// +listType=atomic
    -	// +optional
    -	VendorParameters []VendorParameters `json:"vendorParameters,omitempty" protobuf:"bytes,3,opt,name=vendorParameters"`
    -
-	// Filters describes additional constraints that must be met when using the class.
    -	//
    -	// +listType=atomic
    -	Filters []ResourceFilter `json:"filters,omitempty" protobuf:"bytes,4,opt,name=filters"`
    -}
    -
    -// ResourceFilter is a filter for resources from one particular driver.
    -type ResourceFilter struct {
    -	// DriverName is the name used by the DRA driver kubelet plugin.
    -	DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
    -
    -	ResourceFilterModel `json:",inline" protobuf:"bytes,2,name=resourceFilterModel"`
    -}
    -
    -// ResourceFilterModel must have one and only one field set.
    -type ResourceFilterModel struct {
    -	// NamedResources describes a resource filter using the named resources model.
    -	//
    -	// +optional
    -	NamedResources *NamedResourcesFilter `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
    -}
    -
    -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    -// +k8s:prerelease-lifecycle-gen:introduced=1.30
    -
    -// ResourceClassParametersList is a collection of ResourceClassParameters.
    -type ResourceClassParametersList struct {
    -	metav1.TypeMeta `json:",inline"`
    -	// Standard list metadata
    -	// +optional
    -	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    -
    -	// Items is the list of node resource capacity objects.
    -	Items []ResourceClassParameters `json:"items" protobuf:"bytes,2,rep,name=items"`
    -}
    -
    -// VendorParameters are opaque parameters for one particular driver.
    -type VendorParameters struct {
    -	// DriverName is the name used by the DRA driver kubelet plugin.
    -	DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
    -
    -	// Parameters can be arbitrary setup parameters. They are ignored while
    -	// allocating a claim.
    -	//
    -	// +optional
    -	Parameters runtime.RawExtension `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"`
    -}
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go
    deleted file mode 100644
    index 11f9ffbead..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go
    +++ /dev/null
    @@ -1,395 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -package v1alpha2
    -
    -// This file contains a collection of methods that can be used from go-restful to
    -// generate Swagger API documentation for its models. Please read this PR for more
    -// information on the implementation: https://github.com/emicklei/go-restful/pull/215
    -//
    -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
    -// they are on one line! For multiple line or blocks that you want to ignore use ---.
    -// Any context after a --- is ignored.
    -//
    -// Those methods can be generated by using hack/update-codegen.sh
    -
    -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
    -var map_AllocationResult = map[string]string{
    -	"":                 "AllocationResult contains attributes of an allocated resource.",
    -	"resourceHandles":  "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.",
    -	"availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.",
    -	"shareable":        "Shareable determines whether the resource supports more than one consumer at a time.",
    -}
    -
    -func (AllocationResult) SwaggerDoc() map[string]string {
    -	return map_AllocationResult
    -}
    -
    -var map_AllocationResultModel = map[string]string{
    -	"":               "AllocationResultModel must have one and only one field set.",
    -	"namedResources": "NamedResources describes the allocation result when using the named resources model.",
    -}
    -
    -func (AllocationResultModel) SwaggerDoc() map[string]string {
    -	return map_AllocationResultModel
    -}
    -
    -var map_DriverAllocationResult = map[string]string{
    -	"":                        "DriverAllocationResult contains vendor parameters and the allocation result for one request.",
    -	"vendorRequestParameters": "VendorRequestParameters are the per-request configuration parameters from the time that the claim was allocated.",
    -}
    -
    -func (DriverAllocationResult) SwaggerDoc() map[string]string {
    -	return map_DriverAllocationResult
    -}
    -
    -var map_DriverRequests = map[string]string{
    -	"":                 "DriverRequests describes all resources that are needed from one particular driver.",
    -	"driverName":       "DriverName is the name used by the DRA driver kubelet plugin.",
    -	"vendorParameters": "VendorParameters are arbitrary setup parameters for all requests of the claim. They are ignored while allocating the claim.",
    -	"requests":         "Requests describes all resources that are needed from the driver.",
    -}
    -
    -func (DriverRequests) SwaggerDoc() map[string]string {
    -	return map_DriverRequests
    -}
    -
    -var map_PodSchedulingContext = map[string]string{
    -	"":         "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
    -	"metadata": "Standard object metadata",
    -	"spec":     "Spec describes where resources for the Pod are needed.",
    -	"status":   "Status describes where resources for the Pod can be allocated.",
    -}
    -
    -func (PodSchedulingContext) SwaggerDoc() map[string]string {
    -	return map_PodSchedulingContext
    -}
    -
    -var map_PodSchedulingContextList = map[string]string{
    -	"":         "PodSchedulingContextList is a collection of Pod scheduling objects.",
    -	"metadata": "Standard list metadata",
    -	"items":    "Items is the list of PodSchedulingContext objects.",
    -}
    -
    -func (PodSchedulingContextList) SwaggerDoc() map[string]string {
    -	return map_PodSchedulingContextList
    -}
    -
    -var map_PodSchedulingContextSpec = map[string]string{
    -	"":               "PodSchedulingContextSpec describes where resources for the Pod are needed.",
    -	"selectedNode":   "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.",
    -	"potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.",
    -}
    -
    -func (PodSchedulingContextSpec) SwaggerDoc() map[string]string {
    -	return map_PodSchedulingContextSpec
    -}
    -
    -var map_PodSchedulingContextStatus = map[string]string{
    -	"":               "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
    -	"resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.",
    -}
    -
    -func (PodSchedulingContextStatus) SwaggerDoc() map[string]string {
    -	return map_PodSchedulingContextStatus
    -}
    -
    -var map_ResourceClaim = map[string]string{
    -	"":         "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
    -	"metadata": "Standard object metadata",
    -	"spec":     "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim.",
    -	"status":   "Status describes whether the resource is available and with which attributes.",
    -}
    -
    -func (ResourceClaim) SwaggerDoc() map[string]string {
    -	return map_ResourceClaim
    -}
    -
    -var map_ResourceClaimConsumerReference = map[string]string{
    -	"":         "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.",
    -	"apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.",
    -	"resource": "Resource is the type of resource being referenced, for example \"pods\".",
    -	"name":     "Name is the name of resource being referenced.",
    -	"uid":      "UID identifies exactly one incarnation of the resource.",
    -}
    -
    -func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimConsumerReference
    -}
    -
    -var map_ResourceClaimList = map[string]string{
    -	"":         "ResourceClaimList is a collection of claims.",
    -	"metadata": "Standard list metadata",
    -	"items":    "Items is the list of resource claims.",
    -}
    -
    -func (ResourceClaimList) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimList
    -}
    -
    -var map_ResourceClaimParameters = map[string]string{
    -	"":               "ResourceClaimParameters defines resource requests for a ResourceClaim in an in-tree format understood by Kubernetes.",
    -	"metadata":       "Standard object metadata",
    -	"generatedFrom":  "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the claim parameters when the parameter reference of the claim refers to some unknown type.",
    -	"shareable":      "Shareable indicates whether the allocated claim is meant to be shareable by multiple consumers at the same time.",
    -	"driverRequests": "DriverRequests describes all resources that are needed for the allocated claim. A single claim may use resources coming from different drivers. For each driver, this array has at most one entry which then may have one or more per-driver requests.\n\nMay be empty, in which case the claim can always be allocated.",
    -}
    -
    -func (ResourceClaimParameters) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimParameters
    -}
    -
    -var map_ResourceClaimParametersList = map[string]string{
    -	"":         "ResourceClaimParametersList is a collection of ResourceClaimParameters.",
    -	"metadata": "Standard list metadata",
    -	"items":    "Items is the list of node resource capacity objects.",
    -}
    -
    -func (ResourceClaimParametersList) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimParametersList
    -}
    -
    -var map_ResourceClaimParametersReference = map[string]string{
    -	"":         "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.",
    -	"apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.",
    -	"kind":     "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \"ConfigMap\".",
    -	"name":     "Name is the name of resource being referenced.",
    -}
    -
    -func (ResourceClaimParametersReference) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimParametersReference
    -}
    -
    -var map_ResourceClaimSchedulingStatus = map[string]string{
    -	"":                "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.",
    -	"name":            "Name matches the pod.spec.resourceClaims[*].Name field.",
    -	"unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.",
    -}
    -
    -func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimSchedulingStatus
    -}
    -
    -var map_ResourceClaimSpec = map[string]string{
    -	"":                  "ResourceClaimSpec defines how a resource is to be allocated.",
    -	"resourceClassName": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.",
    -	"parametersRef":     "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.",
    -	"allocationMode":    "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.",
    -}
    -
    -func (ResourceClaimSpec) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimSpec
    -}
    -
    -var map_ResourceClaimStatus = map[string]string{
    -	"":                      "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.",
    -	"driverName":            "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.",
    -	"allocation":            "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.",
    -	"reservedFor":           "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
    -	"deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.",
    -}
    -
    -func (ResourceClaimStatus) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimStatus
    -}
    -
    -var map_ResourceClaimTemplate = map[string]string{
    -	"":         "ResourceClaimTemplate is used to produce ResourceClaim objects.",
    -	"metadata": "Standard object metadata",
    -	"spec":     "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.",
    -}
    -
    -func (ResourceClaimTemplate) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimTemplate
    -}
    -
    -var map_ResourceClaimTemplateList = map[string]string{
    -	"":         "ResourceClaimTemplateList is a collection of claim templates.",
    -	"metadata": "Standard list metadata",
    -	"items":    "Items is the list of resource claim templates.",
    -}
    -
    -func (ResourceClaimTemplateList) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimTemplateList
    -}
    -
    -var map_ResourceClaimTemplateSpec = map[string]string{
    -	"":         "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.",
    -	"metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.",
    -	"spec":     "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.",
    -}
    -
    -func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string {
    -	return map_ResourceClaimTemplateSpec
    -}
    -
    -var map_ResourceClass = map[string]string{
    -	"":                     "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
    -	"metadata":             "Standard object metadata",
    -	"driverName":           "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).",
-	"parametersRef":        "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and those stored in ResourceClaimSpec.",
    -	"suitableNodes":        "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates.",
    -	"structuredParameters": "If and only if allocation of claims using this class is handled via structured parameters, then StructuredParameters must be set to true.",
    -}
    -
    -func (ResourceClass) SwaggerDoc() map[string]string {
    -	return map_ResourceClass
    -}
    -
    -var map_ResourceClassList = map[string]string{
    -	"":         "ResourceClassList is a collection of classes.",
    -	"metadata": "Standard list metadata",
    -	"items":    "Items is the list of resource classes.",
    -}
    -
    -func (ResourceClassList) SwaggerDoc() map[string]string {
    -	return map_ResourceClassList
    -}
    -
    -var map_ResourceClassParameters = map[string]string{
    -	"":                 "ResourceClassParameters defines resource requests for a ResourceClass in an in-tree format understood by Kubernetes.",
    -	"metadata":         "Standard object metadata",
    -	"generatedFrom":    "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the class parameters when the parameter reference of the class refers to some unknown type.",
    -	"vendorParameters": "VendorParameters are arbitrary setup parameters for all claims using this class. They are ignored while allocating the claim. There must not be more than one entry per driver.",
-	"filters":          "Filters describes additional constraints that must be met when using the class.",
    -}
    -
    -func (ResourceClassParameters) SwaggerDoc() map[string]string {
    -	return map_ResourceClassParameters
    -}
    -
    -var map_ResourceClassParametersList = map[string]string{
    -	"":         "ResourceClassParametersList is a collection of ResourceClassParameters.",
    -	"metadata": "Standard list metadata",
    -	"items":    "Items is the list of node resource capacity objects.",
    -}
    -
    -func (ResourceClassParametersList) SwaggerDoc() map[string]string {
    -	return map_ResourceClassParametersList
    -}
    -
    -var map_ResourceClassParametersReference = map[string]string{
    -	"":          "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.",
    -	"apiGroup":  "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.",
    -	"kind":      "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.",
    -	"name":      "Name is the name of resource being referenced.",
    -	"namespace": "Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources.",
    -}
    -
    -func (ResourceClassParametersReference) SwaggerDoc() map[string]string {
    -	return map_ResourceClassParametersReference
    -}
    -
    -var map_ResourceFilter = map[string]string{
    -	"":           "ResourceFilter is a filter for resources from one particular driver.",
    -	"driverName": "DriverName is the name used by the DRA driver kubelet plugin.",
    -}
    -
    -func (ResourceFilter) SwaggerDoc() map[string]string {
    -	return map_ResourceFilter
    -}
    -
    -var map_ResourceFilterModel = map[string]string{
    -	"":               "ResourceFilterModel must have one and only one field set.",
    -	"namedResources": "NamedResources describes a resource filter using the named resources model.",
    -}
    -
    -func (ResourceFilterModel) SwaggerDoc() map[string]string {
    -	return map_ResourceFilterModel
    -}
    -
    -var map_ResourceHandle = map[string]string{
    -	"":               "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.",
    -	"driverName":     "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.",
    -	"data":           "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.",
    -	"structuredData": "If StructuredData is set, then it needs to be used instead of Data.",
    -}
    -
    -func (ResourceHandle) SwaggerDoc() map[string]string {
    -	return map_ResourceHandle
    -}
    -
    -var map_ResourceModel = map[string]string{
    -	"":               "ResourceModel must have one and only one field set.",
    -	"namedResources": "NamedResources describes available resources using the named resources model.",
    -}
    -
    -func (ResourceModel) SwaggerDoc() map[string]string {
    -	return map_ResourceModel
    -}
    -
    -var map_ResourceRequest = map[string]string{
    -	"":                 "ResourceRequest is a request for resources from one particular driver.",
    -	"vendorParameters": "VendorParameters are arbitrary setup parameters for the requested resource. They are ignored while allocating a claim.",
    -}
    -
    -func (ResourceRequest) SwaggerDoc() map[string]string {
    -	return map_ResourceRequest
    -}
    -
    -var map_ResourceRequestModel = map[string]string{
    -	"":               "ResourceRequestModel must have one and only one field set.",
    -	"namedResources": "NamedResources describes a request for resources with the named resources model.",
    -}
    -
    -func (ResourceRequestModel) SwaggerDoc() map[string]string {
    -	return map_ResourceRequestModel
    -}
    -
    -var map_ResourceSlice = map[string]string{
    -	"":           "ResourceSlice provides information about available resources on individual nodes.",
    -	"metadata":   "Standard object metadata",
    -	"nodeName":   "NodeName identifies the node which provides the resources if they are local to a node.\n\nA field selector can be used to list only ResourceSlice objects with a certain node name.",
    -	"driverName": "DriverName identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.",
    -}
    -
    -func (ResourceSlice) SwaggerDoc() map[string]string {
    -	return map_ResourceSlice
    -}
    -
    -var map_ResourceSliceList = map[string]string{
    -	"":         "ResourceSliceList is a collection of ResourceSlices.",
    -	"metadata": "Standard list metadata",
    -	"items":    "Items is the list of node resource capacity objects.",
    -}
    -
    -func (ResourceSliceList) SwaggerDoc() map[string]string {
    -	return map_ResourceSliceList
    -}
    -
    -var map_StructuredResourceHandle = map[string]string{
    -	"":                      "StructuredResourceHandle is the in-tree representation of the allocation result.",
    -	"vendorClassParameters": "VendorClassParameters are the per-claim configuration parameters from the resource class at the time that the claim was allocated.",
    -	"vendorClaimParameters": "VendorClaimParameters are the per-claim configuration parameters from the resource claim parameters at the time that the claim was allocated.",
    -	"nodeName":              "NodeName is the name of the node providing the necessary resources if the resources are local to a node.",
    -	"results":               "Results lists all allocated driver resources.",
    -}
    -
    -func (StructuredResourceHandle) SwaggerDoc() map[string]string {
    -	return map_StructuredResourceHandle
    -}
    -
    -var map_VendorParameters = map[string]string{
    -	"":           "VendorParameters are opaque parameters for one particular driver.",
    -	"driverName": "DriverName is the name used by the DRA driver kubelet plugin.",
    -	"parameters": "Parameters can be arbitrary setup parameters. They are ignored while allocating a claim.",
    -}
    -
    -func (VendorParameters) SwaggerDoc() map[string]string {
    -	return map_VendorParameters
    -}
    -
    -// AUTO-GENERATED FUNCTIONS END HERE
    diff --git a/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go
    deleted file mode 100644
    index 52de8e1ad5..0000000000
    --- a/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go
    +++ /dev/null
    @@ -1,1126 +0,0 @@
    -//go:build !ignore_autogenerated
    -// +build !ignore_autogenerated
    -
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by deepcopy-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	v1 "k8s.io/api/core/v1"
    -	runtime "k8s.io/apimachinery/pkg/runtime"
    -)
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
    -	*out = *in
    -	if in.ResourceHandles != nil {
    -		in, out := &in.ResourceHandles, &out.ResourceHandles
    -		*out = make([]ResourceHandle, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	if in.AvailableOnNodes != nil {
    -		in, out := &in.AvailableOnNodes, &out.AvailableOnNodes
    -		*out = new(v1.NodeSelector)
    -		(*in).DeepCopyInto(*out)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult.
    -func (in *AllocationResult) DeepCopy() *AllocationResult {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(AllocationResult)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *AllocationResultModel) DeepCopyInto(out *AllocationResultModel) {
    -	*out = *in
    -	if in.NamedResources != nil {
    -		in, out := &in.NamedResources, &out.NamedResources
    -		*out = new(NamedResourcesAllocationResult)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResultModel.
    -func (in *AllocationResultModel) DeepCopy() *AllocationResultModel {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(AllocationResultModel)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *DriverAllocationResult) DeepCopyInto(out *DriverAllocationResult) {
    -	*out = *in
    -	in.VendorRequestParameters.DeepCopyInto(&out.VendorRequestParameters)
    -	in.AllocationResultModel.DeepCopyInto(&out.AllocationResultModel)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverAllocationResult.
    -func (in *DriverAllocationResult) DeepCopy() *DriverAllocationResult {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(DriverAllocationResult)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *DriverRequests) DeepCopyInto(out *DriverRequests) {
    -	*out = *in
    -	in.VendorParameters.DeepCopyInto(&out.VendorParameters)
    -	if in.Requests != nil {
    -		in, out := &in.Requests, &out.Requests
    -		*out = make([]ResourceRequest, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverRequests.
    -func (in *DriverRequests) DeepCopy() *DriverRequests {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(DriverRequests)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesAllocationResult) DeepCopyInto(out *NamedResourcesAllocationResult) {
    -	*out = *in
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAllocationResult.
    -func (in *NamedResourcesAllocationResult) DeepCopy() *NamedResourcesAllocationResult {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesAllocationResult)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesAttribute) DeepCopyInto(out *NamedResourcesAttribute) {
    -	*out = *in
    -	in.NamedResourcesAttributeValue.DeepCopyInto(&out.NamedResourcesAttributeValue)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttribute.
    -func (in *NamedResourcesAttribute) DeepCopy() *NamedResourcesAttribute {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesAttribute)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesAttributeValue) DeepCopyInto(out *NamedResourcesAttributeValue) {
    -	*out = *in
    -	if in.QuantityValue != nil {
    -		in, out := &in.QuantityValue, &out.QuantityValue
    -		x := (*in).DeepCopy()
    -		*out = &x
    -	}
    -	if in.BoolValue != nil {
    -		in, out := &in.BoolValue, &out.BoolValue
    -		*out = new(bool)
    -		**out = **in
    -	}
    -	if in.IntValue != nil {
    -		in, out := &in.IntValue, &out.IntValue
    -		*out = new(int64)
    -		**out = **in
    -	}
    -	if in.IntSliceValue != nil {
    -		in, out := &in.IntSliceValue, &out.IntSliceValue
    -		*out = new(NamedResourcesIntSlice)
    -		(*in).DeepCopyInto(*out)
    -	}
    -	if in.StringValue != nil {
    -		in, out := &in.StringValue, &out.StringValue
    -		*out = new(string)
    -		**out = **in
    -	}
    -	if in.StringSliceValue != nil {
    -		in, out := &in.StringSliceValue, &out.StringSliceValue
    -		*out = new(NamedResourcesStringSlice)
    -		(*in).DeepCopyInto(*out)
    -	}
    -	if in.VersionValue != nil {
    -		in, out := &in.VersionValue, &out.VersionValue
    -		*out = new(string)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttributeValue.
    -func (in *NamedResourcesAttributeValue) DeepCopy() *NamedResourcesAttributeValue {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesAttributeValue)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesFilter) DeepCopyInto(out *NamedResourcesFilter) {
    -	*out = *in
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesFilter.
    -func (in *NamedResourcesFilter) DeepCopy() *NamedResourcesFilter {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesFilter)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesInstance) DeepCopyInto(out *NamedResourcesInstance) {
    -	*out = *in
    -	if in.Attributes != nil {
    -		in, out := &in.Attributes, &out.Attributes
    -		*out = make([]NamedResourcesAttribute, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesInstance.
    -func (in *NamedResourcesInstance) DeepCopy() *NamedResourcesInstance {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesInstance)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesIntSlice) DeepCopyInto(out *NamedResourcesIntSlice) {
    -	*out = *in
    -	if in.Ints != nil {
    -		in, out := &in.Ints, &out.Ints
    -		*out = make([]int64, len(*in))
    -		copy(*out, *in)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesIntSlice.
    -func (in *NamedResourcesIntSlice) DeepCopy() *NamedResourcesIntSlice {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesIntSlice)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesRequest) DeepCopyInto(out *NamedResourcesRequest) {
    -	*out = *in
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesRequest.
    -func (in *NamedResourcesRequest) DeepCopy() *NamedResourcesRequest {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesRequest)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesResources) DeepCopyInto(out *NamedResourcesResources) {
    -	*out = *in
    -	if in.Instances != nil {
    -		in, out := &in.Instances, &out.Instances
    -		*out = make([]NamedResourcesInstance, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesResources.
    -func (in *NamedResourcesResources) DeepCopy() *NamedResourcesResources {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesResources)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *NamedResourcesStringSlice) DeepCopyInto(out *NamedResourcesStringSlice) {
    -	*out = *in
    -	if in.Strings != nil {
    -		in, out := &in.Strings, &out.Strings
    -		*out = make([]string, len(*in))
    -		copy(*out, *in)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesStringSlice.
    -func (in *NamedResourcesStringSlice) DeepCopy() *NamedResourcesStringSlice {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(NamedResourcesStringSlice)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	in.Spec.DeepCopyInto(&out.Spec)
    -	in.Status.DeepCopyInto(&out.Status)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext.
    -func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(PodSchedulingContext)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *PodSchedulingContext) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]PodSchedulingContext, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList.
    -func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(PodSchedulingContextList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) {
    -	*out = *in
    -	if in.PotentialNodes != nil {
    -		in, out := &in.PotentialNodes, &out.PotentialNodes
    -		*out = make([]string, len(*in))
    -		copy(*out, *in)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec.
    -func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(PodSchedulingContextSpec)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) {
    -	*out = *in
    -	if in.ResourceClaims != nil {
    -		in, out := &in.ResourceClaims, &out.ResourceClaims
    -		*out = make([]ResourceClaimSchedulingStatus, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus.
    -func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(PodSchedulingContextStatus)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	in.Spec.DeepCopyInto(&out.Spec)
    -	in.Status.DeepCopyInto(&out.Status)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
    -func (in *ResourceClaim) DeepCopy() *ResourceClaim {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaim)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClaim) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) {
    -	*out = *in
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference.
    -func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimConsumerReference)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]ResourceClaim, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList.
    -func (in *ResourceClaimList) DeepCopy() *ResourceClaimList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimParameters) DeepCopyInto(out *ResourceClaimParameters) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	if in.GeneratedFrom != nil {
    -		in, out := &in.GeneratedFrom, &out.GeneratedFrom
    -		*out = new(ResourceClaimParametersReference)
    -		**out = **in
    -	}
    -	if in.DriverRequests != nil {
    -		in, out := &in.DriverRequests, &out.DriverRequests
    -		*out = make([]DriverRequests, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParameters.
    -func (in *ResourceClaimParameters) DeepCopy() *ResourceClaimParameters {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimParameters)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClaimParameters) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimParametersList) DeepCopyInto(out *ResourceClaimParametersList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]ResourceClaimParameters, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersList.
    -func (in *ResourceClaimParametersList) DeepCopy() *ResourceClaimParametersList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimParametersList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClaimParametersList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) {
    -	*out = *in
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersReference.
    -func (in *ResourceClaimParametersReference) DeepCopy() *ResourceClaimParametersReference {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimParametersReference)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) {
    -	*out = *in
    -	if in.UnsuitableNodes != nil {
    -		in, out := &in.UnsuitableNodes, &out.UnsuitableNodes
    -		*out = make([]string, len(*in))
    -		copy(*out, *in)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus.
    -func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimSchedulingStatus)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) {
    -	*out = *in
    -	if in.ParametersRef != nil {
    -		in, out := &in.ParametersRef, &out.ParametersRef
    -		*out = new(ResourceClaimParametersReference)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec.
    -func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimSpec)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) {
    -	*out = *in
    -	if in.Allocation != nil {
    -		in, out := &in.Allocation, &out.Allocation
    -		*out = new(AllocationResult)
    -		(*in).DeepCopyInto(*out)
    -	}
    -	if in.ReservedFor != nil {
    -		in, out := &in.ReservedFor, &out.ReservedFor
    -		*out = make([]ResourceClaimConsumerReference, len(*in))
    -		copy(*out, *in)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus.
    -func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimStatus)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	in.Spec.DeepCopyInto(&out.Spec)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate.
    -func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimTemplate)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]ResourceClaimTemplate, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList.
    -func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimTemplateList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) {
    -	*out = *in
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	in.Spec.DeepCopyInto(&out.Spec)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec.
    -func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClaimTemplateSpec)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClass) DeepCopyInto(out *ResourceClass) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	if in.ParametersRef != nil {
    -		in, out := &in.ParametersRef, &out.ParametersRef
    -		*out = new(ResourceClassParametersReference)
    -		**out = **in
    -	}
    -	if in.SuitableNodes != nil {
    -		in, out := &in.SuitableNodes, &out.SuitableNodes
    -		*out = new(v1.NodeSelector)
    -		(*in).DeepCopyInto(*out)
    -	}
    -	if in.StructuredParameters != nil {
    -		in, out := &in.StructuredParameters, &out.StructuredParameters
    -		*out = new(bool)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClass.
    -func (in *ResourceClass) DeepCopy() *ResourceClass {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClass)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClass) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClassList) DeepCopyInto(out *ResourceClassList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]ResourceClass, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassList.
    -func (in *ResourceClassList) DeepCopy() *ResourceClassList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClassList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClassList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClassParameters) DeepCopyInto(out *ResourceClassParameters) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	if in.GeneratedFrom != nil {
    -		in, out := &in.GeneratedFrom, &out.GeneratedFrom
    -		*out = new(ResourceClassParametersReference)
    -		**out = **in
    -	}
    -	if in.VendorParameters != nil {
    -		in, out := &in.VendorParameters, &out.VendorParameters
    -		*out = make([]VendorParameters, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	if in.Filters != nil {
    -		in, out := &in.Filters, &out.Filters
    -		*out = make([]ResourceFilter, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParameters.
    -func (in *ResourceClassParameters) DeepCopy() *ResourceClassParameters {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClassParameters)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClassParameters) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClassParametersList) DeepCopyInto(out *ResourceClassParametersList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]ResourceClassParameters, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersList.
    -func (in *ResourceClassParametersList) DeepCopy() *ResourceClassParametersList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClassParametersList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceClassParametersList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) {
    -	*out = *in
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersReference.
    -func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersReference {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceClassParametersReference)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceFilter) DeepCopyInto(out *ResourceFilter) {
    -	*out = *in
    -	in.ResourceFilterModel.DeepCopyInto(&out.ResourceFilterModel)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilter.
    -func (in *ResourceFilter) DeepCopy() *ResourceFilter {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceFilter)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceFilterModel) DeepCopyInto(out *ResourceFilterModel) {
    -	*out = *in
    -	if in.NamedResources != nil {
    -		in, out := &in.NamedResources, &out.NamedResources
    -		*out = new(NamedResourcesFilter)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilterModel.
    -func (in *ResourceFilterModel) DeepCopy() *ResourceFilterModel {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceFilterModel)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) {
    -	*out = *in
    -	if in.StructuredData != nil {
    -		in, out := &in.StructuredData, &out.StructuredData
    -		*out = new(StructuredResourceHandle)
    -		(*in).DeepCopyInto(*out)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle.
    -func (in *ResourceHandle) DeepCopy() *ResourceHandle {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceHandle)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceModel) DeepCopyInto(out *ResourceModel) {
    -	*out = *in
    -	if in.NamedResources != nil {
    -		in, out := &in.NamedResources, &out.NamedResources
    -		*out = new(NamedResourcesResources)
    -		(*in).DeepCopyInto(*out)
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceModel.
    -func (in *ResourceModel) DeepCopy() *ResourceModel {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceModel)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceRequest) DeepCopyInto(out *ResourceRequest) {
    -	*out = *in
    -	in.VendorParameters.DeepCopyInto(&out.VendorParameters)
    -	in.ResourceRequestModel.DeepCopyInto(&out.ResourceRequestModel)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequest.
    -func (in *ResourceRequest) DeepCopy() *ResourceRequest {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceRequest)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceRequestModel) DeepCopyInto(out *ResourceRequestModel) {
    -	*out = *in
    -	if in.NamedResources != nil {
    -		in, out := &in.NamedResources, &out.NamedResources
    -		*out = new(NamedResourcesRequest)
    -		**out = **in
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequestModel.
    -func (in *ResourceRequestModel) DeepCopy() *ResourceRequestModel {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceRequestModel)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    -	in.ResourceModel.DeepCopyInto(&out.ResourceModel)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
    -func (in *ResourceSlice) DeepCopy() *ResourceSlice {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceSlice)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceSlice) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
    -	*out = *in
    -	out.TypeMeta = in.TypeMeta
    -	in.ListMeta.DeepCopyInto(&out.ListMeta)
    -	if in.Items != nil {
    -		in, out := &in.Items, &out.Items
    -		*out = make([]ResourceSlice, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
    -func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(ResourceSliceList)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    -func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
    -	if c := in.DeepCopy(); c != nil {
    -		return c
    -	}
    -	return nil
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) {
    -	*out = *in
    -	in.VendorClassParameters.DeepCopyInto(&out.VendorClassParameters)
    -	in.VendorClaimParameters.DeepCopyInto(&out.VendorClaimParameters)
    -	if in.Results != nil {
    -		in, out := &in.Results, &out.Results
    -		*out = make([]DriverAllocationResult, len(*in))
    -		for i := range *in {
    -			(*in)[i].DeepCopyInto(&(*out)[i])
    -		}
    -	}
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StructuredResourceHandle.
    -func (in *StructuredResourceHandle) DeepCopy() *StructuredResourceHandle {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(StructuredResourceHandle)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    -
    -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    -func (in *VendorParameters) DeepCopyInto(out *VendorParameters) {
    -	*out = *in
    -	in.Parameters.DeepCopyInto(&out.Parameters)
    -	return
    -}
    -
    -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VendorParameters.
    -func (in *VendorParameters) DeepCopy() *VendorParameters {
    -	if in == nil {
    -		return nil
    -	}
    -	out := new(VendorParameters)
    -	in.DeepCopyInto(out)
    -	return out
    -}
    diff --git a/vendor/k8s.io/api/resource/v1alpha3/doc.go b/vendor/k8s.io/api/resource/v1alpha3/doc.go
    new file mode 100644
    index 0000000000..aeb66561fb
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1alpha3/doc.go
    @@ -0,0 +1,24 @@
    +/*
    +Copyright 2022 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// +k8s:openapi-gen=true
    +// +k8s:deepcopy-gen=package
    +// +k8s:protobuf-gen=package
    +
    +// +groupName=resource.k8s.io
    +
    +// Package v1alpha3 is the v1alpha3 version of the resource API.
    +package v1alpha3 // import "k8s.io/api/resource/v1alpha3"
    diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
    new file mode 100644
    index 0000000000..4ac01cc6f3
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
    @@ -0,0 +1,8987 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by protoc-gen-gogo. DO NOT EDIT.
    +// source: k8s.io/api/resource/v1alpha3/generated.proto
    +
    +package v1alpha3
    +
    +import (
    +	fmt "fmt"
    +
    +	io "io"
    +
    +	proto "github.com/gogo/protobuf/proto"
    +	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
    +	v1 "k8s.io/api/core/v1"
    +	resource "k8s.io/apimachinery/pkg/api/resource"
    +
    +	math "math"
    +	math_bits "math/bits"
    +	reflect "reflect"
    +	strings "strings"
    +
    +	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
    +)
    +
    +// Reference imports to suppress errors if they are not otherwise used.
    +var _ = proto.Marshal
    +var _ = fmt.Errorf
    +var _ = math.Inf
    +
    +// This is a compile-time assertion to ensure that this generated file
    +// is compatible with the proto package it is being compiled against.
    +// A compilation error at this line likely means your copy of the
    +// proto package needs to be updated.
    +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
    +
    +func (m *AllocationResult) Reset()      { *m = AllocationResult{} }
    +func (*AllocationResult) ProtoMessage() {}
    +func (*AllocationResult) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{0}
    +}
    +func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *AllocationResult) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_AllocationResult.Merge(m, src)
    +}
    +func (m *AllocationResult) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *AllocationResult) XXX_DiscardUnknown() {
    +	xxx_messageInfo_AllocationResult.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
    +
    +func (m *BasicDevice) Reset()      { *m = BasicDevice{} }
    +func (*BasicDevice) ProtoMessage() {}
    +func (*BasicDevice) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{1}
    +}
    +func (m *BasicDevice) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *BasicDevice) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_BasicDevice.Merge(m, src)
    +}
    +func (m *BasicDevice) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *BasicDevice) XXX_DiscardUnknown() {
    +	xxx_messageInfo_BasicDevice.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_BasicDevice proto.InternalMessageInfo
    +
    +func (m *CELDeviceSelector) Reset()      { *m = CELDeviceSelector{} }
    +func (*CELDeviceSelector) ProtoMessage() {}
    +func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{2}
    +}
    +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_CELDeviceSelector.Merge(m, src)
    +}
    +func (m *CELDeviceSelector) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *CELDeviceSelector) XXX_DiscardUnknown() {
    +	xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
    +
    +func (m *Device) Reset()      { *m = Device{} }
    +func (*Device) ProtoMessage() {}
    +func (*Device) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{3}
    +}
    +func (m *Device) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *Device) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_Device.Merge(m, src)
    +}
    +func (m *Device) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *Device) XXX_DiscardUnknown() {
    +	xxx_messageInfo_Device.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_Device proto.InternalMessageInfo
    +
    +func (m *DeviceAllocationConfiguration) Reset()      { *m = DeviceAllocationConfiguration{} }
    +func (*DeviceAllocationConfiguration) ProtoMessage() {}
    +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{4}
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
    +}
    +func (m *DeviceAllocationConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceAllocationResult) Reset()      { *m = DeviceAllocationResult{} }
    +func (*DeviceAllocationResult) ProtoMessage() {}
    +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{5}
    +}
    +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
    +}
    +func (m *DeviceAllocationResult) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
    +
    +func (m *DeviceAttribute) Reset()      { *m = DeviceAttribute{} }
    +func (*DeviceAttribute) ProtoMessage() {}
    +func (*DeviceAttribute) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{6}
    +}
    +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceAttribute.Merge(m, src)
    +}
    +func (m *DeviceAttribute) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceAttribute) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
    +
    +func (m *DeviceClaim) Reset()      { *m = DeviceClaim{} }
    +func (*DeviceClaim) ProtoMessage() {}
    +func (*DeviceClaim) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{7}
    +}
    +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClaim) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClaim.Merge(m, src)
    +}
    +func (m *DeviceClaim) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClaim) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
    +
    +func (m *DeviceClaimConfiguration) Reset()      { *m = DeviceClaimConfiguration{} }
    +func (*DeviceClaimConfiguration) ProtoMessage() {}
    +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{8}
    +}
    +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
    +}
    +func (m *DeviceClaimConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceClass) Reset()      { *m = DeviceClass{} }
    +func (*DeviceClass) ProtoMessage() {}
    +func (*DeviceClass) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{9}
    +}
    +func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClass) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClass.Merge(m, src)
    +}
    +func (m *DeviceClass) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClass) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClass.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
    +
    +func (m *DeviceClassConfiguration) Reset()      { *m = DeviceClassConfiguration{} }
    +func (*DeviceClassConfiguration) ProtoMessage() {}
    +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{10}
    +}
    +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
    +}
    +func (m *DeviceClassConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceClassList) Reset()      { *m = DeviceClassList{} }
    +func (*DeviceClassList) ProtoMessage() {}
    +func (*DeviceClassList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{11}
    +}
    +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClassList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClassList.Merge(m, src)
    +}
    +func (m *DeviceClassList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClassList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
    +
    +func (m *DeviceClassSpec) Reset()      { *m = DeviceClassSpec{} }
    +func (*DeviceClassSpec) ProtoMessage() {}
    +func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{12}
    +}
    +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceClassSpec.Merge(m, src)
    +}
    +func (m *DeviceClassSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceClassSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
    +
    +func (m *DeviceConfiguration) Reset()      { *m = DeviceConfiguration{} }
    +func (*DeviceConfiguration) ProtoMessage() {}
    +func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{13}
    +}
    +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceConfiguration.Merge(m, src)
    +}
    +func (m *DeviceConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
    +
    +func (m *DeviceConstraint) Reset()      { *m = DeviceConstraint{} }
    +func (*DeviceConstraint) ProtoMessage() {}
    +func (*DeviceConstraint) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{14}
    +}
    +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceConstraint.Merge(m, src)
    +}
    +func (m *DeviceConstraint) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceConstraint) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
    +
    +func (m *DeviceRequest) Reset()      { *m = DeviceRequest{} }
    +func (*DeviceRequest) ProtoMessage() {}
    +func (*DeviceRequest) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{15}
    +}
    +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceRequest) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceRequest.Merge(m, src)
    +}
    +func (m *DeviceRequest) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceRequest) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
    +
    +func (m *DeviceRequestAllocationResult) Reset()      { *m = DeviceRequestAllocationResult{} }
    +func (*DeviceRequestAllocationResult) ProtoMessage() {}
    +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{16}
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
    +}
    +func (m *DeviceRequestAllocationResult) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
    +
    +func (m *DeviceSelector) Reset()      { *m = DeviceSelector{} }
    +func (*DeviceSelector) ProtoMessage() {}
    +func (*DeviceSelector) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{17}
    +}
    +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *DeviceSelector) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_DeviceSelector.Merge(m, src)
    +}
    +func (m *DeviceSelector) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *DeviceSelector) XXX_DiscardUnknown() {
    +	xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
    +
    +func (m *OpaqueDeviceConfiguration) Reset()      { *m = OpaqueDeviceConfiguration{} }
    +func (*OpaqueDeviceConfiguration) ProtoMessage() {}
    +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{18}
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
    +	xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
    +
    +func (m *PodSchedulingContext) Reset()      { *m = PodSchedulingContext{} }
    +func (*PodSchedulingContext) ProtoMessage() {}
    +func (*PodSchedulingContext) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{19}
    +}
    +func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodSchedulingContext) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodSchedulingContext.Merge(m, src)
    +}
    +func (m *PodSchedulingContext) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodSchedulingContext) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo
    +
    +func (m *PodSchedulingContextList) Reset()      { *m = PodSchedulingContextList{} }
    +func (*PodSchedulingContextList) ProtoMessage() {}
    +func (*PodSchedulingContextList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{20}
    +}
    +func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodSchedulingContextList.Merge(m, src)
    +}
    +func (m *PodSchedulingContextList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodSchedulingContextList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo
    +
    +func (m *PodSchedulingContextSpec) Reset()      { *m = PodSchedulingContextSpec{} }
    +func (*PodSchedulingContextSpec) ProtoMessage() {}
    +func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{21}
    +}
    +func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src)
    +}
    +func (m *PodSchedulingContextSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo
    +
    +func (m *PodSchedulingContextStatus) Reset()      { *m = PodSchedulingContextStatus{} }
    +func (*PodSchedulingContextStatus) ProtoMessage() {}
    +func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{22}
    +}
    +func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src)
    +}
    +func (m *PodSchedulingContextStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo
    +
    +func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
    +func (*ResourceClaim) ProtoMessage() {}
    +func (*ResourceClaim) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{23}
    +}
    +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaim) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaim.Merge(m, src)
    +}
    +func (m *ResourceClaim) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaim) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
    +
    +func (m *ResourceClaimConsumerReference) Reset()      { *m = ResourceClaimConsumerReference{} }
    +func (*ResourceClaimConsumerReference) ProtoMessage() {}
    +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{24}
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
    +}
    +func (m *ResourceClaimConsumerReference) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
    +
    +func (m *ResourceClaimList) Reset()      { *m = ResourceClaimList{} }
    +func (*ResourceClaimList) ProtoMessage() {}
    +func (*ResourceClaimList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{25}
    +}
    +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimList.Merge(m, src)
    +}
    +func (m *ResourceClaimList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
    +
    +func (m *ResourceClaimSchedulingStatus) Reset()      { *m = ResourceClaimSchedulingStatus{} }
    +func (*ResourceClaimSchedulingStatus) ProtoMessage() {}
    +func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{26}
    +}
    +func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src)
    +}
    +func (m *ResourceClaimSchedulingStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo
    +
    +func (m *ResourceClaimSpec) Reset()      { *m = ResourceClaimSpec{} }
    +func (*ResourceClaimSpec) ProtoMessage() {}
    +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{27}
    +}
    +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
    +}
    +func (m *ResourceClaimSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
    +
    +func (m *ResourceClaimStatus) Reset()      { *m = ResourceClaimStatus{} }
    +func (*ResourceClaimStatus) ProtoMessage() {}
    +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{28}
    +}
    +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
    +}
    +func (m *ResourceClaimStatus) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
    +
    +func (m *ResourceClaimTemplate) Reset()      { *m = ResourceClaimTemplate{} }
    +func (*ResourceClaimTemplate) ProtoMessage() {}
    +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{29}
    +}
    +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
    +}
    +func (m *ResourceClaimTemplate) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
    +
    +func (m *ResourceClaimTemplateList) Reset()      { *m = ResourceClaimTemplateList{} }
    +func (*ResourceClaimTemplateList) ProtoMessage() {}
    +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{30}
    +}
    +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
    +}
    +func (m *ResourceClaimTemplateList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
    +
    +func (m *ResourceClaimTemplateSpec) Reset()      { *m = ResourceClaimTemplateSpec{} }
    +func (*ResourceClaimTemplateSpec) ProtoMessage() {}
    +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{31}
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
    +
    +func (m *ResourcePool) Reset()      { *m = ResourcePool{} }
    +func (*ResourcePool) ProtoMessage() {}
    +func (*ResourcePool) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{32}
    +}
    +func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourcePool) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourcePool.Merge(m, src)
    +}
    +func (m *ResourcePool) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourcePool) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourcePool.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
    +
    +func (m *ResourceSlice) Reset()      { *m = ResourceSlice{} }
    +func (*ResourceSlice) ProtoMessage() {}
    +func (*ResourceSlice) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{33}
    +}
    +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceSlice) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceSlice.Merge(m, src)
    +}
    +func (m *ResourceSlice) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceSlice) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
    +
    +func (m *ResourceSliceList) Reset()      { *m = ResourceSliceList{} }
    +func (*ResourceSliceList) ProtoMessage() {}
    +func (*ResourceSliceList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{34}
    +}
    +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceSliceList.Merge(m, src)
    +}
    +func (m *ResourceSliceList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceSliceList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
    +
    +func (m *ResourceSliceSpec) Reset()      { *m = ResourceSliceSpec{} }
    +func (*ResourceSliceSpec) ProtoMessage() {}
    +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_66649ee9bbcd89d2, []int{35}
    +}
    +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
    +}
    +func (m *ResourceSliceSpec) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
    +	xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
    +
    +func init() {
    +	proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult")
    +	proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice")
    +	proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry")
    +	proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry")
    +	proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector")
    +	proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device")
    +	proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration")
    +	proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult")
    +	proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.DeviceAttribute")
    +	proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaim")
    +	proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaimConfiguration")
    +	proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1alpha3.DeviceClass")
    +	proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassConfiguration")
    +	proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassList")
    +	proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec")
    +	proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration")
    +	proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint")
    +	proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest")
    +	proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult")
    +	proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector")
    +	proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration")
    +	proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContext")
    +	proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextList")
    +	proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextSpec")
    +	proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextStatus")
    +	proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim")
    +	proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference")
    +	proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList")
    +	proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSchedulingStatus")
    +	proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec")
    +	proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus")
    +	proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate")
    +	proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateList")
    +	proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateSpec")
    +	proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1alpha3.ResourcePool")
    +	proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha3.ResourceSlice")
    +	proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceList")
    +	proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceSpec")
    +}
    +
    +func init() {
    +	proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2)
    +}
    +
    +var fileDescriptor_66649ee9bbcd89d2 = []byte{
    +	// 2085 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57,
    +	0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda,
    +	0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26,
    +	0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12,
    +	0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24,
    +	0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b,
    +	0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3,
    +	0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4,
    +	0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a,
    +	0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5,
    +	0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30,
    +	0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78,
    +	0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96,
    +	0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71,
    +	0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4,
    +	0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81,
    +	0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a,
    +	0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f,
    +	0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d,
    +	0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e,
    +	0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a,
    +	0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2,
    +	0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a,
    +	0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf,
    +	0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9,
    +	0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4,
    +	0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68,
    +	0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a,
    +	0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26,
    +	0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52,
    +	0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b,
    +	0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed,
    +	0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93,
    +	0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6,
    +	0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d,
    +	0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1,
    +	0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9,
    +	0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91,
    +	0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06,
    +	0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3,
    +	0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59,
    +	0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54,
    +	0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d,
    +	0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66,
    +	0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6,
    +	0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa,
    +	0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71,
    +	0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85,
    +	0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16,
    +	0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74,
    +	0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66,
    +	0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82,
    +	0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50,
    +	0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21,
    +	0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c,
    +	0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe,
    +	0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6,
    +	0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38,
    +	0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed,
    +	0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30,
    +	0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4,
    +	0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17,
    +	0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47,
    +	0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93,
    +	0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11,
    +	0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd,
    +	0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf,
    +	0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98,
    +	0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0,
    +	0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d,
    +	0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67,
    +	0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf,
    +	0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e,
    +	0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2,
    +	0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a,
    +	0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12,
    +	0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8,
    +	0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88,
    +	0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8,
    +	0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30,
    +	0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0,
    +	0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba,
    +	0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0,
    +	0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe,
    +	0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e,
    +	0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17,
    +	0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41,
    +	0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65,
    +	0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a,
    +	0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79,
    +	0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1,
    +	0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9,
    +	0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96,
    +	0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49,
    +	0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f,
    +	0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e,
    +	0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5,
    +	0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96,
    +	0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0,
    +	0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10,
    +	0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31,
    +	0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83,
    +	0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3,
    +	0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36,
    +	0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0,
    +	0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf,
    +	0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3,
    +	0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc,
    +	0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb,
    +	0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7,
    +	0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1,
    +	0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b,
    +	0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93,
    +	0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e,
    +	0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45,
    +	0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed,
    +	0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff,
    +	0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40,
    +	0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00,
    +	0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f,
    +	0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8,
    +	0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac,
    +	0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a,
    +	0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56,
    +	0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5,
    +	0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92,
    +	0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d,
    +	0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37,
    +	0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71,
    +	0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6,
    +	0x20, 0x64, 0x20, 0x00, 0x00,
    +}
    +
    +func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Controller)
    +	copy(dAtA[i:], m.Controller)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller)))
    +	i--
    +	dAtA[i] = 0x22
    +	if m.NodeSelector != nil {
    +		{
    +			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	{
    +		size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *BasicDevice) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Capacity) > 0 {
    +		keysForCapacity := make([]string, 0, len(m.Capacity))
    +		for k := range m.Capacity {
    +			keysForCapacity = append(keysForCapacity, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
    +		for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForCapacity[iNdEx])
    +			copy(dAtA[i:], keysForCapacity[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Attributes) > 0 {
    +		keysForAttributes := make([]string, 0, len(m.Attributes))
    +		for k := range m.Attributes {
    +			keysForAttributes = append(keysForAttributes, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
    +		for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
    +			baseI := i
    +			{
    +				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForAttributes[iNdEx])
    +			copy(dAtA[i:], keysForAttributes[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Expression)
    +	copy(dAtA[i:], m.Expression)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *Device) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *Device) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Basic != nil {
    +		{
    +			size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Requests[iNdEx])
    +			copy(dAtA[i:], m.Requests[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.Source)
    +	copy(dAtA[i:], m.Source)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Config) > 0 {
    +		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Results) > 0 {
    +		for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.VersionValue != nil {
    +		i -= len(*m.VersionValue)
    +		copy(dAtA[i:], *m.VersionValue)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue)))
    +		i--
    +		dAtA[i] = 0x2a
    +	}
    +	if m.StringValue != nil {
    +		i -= len(*m.StringValue)
    +		copy(dAtA[i:], *m.StringValue)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue)))
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	if m.BoolValue != nil {
    +		i--
    +		if *m.BoolValue {
    +			dAtA[i] = 1
    +		} else {
    +			dAtA[i] = 0
    +		}
    +		i--
    +		dAtA[i] = 0x18
    +	}
    +	if m.IntValue != nil {
    +		i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue))
    +		i--
    +		dAtA[i] = 0x10
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Config) > 0 {
    +		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	if len(m.Constraints) > 0 {
    +		for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Requests[iNdEx])
    +			copy(dAtA[i:], m.Requests[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClass) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.SuitableNodes != nil {
    +		{
    +			size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x1a
    +	}
    +	if len(m.Config) > 0 {
    +		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if len(m.Selectors) > 0 {
    +		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.Opaque != nil {
    +		{
    +			size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.MatchAttribute != nil {
    +		i -= len(*m.MatchAttribute)
    +		copy(dAtA[i:], *m.MatchAttribute)
    +		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute)))
    +		i--
    +		dAtA[i] = 0x12
    +	}
    +	if len(m.Requests) > 0 {
    +		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Requests[iNdEx])
    +			copy(dAtA[i:], m.Requests[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i--
    +	if m.AdminAccess {
    +		dAtA[i] = 1
    +	} else {
    +		dAtA[i] = 0
    +	}
    +	i--
    +	dAtA[i] = 0x30
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
    +	i--
    +	dAtA[i] = 0x28
    +	i -= len(m.AllocationMode)
    +	copy(dAtA[i:], m.AllocationMode)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
    +	i--
    +	dAtA[i] = 0x22
    +	if len(m.Selectors) > 0 {
    +		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	i -= len(m.DeviceClassName)
    +	copy(dAtA[i:], m.DeviceClassName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Device)
    +	copy(dAtA[i:], m.Device)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.Pool)
    +	copy(dAtA[i:], m.Pool)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.Driver)
    +	copy(dAtA[i:], m.Driver)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Request)
    +	copy(dAtA[i:], m.Request)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if m.CEL != nil {
    +		{
    +			size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Driver)
    +	copy(dAtA[i:], m.Driver)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.PotentialNodes) > 0 {
    +		for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.PotentialNodes[iNdEx])
    +			copy(dAtA[i:], m.PotentialNodes[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx])))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.SelectedNode)
    +	copy(dAtA[i:], m.SelectedNode)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.ResourceClaims) > 0 {
    +		for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0xa
    +		}
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.UID)
    +	copy(dAtA[i:], m.UID)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
    +	i--
    +	dAtA[i] = 0x2a
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0x22
    +	i -= len(m.Resource)
    +	copy(dAtA[i:], m.Resource)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
    +	i--
    +	dAtA[i] = 0x1a
    +	i -= len(m.APIGroup)
    +	copy(dAtA[i:], m.APIGroup)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.UnsuitableNodes) > 0 {
    +		for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.UnsuitableNodes[iNdEx])
    +			copy(dAtA[i:], m.UnsuitableNodes[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx])))
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i -= len(m.Controller)
    +	copy(dAtA[i:], m.Controller)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller)))
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i--
    +	if m.DeallocationRequested {
    +		dAtA[i] = 1
    +	} else {
    +		dAtA[i] = 0
    +	}
    +	i--
    +	dAtA[i] = 0x18
    +	if len(m.ReservedFor) > 0 {
    +		for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	if m.Allocation != nil {
    +		{
    +			size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0xa
    +	}
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount))
    +	i--
    +	dAtA[i] = 0x18
    +	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
    +	i--
    +	dAtA[i] = 0x10
    +	i -= len(m.Name)
    +	copy(dAtA[i:], m.Name)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	{
    +		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Devices) > 0 {
    +		for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x32
    +		}
    +	}
    +	i--
    +	if m.AllNodes {
    +		dAtA[i] = 1
    +	} else {
    +		dAtA[i] = 0
    +	}
    +	i--
    +	dAtA[i] = 0x28
    +	if m.NodeSelector != nil {
    +		{
    +			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
    +			if err != nil {
    +				return 0, err
    +			}
    +			i -= size
    +			i = encodeVarintGenerated(dAtA, i, uint64(size))
    +		}
    +		i--
    +		dAtA[i] = 0x22
    +	}
    +	i -= len(m.NodeName)
    +	copy(dAtA[i:], m.NodeName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
    +	i--
    +	dAtA[i] = 0x1a
    +	{
    +		size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Driver)
    +	copy(dAtA[i:], m.Driver)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    +	offset -= sovGenerated(v)
    +	base := offset
    +	for v >= 1<<7 {
    +		dAtA[offset] = uint8(v&0x7f | 0x80)
    +		v >>= 7
    +		offset++
    +	}
    +	dAtA[offset] = uint8(v)
    +	return base
    +}
    +func (m *AllocationResult) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.Devices.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.NodeSelector != nil {
    +		l = m.NodeSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	l = len(m.Controller)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *BasicDevice) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Attributes) > 0 {
    +		for k, v := range m.Attributes {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	if len(m.Capacity) > 0 {
    +		for k, v := range m.Capacity {
    +			_ = k
    +			_ = v
    +			l = v.Size()
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *CELDeviceSelector) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Expression)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *Device) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.Basic != nil {
    +		l = m.Basic.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceAllocationConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Source)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Requests) > 0 {
    +		for _, s := range m.Requests {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = m.DeviceConfiguration.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceAllocationResult) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Results) > 0 {
    +		for _, e := range m.Results {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Config) > 0 {
    +		for _, e := range m.Config {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceAttribute) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.IntValue != nil {
    +		n += 1 + sovGenerated(uint64(*m.IntValue))
    +	}
    +	if m.BoolValue != nil {
    +		n += 2
    +	}
    +	if m.StringValue != nil {
    +		l = len(*m.StringValue)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if m.VersionValue != nil {
    +		l = len(*m.VersionValue)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceClaim) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		for _, e := range m.Requests {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Constraints) > 0 {
    +		for _, e := range m.Constraints {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Config) > 0 {
    +		for _, e := range m.Config {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceClaimConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		for _, s := range m.Requests {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = m.DeviceConfiguration.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceClass) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceClassConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.DeviceConfiguration.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceClassList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *DeviceClassSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Selectors) > 0 {
    +		for _, e := range m.Selectors {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if len(m.Config) > 0 {
    +		for _, e := range m.Config {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.SuitableNodes != nil {
    +		l = m.SuitableNodes.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Opaque != nil {
    +		l = m.Opaque.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceConstraint) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.Requests) > 0 {
    +		for _, s := range m.Requests {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	if m.MatchAttribute != nil {
    +		l = len(*m.MatchAttribute)
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *DeviceRequest) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.DeviceClassName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Selectors) > 0 {
    +		for _, e := range m.Selectors {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	l = len(m.AllocationMode)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	n += 1 + sovGenerated(uint64(m.Count))
    +	n += 2
    +	return n
    +}
    +
    +func (m *DeviceRequestAllocationResult) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Request)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Driver)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Pool)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Device)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *DeviceSelector) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.CEL != nil {
    +		l = m.CEL.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	return n
    +}
    +
    +func (m *OpaqueDeviceConfiguration) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Driver)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Parameters.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *PodSchedulingContext) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *PodSchedulingContextList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *PodSchedulingContextSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.SelectedNode)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.PotentialNodes) > 0 {
    +		for _, s := range m.PotentialNodes {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *PodSchedulingContextStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if len(m.ResourceClaims) > 0 {
    +		for _, e := range m.ResourceClaims {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceClaim) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Status.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimConsumerReference) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.APIGroup)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Resource)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.UID)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceClaimSchedulingStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.UnsuitableNodes) > 0 {
    +		for _, s := range m.UnsuitableNodes {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceClaimSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.Devices.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Controller)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimStatus) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	if m.Allocation != nil {
    +		l = m.Allocation.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	if len(m.ReservedFor) > 0 {
    +		for _, e := range m.ReservedFor {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	n += 2
    +	return n
    +}
    +
    +func (m *ResourceClaimTemplate) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceClaimTemplateList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceClaimTemplateSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourcePool) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Name)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	n += 1 + sovGenerated(uint64(m.Generation))
    +	n += 1 + sovGenerated(uint64(m.ResourceSliceCount))
    +	return n
    +}
    +
    +func (m *ResourceSlice) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Spec.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	return n
    +}
    +
    +func (m *ResourceSliceList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *ResourceSliceSpec) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Driver)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = m.Pool.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.NodeName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if m.NodeSelector != nil {
    +		l = m.NodeSelector.Size()
    +		n += 1 + l + sovGenerated(uint64(l))
    +	}
    +	n += 2
    +	if len(m.Devices) > 0 {
    +		for _, e := range m.Devices {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
    +func sovGenerated(x uint64) (n int) {
    +	return (math_bits.Len64(x|1) + 6) / 7
    +}
    +func sozGenerated(x uint64) (n int) {
    +	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
    +}
    +func (this *AllocationResult) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&AllocationResult{`,
    +		`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`,
    +		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`,
    +		`Controller:` + fmt.Sprintf("%v", this.Controller) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *BasicDevice) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	keysForAttributes := make([]string, 0, len(this.Attributes))
    +	for k := range this.Attributes {
    +		keysForAttributes = append(keysForAttributes, string(k))
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
    +	mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
    +	for _, k := range keysForAttributes {
    +		mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
    +	}
    +	mapStringForAttributes += "}"
    +	keysForCapacity := make([]string, 0, len(this.Capacity))
    +	for k := range this.Capacity {
    +		keysForCapacity = append(keysForCapacity, string(k))
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
    +	mapStringForCapacity := "map[QualifiedName]resource.Quantity{"
    +	for _, k := range keysForCapacity {
    +		mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
    +	}
    +	mapStringForCapacity += "}"
    +	s := strings.Join([]string{`&BasicDevice{`,
    +		`Attributes:` + mapStringForAttributes + `,`,
    +		`Capacity:` + mapStringForCapacity + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *CELDeviceSelector) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&CELDeviceSelector{`,
    +		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *Device) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&Device{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceAllocationConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceAllocationConfiguration{`,
    +		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
    +		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
    +		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceAllocationResult) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForResults := "[]DeviceRequestAllocationResult{"
    +	for _, f := range this.Results {
    +		repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForResults += "}"
    +	repeatedStringForConfig := "[]DeviceAllocationConfiguration{"
    +	for _, f := range this.Config {
    +		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConfig += "}"
    +	s := strings.Join([]string{`&DeviceAllocationResult{`,
    +		`Results:` + repeatedStringForResults + `,`,
    +		`Config:` + repeatedStringForConfig + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceAttribute) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceAttribute{`,
    +		`IntValue:` + valueToStringGenerated(this.IntValue) + `,`,
    +		`BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`,
    +		`StringValue:` + valueToStringGenerated(this.StringValue) + `,`,
    +		`VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClaim) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForRequests := "[]DeviceRequest{"
    +	for _, f := range this.Requests {
    +		repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForRequests += "}"
    +	repeatedStringForConstraints := "[]DeviceConstraint{"
    +	for _, f := range this.Constraints {
    +		repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConstraints += "}"
    +	repeatedStringForConfig := "[]DeviceClaimConfiguration{"
    +	for _, f := range this.Config {
    +		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConfig += "}"
    +	s := strings.Join([]string{`&DeviceClaim{`,
    +		`Requests:` + repeatedStringForRequests + `,`,
    +		`Constraints:` + repeatedStringForConstraints + `,`,
    +		`Config:` + repeatedStringForConfig + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClaimConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceClaimConfiguration{`,
    +		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
    +		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClass) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceClass{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClassConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceClassConfiguration{`,
    +		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClassList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]DeviceClass{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&DeviceClassList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceClassSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForSelectors := "[]DeviceSelector{"
    +	for _, f := range this.Selectors {
    +		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForSelectors += "}"
    +	repeatedStringForConfig := "[]DeviceClassConfiguration{"
    +	for _, f := range this.Config {
    +		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForConfig += "}"
    +	s := strings.Join([]string{`&DeviceClassSpec{`,
    +		`Selectors:` + repeatedStringForSelectors + `,`,
    +		`Config:` + repeatedStringForConfig + `,`,
    +		`SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceConfiguration{`,
    +		`Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceConstraint) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceConstraint{`,
    +		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
    +		`MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceRequest) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForSelectors := "[]DeviceSelector{"
    +	for _, f := range this.Selectors {
    +		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForSelectors += "}"
    +	s := strings.Join([]string{`&DeviceRequest{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
    +		`Selectors:` + repeatedStringForSelectors + `,`,
    +		`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
    +		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
    +		`AdminAccess:` + fmt.Sprintf("%v", this.AdminAccess) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceRequestAllocationResult) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceRequestAllocationResult{`,
    +		`Request:` + fmt.Sprintf("%v", this.Request) + `,`,
    +		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
    +		`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
    +		`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *DeviceSelector) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&DeviceSelector{`,
    +		`CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *OpaqueDeviceConfiguration) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&OpaqueDeviceConfiguration{`,
    +		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
    +		`Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *PodSchedulingContext) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&PodSchedulingContext{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *PodSchedulingContextList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]PodSchedulingContext{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&PodSchedulingContextList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *PodSchedulingContextSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&PodSchedulingContextSpec{`,
    +		`SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`,
    +		`PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *PodSchedulingContextStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{"
    +	for _, f := range this.ResourceClaims {
    +		repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForResourceClaims += "}"
    +	s := strings.Join([]string{`&PodSchedulingContextStatus{`,
    +		`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaim) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaim{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
    +		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimConsumerReference) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimConsumerReference{`,
    +		`APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
    +		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ResourceClaim{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ResourceClaimList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimSchedulingStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimSpec{`,
    +		`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`,
    +		`Controller:` + fmt.Sprintf("%v", this.Controller) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimStatus) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{"
    +	for _, f := range this.ReservedFor {
    +		repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForReservedFor += "}"
    +	s := strings.Join([]string{`&ResourceClaimStatus{`,
    +		`Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`,
    +		`ReservedFor:` + repeatedStringForReservedFor + `,`,
    +		`DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimTemplate) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimTemplate{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimTemplateList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ResourceClaimTemplate{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ResourceClaimTemplateList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceClaimTemplateSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceClaimTemplateSpec{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourcePool) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourcePool{`,
    +		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
    +		`Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
    +		`ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceSlice) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&ResourceSlice{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceSliceList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]ResourceSlice{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&ResourceSliceList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *ResourceSliceSpec) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForDevices := "[]Device{"
    +	for _, f := range this.Devices {
    +		repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForDevices += "}"
    +	s := strings.Join([]string{`&ResourceSliceSpec{`,
    +		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
    +		`Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`,
    +		`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
    +		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`,
    +		`AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`,
    +		`Devices:` + repeatedStringForDevices + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func valueToStringGenerated(v interface{}) string {
    +	rv := reflect.ValueOf(v)
    +	if rv.IsNil() {
    +		return "nil"
    +	}
    +	pv := reflect.Indirect(rv).Interface()
    +	return fmt.Sprintf("*%v", pv)
    +}
    +func (m *AllocationResult) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NodeSelector == nil {
    +				m.NodeSelector = &v1.NodeSelector{}
    +			}
    +			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Controller = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *BasicDevice) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Attributes == nil {
    +				m.Attributes = make(map[QualifiedName]DeviceAttribute)
    +			}
    +			var mapkey QualifiedName
    +			mapvalue := &DeviceAttribute{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &DeviceAttribute{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Attributes[QualifiedName(mapkey)] = *mapvalue
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Capacity == nil {
    +				m.Capacity = make(map[QualifiedName]resource.Quantity)
    +			}
    +			var mapkey QualifiedName
    +			mapvalue := &resource.Quantity{}
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var mapmsglen int
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						mapmsglen |= int(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					if mapmsglen < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postmsgIndex := iNdEx + mapmsglen
    +					if postmsgIndex < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postmsgIndex > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = &resource.Quantity{}
    +					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
    +						return err
    +					}
    +					iNdEx = postmsgIndex
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Capacity[QualifiedName(mapkey)] = *mapvalue
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Expression = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *Device) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: Device: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Basic == nil {
    +				m.Basic = &BasicDevice{}
    +			}
    +			if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Results = append(m.Results, DeviceRequestAllocationResult{})
    +			if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Config = append(m.Config, DeviceAllocationConfiguration{})
    +			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
    +			}
    +			var v int64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.IntValue = &v
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			b := bool(v != 0)
    +			m.BoolValue = &b
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.StringValue = &s
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := string(dAtA[iNdEx:postIndex])
    +			m.VersionValue = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, DeviceRequest{})
    +			if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Constraints = append(m.Constraints, DeviceConstraint{})
    +			if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Config = append(m.Config, DeviceClaimConfiguration{})
    +			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClass) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, DeviceClass{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Selectors = append(m.Selectors, DeviceSelector{})
    +			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Config = append(m.Config, DeviceClassConfiguration{})
    +			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.SuitableNodes == nil {
    +				m.SuitableNodes = &v1.NodeSelector{}
    +			}
    +			if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Opaque == nil {
    +				m.Opaque = &OpaqueDeviceConfiguration{}
    +			}
    +			if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			s := FullyQualifiedName(dAtA[iNdEx:postIndex])
    +			m.MatchAttribute = &s
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.DeviceClassName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Selectors = append(m.Selectors, DeviceSelector{})
    +			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
    +			}
    +			m.Count = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.Count |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 6:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.AdminAccess = bool(v != 0)
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Request = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Driver = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Pool = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Device = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.CEL == nil {
    +				m.CEL = &CELDeviceSelector{}
    +			}
    +			if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Driver = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, PodSchedulingContext{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.SelectedNode = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{})
    +			if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.APIGroup = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Resource = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, ResourceClaim{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Controller = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Allocation == nil {
    +				m.Allocation = &AllocationResult{}
    +			}
    +			if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{})
    +			if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.DeallocationRequested = bool(v != 0)
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, ResourceClaimTemplate{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourcePool) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Name = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
    +			}
    +			m.Generation = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.Generation |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		case 3:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType)
    +			}
    +			m.ResourceSliceCount = 0
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				m.ResourceSliceCount |= int64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, ResourceSlice{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Driver = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.NodeName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 4:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.NodeSelector == nil {
    +				m.NodeSelector = &v1.NodeSelector{}
    +			}
    +			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 5:
    +			if wireType != 0 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
    +			}
    +			var v int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				v |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			m.AllNodes = bool(v != 0)
    +		case 6:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Devices = append(m.Devices, Device{})
    +			if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func skipGenerated(dAtA []byte) (n int, err error) {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	depth := 0
    +	for iNdEx < l {
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return 0, ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return 0, io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= (uint64(b) & 0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		wireType := int(wire & 0x7)
    +		switch wireType {
    +		case 0:
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				iNdEx++
    +				if dAtA[iNdEx-1] < 0x80 {
    +					break
    +				}
    +			}
    +		case 1:
    +			iNdEx += 8
    +		case 2:
    +			var length int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return 0, ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return 0, io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				length |= (int(b) & 0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if length < 0 {
    +				return 0, ErrInvalidLengthGenerated
    +			}
    +			iNdEx += length
    +		case 3:
    +			depth++
    +		case 4:
    +			if depth == 0 {
    +				return 0, ErrUnexpectedEndOfGroupGenerated
    +			}
    +			depth--
    +		case 5:
    +			iNdEx += 4
    +		default:
    +			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
    +		}
    +		if iNdEx < 0 {
    +			return 0, ErrInvalidLengthGenerated
    +		}
    +		if depth == 0 {
    +			return iNdEx, nil
    +		}
    +	}
    +	return 0, io.ErrUnexpectedEOF
    +}
    +
    +var (
    +	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
    +	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
    +	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
    +)
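
The generated unmarshalers above all share the same hand-rolled base-128 varint framing (the `for shift := uint(0); ...` loops). As a minimal, standalone sketch of that wire format, assuming nothing beyond the standard library (`decodeVarint` is an illustrative helper, not part of the generated package):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift/accumulate loops in the generated
// unmarshalers: each byte contributes 7 bits, and the high bit marks
// whether another byte follows.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	var shift uint
	for n < len(data) {
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
		shift += 7
		if shift >= 64 {
			return 0, n, errors.New("varint overflows 64 bits")
		}
	}
	return 0, n, errors.New("unexpected end of input")
}

func main() {
	// 300 encodes as 0xAC 0x02 on the wire.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
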
    diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
    new file mode 100644
    index 0000000000..b4428ad452
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
    @@ -0,0 +1,912 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +
    +// This file was autogenerated by go-to-protobuf. Do not edit it manually!
    +
    +syntax = "proto2";
    +
    +package k8s.io.api.resource.v1alpha3;
    +
    +import "k8s.io/api/core/v1/generated.proto";
    +import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
    +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/generated.proto";
    +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
    +
    +// Package-wide variables from generator "generated".
    +option go_package = "k8s.io/api/resource/v1alpha3";
    +
    +// AllocationResult contains attributes of an allocated resource.
    +message AllocationResult {
    +  // Devices is the result of allocating devices.
    +  //
    +  // +optional
    +  optional DeviceAllocationResult devices = 1;
    +
    +  // NodeSelector defines where the allocated resources are available. If
    +  // unset, they are available everywhere.
    +  //
    +  // +optional
    +  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
    +
    +  // Controller is the name of the DRA driver which handled the
    +  // allocation. That driver is also responsible for deallocating the
    +  // claim. It is empty when the claim can be deallocated without
    +  // involving a driver.
    +  //
    +  // A driver may allocate devices provided by other drivers, so this
    +  // driver name here can be different from the driver names listed for
    +  // the results.
    +  //
    +  // This is an alpha field and requires enabling the DRAControlPlaneController
    +  // feature gate.
    +  //
    +  // +optional
    +  // +featureGate=DRAControlPlaneController
    +  optional string controller = 4;
    +}
    +
    +// BasicDevice defines one device instance.
    +message BasicDevice {
    +  // Attributes defines the set of attributes for this device.
    +  // The name of each attribute must be unique in that set.
    +  //
    +  // The maximum number of attributes and capacities combined is 32.
    +  //
    +  // +optional
+  map<string, DeviceAttribute> attributes = 1;
    +
    +  // Capacity defines the set of capacities for this device.
    +  // The name of each capacity must be unique in that set.
    +  //
    +  // The maximum number of attributes and capacities combined is 32.
    +  //
    +  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 2;
    +}
    +
    +// CELDeviceSelector contains a CEL expression for selecting a device.
    +message CELDeviceSelector {
    +  // Expression is a CEL expression which evaluates a single device. It
    +  // must evaluate to true when the device under consideration satisfies
    +  // the desired criteria, and false when it does not. Any other result
    +  // is an error and causes allocation of devices to abort.
    +  //
    +  // The expression's input is an object named "device", which carries
    +  // the following properties:
    +  //  - driver (string): the name of the driver which defines this device.
    +  //  - attributes (map[string]object): the device's attributes, grouped by prefix
    +  //    (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+  //    of the attributes which were prefixed by "dra.example.com").
    +  //  - capacity (map[string]object): the device's capacities, grouped by prefix.
    +  //
    +  // Example: Consider a device with driver="dra.example.com", which exposes
    +  // two attributes named "model" and "ext.example.com/family" and which
+  // exposes one capacity named "modules". The input to this expression
    +  // would have the following fields:
    +  //
    +  //     device.driver
    +  //     device.attributes["dra.example.com"].model
    +  //     device.attributes["ext.example.com"].family
    +  //     device.capacity["dra.example.com"].modules
    +  //
    +  // The device.driver field can be used to check for a specific driver,
    +  // either as a high-level precondition (i.e. you only want to consider
    +  // devices from this driver) or as part of a multi-clause expression
    +  // that is meant to consider devices from different drivers.
    +  //
    +  // The value type of each attribute is defined by the device
    +  // definition, and users who write these expressions must consult the
    +  // documentation for their specific drivers. The value type of each
    +  // capacity is Quantity.
    +  //
    +  // If an unknown prefix is used as a lookup in either device.attributes
    +  // or device.capacity, an empty map will be returned. Any reference to
    +  // an unknown field will cause an evaluation error and allocation to
    +  // abort.
    +  //
    +  // A robust expression should check for the existence of attributes
    +  // before referencing them.
    +  //
    +  // For ease of use, the cel.bind() function is enabled, and can be used
    +  // to simplify expressions that access multiple attributes with the
    +  // same domain. For example:
    +  //
    +  //     cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
    +  //
    +  // +required
    +  optional string expression = 1;
    +}
    +
    +// Device represents one individual hardware instance that can be selected based
    +// on its attributes. Besides the name, exactly one field must be set.
    +message Device {
+  // Name is a unique identifier among all devices managed by
    +  // the driver in the pool. It must be a DNS label.
    +  //
    +  // +required
    +  optional string name = 1;
    +
    +  // Basic defines one device instance.
    +  //
    +  // +optional
    +  // +oneOf=deviceType
    +  optional BasicDevice basic = 2;
    +}
    +
    +// DeviceAllocationConfiguration gets embedded in an AllocationResult.
    +message DeviceAllocationConfiguration {
+  // Source records whether the configuration comes from a class (and thus
+  // is not something that a normal user would have been able to set)
+  // or from a claim.
    +  //
    +  // +required
    +  optional string source = 1;
    +
    +  // Requests lists the names of requests where the configuration applies.
+  // If empty, it applies to all requests.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated string requests = 2;
    +
    +  optional DeviceConfiguration deviceConfiguration = 3;
    +}
    +
    +// DeviceAllocationResult is the result of allocating devices.
    +message DeviceAllocationResult {
    +  // Results lists all allocated devices.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceRequestAllocationResult results = 1;
    +
    +  // This field is a combination of all the claim and class configuration parameters.
    +  // Drivers can distinguish between those based on a flag.
    +  //
    +  // This includes configuration parameters for drivers which have no allocated
    +  // devices in the result because it is up to the drivers which configuration
    +  // parameters they support. They can silently ignore unknown configuration
    +  // parameters.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceAllocationConfiguration config = 2;
    +}
    +
    +// DeviceAttribute must have exactly one field set.
    +message DeviceAttribute {
    +  // IntValue is a number.
    +  //
    +  // +optional
    +  // +oneOf=ValueType
    +  optional int64 int = 2;
    +
    +  // BoolValue is a true/false value.
    +  //
    +  // +optional
    +  // +oneOf=ValueType
    +  optional bool bool = 3;
    +
    +  // StringValue is a string. Must not be longer than 64 characters.
    +  //
    +  // +optional
    +  // +oneOf=ValueType
    +  optional string string = 4;
    +
    +  // VersionValue is a semantic version according to semver.org spec 2.0.0.
    +  // Must not be longer than 64 characters.
    +  //
    +  // +optional
    +  // +oneOf=ValueType
    +  optional string version = 5;
    +}
    +
    +// DeviceClaim defines how to request devices with a ResourceClaim.
    +message DeviceClaim {
    +  // Requests represent individual requests for distinct devices which
    +  // must all be satisfied. If empty, nothing needs to be allocated.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceRequest requests = 1;
    +
    +  // These constraints must be satisfied by the set of devices that get
    +  // allocated for the claim.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceConstraint constraints = 2;
    +
    +  // This field holds configuration for multiple potential drivers which
    +  // could satisfy requests in this claim. It is ignored while allocating
    +  // the claim.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceClaimConfiguration config = 3;
    +}
    +
    +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
    +message DeviceClaimConfiguration {
    +  // Requests lists the names of requests where the configuration applies.
    +  // If empty, it applies to all requests.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated string requests = 1;
    +
    +  optional DeviceConfiguration deviceConfiguration = 2;
    +}
    +
    +// DeviceClass is a vendor- or admin-provided resource that contains
    +// device configuration and selectors. It can be referenced in
    +// the device requests of a claim to apply these presets.
    +// Cluster scoped.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +message DeviceClass {
    +  // Standard object metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Spec defines what can be allocated and how to configure it.
    +  //
    +  // This is mutable. Consumers have to be prepared for classes changing
    +  // at any time, either because they get updated or replaced. Claim
    +  // allocations are done once based on whatever was set in classes at
    +  // the time of allocation.
    +  //
    +  // Changing the spec automatically increments the metadata.generation number.
    +  optional DeviceClassSpec spec = 2;
    +}
    +
    +// DeviceClassConfiguration is used in DeviceClass.
    +message DeviceClassConfiguration {
    +  optional DeviceConfiguration deviceConfiguration = 1;
    +}
    +
    +// DeviceClassList is a collection of classes.
    +message DeviceClassList {
    +  // Standard list metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // Items is the list of resource classes.
    +  repeated DeviceClass items = 2;
    +}
    +
    +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
    +// and how to configure it.
    +message DeviceClassSpec {
    +  // Each selector must be satisfied by a device which is claimed via this class.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceSelector selectors = 1;
    +
    +  // Config defines configuration parameters that apply to each device that is claimed via this class.
+  // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
    +  // configuration applies to exactly one driver.
    +  //
    +  // They are passed to the driver, but are not considered while allocating the claim.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceClassConfiguration config = 2;
    +
    +  // Only nodes matching the selector will be considered by the scheduler
    +  // when trying to find a Node that fits a Pod when that Pod uses
    +  // a claim that has not been allocated yet *and* that claim
    +  // gets allocated through a control plane controller. It is ignored
    +  // when the claim does not use a control plane controller
    +  // for allocation.
    +  //
    +  // Setting this field is optional. If unset, all Nodes are candidates.
    +  //
    +  // This is an alpha field and requires enabling the DRAControlPlaneController
    +  // feature gate.
    +  //
    +  // +optional
    +  // +featureGate=DRAControlPlaneController
    +  optional .k8s.io.api.core.v1.NodeSelector suitableNodes = 3;
    +}
    +
    +// DeviceConfiguration must have exactly one field set. It gets embedded
    +// inline in some other structs which have other fields, so field names must
    +// not conflict with those.
    +message DeviceConfiguration {
    +  // Opaque provides driver-specific configuration parameters.
    +  //
    +  // +optional
    +  // +oneOf=ConfigurationType
    +  optional OpaqueDeviceConfiguration opaque = 1;
    +}
    +
    +// DeviceConstraint must have exactly one field set besides Requests.
    +message DeviceConstraint {
    +  // Requests is a list of the one or more requests in this claim which
    +  // must co-satisfy this constraint. If a request is fulfilled by
    +  // multiple devices, then all of the devices must satisfy the
    +  // constraint. If this is not specified, this constraint applies to all
    +  // requests in this claim.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated string requests = 1;
    +
    +  // MatchAttribute requires that all devices in question have this
    +  // attribute and that its type and value are the same across those
    +  // devices.
    +  //
    +  // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
    +  // then only devices in the same NUMA node will be chosen. A device which
    +  // does not have that attribute will not be chosen. All devices should
    +  // use a value of the same type for this attribute because that is part of
    +  // its specification, but if one device doesn't, then it also will not be
    +  // chosen.
    +  //
    +  // Must include the domain qualifier.
    +  //
    +  // +optional
    +  // +oneOf=ConstraintType
    +  optional string matchAttribute = 2;
    +}
    +
    +// DeviceRequest is a request for devices required for a claim.
    +// This is typically a request for a single resource like a device, but can
    +// also ask for several identical devices.
    +//
    +// A DeviceClassName is currently required. Clients must check that it is
+// indeed set. Its absence indicates that something changed in a way that
    +// is not supported by the client yet, in which case it must refuse to
    +// handle the request.
    +message DeviceRequest {
    +  // Name can be used to reference this request in a pod.spec.containers[].resources.claims
    +  // entry and in a constraint of the claim.
    +  //
    +  // Must be a DNS label.
    +  //
    +  // +required
    +  optional string name = 1;
    +
    +  // DeviceClassName references a specific DeviceClass, which can define
    +  // additional configuration and selectors to be inherited by this
    +  // request.
    +  //
    +  // A class is required. Which classes are available depends on the cluster.
    +  //
    +  // Administrators may use this to restrict which devices may get
    +  // requested by only installing classes with selectors for permitted
    +  // devices. If users are free to request anything without restrictions,
    +  // then administrators can create an empty DeviceClass for users
    +  // to reference.
    +  //
    +  // +required
    +  optional string deviceClassName = 2;
    +
    +  // Selectors define criteria which must be satisfied by a specific
    +  // device in order for that device to be considered for this
    +  // request. All selectors must be satisfied for a device to be
    +  // considered.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated DeviceSelector selectors = 3;
    +
    +  // AllocationMode and its related fields define how devices are allocated
    +  // to satisfy this request. Supported values are:
    +  //
    +  // - ExactCount: This request is for a specific number of devices.
    +  //   This is the default. The exact number is provided in the
    +  //   count field.
    +  //
    +  // - All: This request is for all of the matching devices in a pool.
    +  //   Allocation will fail if some devices are already allocated,
    +  //   unless adminAccess is requested.
    +  //
+  // If AllocationMode is not specified, the default mode is ExactCount. If
    +  // the mode is ExactCount and count is not specified, the default count is
    +  // one. Any other requests must specify this field.
    +  //
    +  // More modes may get added in the future. Clients must refuse to handle
    +  // requests with unknown modes.
    +  //
    +  // +optional
    +  optional string allocationMode = 4;
    +
    +  // Count is used only when the count mode is "ExactCount". Must be greater than zero.
    +  // If AllocationMode is ExactCount and this field is not specified, the default is one.
    +  //
    +  // +optional
    +  // +oneOf=AllocationMode
    +  optional int64 count = 5;
    +
    +  // AdminAccess indicates that this is a claim for administrative access
    +  // to the device(s). Claims with AdminAccess are expected to be used for
    +  // monitoring or other management services for a device.  They ignore
    +  // all ordinary claims to the device with respect to access modes and
    +  // any resource allocations.
    +  //
    +  // +optional
    +  // +default=false
    +  optional bool adminAccess = 6;
    +}
    +
    +// DeviceRequestAllocationResult contains the allocation result for one request.
    +message DeviceRequestAllocationResult {
    +  // Request is the name of the request in the claim which caused this
    +  // device to be allocated. Multiple devices may have been allocated
    +  // per request.
    +  //
    +  // +required
    +  optional string request = 1;
    +
    +  // Driver specifies the name of the DRA driver whose kubelet
    +  // plugin should be invoked to process the allocation once the claim is
    +  // needed on a node.
    +  //
    +  // Must be a DNS subdomain and should end with a DNS domain owned by the
    +  // vendor of the driver.
    +  //
    +  // +required
    +  optional string driver = 2;
    +
    +  // This name together with the driver name and the device name field
+  // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
    +  //
    +  // Must not be longer than 253 characters and may contain one or more
    +  // DNS sub-domains separated by slashes.
    +  //
    +  // +required
    +  optional string pool = 3;
    +
    +  // Device references one device instance via its name in the driver's
    +  // resource pool. It must be a DNS label.
    +  //
    +  // +required
    +  optional string device = 4;
    +}
    +
    +// DeviceSelector must have exactly one field set.
    +message DeviceSelector {
    +  // CEL contains a CEL expression for selecting a device.
    +  //
    +  // +optional
    +  // +oneOf=SelectorType
    +  optional CELDeviceSelector cel = 1;
    +}
    +
    +// OpaqueDeviceConfiguration contains configuration parameters for a driver
    +// in a format defined by the driver vendor.
    +message OpaqueDeviceConfiguration {
    +  // Driver is used to determine which kubelet plugin needs
    +  // to be passed these configuration parameters.
    +  //
    +  // An admission policy provided by the driver developer could use this
    +  // to decide whether it needs to validate them.
    +  //
    +  // Must be a DNS subdomain and should end with a DNS domain owned by the
    +  // vendor of the driver.
    +  //
    +  // +required
    +  optional string driver = 1;
    +
    +  // Parameters can contain arbitrary data. It is the responsibility of
    +  // the driver developer to handle validation and versioning. Typically this
    +  // includes self-identification and a version ("kind" + "apiVersion" for
    +  // Kubernetes types), with conversion between different versions.
    +  //
    +  // +required
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2;
    +}
    +
    +// PodSchedulingContext objects hold information that is needed to schedule
    +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
    +// mode.
    +//
    +// This is an alpha type and requires enabling the DRAControlPlaneController
    +// feature gate.
    +message PodSchedulingContext {
    +  // Standard object metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Spec describes where resources for the Pod are needed.
    +  optional PodSchedulingContextSpec spec = 2;
    +
    +  // Status describes where resources for the Pod can be allocated.
    +  //
    +  // +optional
    +  optional PodSchedulingContextStatus status = 3;
    +}
    +
    +// PodSchedulingContextList is a collection of Pod scheduling objects.
    +message PodSchedulingContextList {
    +  // Standard list metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // Items is the list of PodSchedulingContext objects.
    +  repeated PodSchedulingContext items = 2;
    +}
    +
    +// PodSchedulingContextSpec describes where resources for the Pod are needed.
    +message PodSchedulingContextSpec {
    +  // SelectedNode is the node for which allocation of ResourceClaims that
    +  // are referenced by the Pod and that use "WaitForFirstConsumer"
    +  // allocation is to be attempted.
    +  //
    +  // +optional
    +  optional string selectedNode = 1;
    +
    +  // PotentialNodes lists nodes where the Pod might be able to run.
    +  //
    +  // The size of this field is limited to 128. This is large enough for
    +  // many clusters. Larger clusters may need more attempts to find a node
    +  // that suits all pending resources. This may get increased in the
    +  // future, but not reduced.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated string potentialNodes = 2;
    +}
    +
    +// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
    +message PodSchedulingContextStatus {
    +  // ResourceClaims describes resource availability for each
    +  // pod.spec.resourceClaim entry where the corresponding ResourceClaim
    +  // uses "WaitForFirstConsumer" allocation mode.
    +  //
    +  // +listType=map
    +  // +listMapKey=name
    +  // +optional
    +  repeated ResourceClaimSchedulingStatus resourceClaims = 1;
    +}
    +
    +// ResourceClaim describes a request for access to resources in the cluster,
    +// for use by workloads. For example, if a workload needs an accelerator device
    +// with specific properties, this is how that request is expressed. The status
    +// stanza tracks whether this claim has been satisfied and what specific
    +// resources have been allocated.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +message ResourceClaim {
    +  // Standard object metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Spec describes what is being requested and how to configure it.
    +  // The spec is immutable.
    +  optional ResourceClaimSpec spec = 2;
    +
    +  // Status describes whether the claim is ready to use and what has been allocated.
    +  // +optional
    +  optional ResourceClaimStatus status = 3;
    +}
    +
    +// ResourceClaimConsumerReference contains enough information to let you
    +// locate the consumer of a ResourceClaim. The user must be a resource in the same
    +// namespace as the ResourceClaim.
    +message ResourceClaimConsumerReference {
    +  // APIGroup is the group for the resource being referenced. It is
    +  // empty for the core API. This matches the group in the APIVersion
    +  // that is used when creating the resources.
    +  // +optional
    +  optional string apiGroup = 1;
    +
    +  // Resource is the type of resource being referenced, for example "pods".
    +  // +required
    +  optional string resource = 3;
    +
    +  // Name is the name of resource being referenced.
    +  // +required
    +  optional string name = 4;
    +
    +  // UID identifies exactly one incarnation of the resource.
    +  // +required
    +  optional string uid = 5;
    +}
    +
    +// ResourceClaimList is a collection of claims.
    +message ResourceClaimList {
    +  // Standard list metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // Items is the list of resource claims.
    +  repeated ResourceClaim items = 2;
    +}
    +
    +// ResourceClaimSchedulingStatus contains information about one particular
    +// ResourceClaim with "WaitForFirstConsumer" allocation mode.
    +message ResourceClaimSchedulingStatus {
    +  // Name matches the pod.spec.resourceClaims[*].Name field.
    +  //
    +  // +required
    +  optional string name = 1;
    +
    +  // UnsuitableNodes lists nodes that the ResourceClaim cannot be
    +  // allocated for.
    +  //
    +  // The size of this field is limited to 128, the same as for
    +  // PodSchedulingSpec.PotentialNodes. This may get increased in the
    +  // future, but not reduced.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated string unsuitableNodes = 2;
    +}
    +
    +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
    +message ResourceClaimSpec {
    +  // Devices defines how to request devices.
    +  //
    +  // +optional
    +  optional DeviceClaim devices = 1;
    +
    +  // Controller is the name of the DRA driver that is meant
    +  // to handle allocation of this claim. If empty, allocation is handled
    +  // by the scheduler while scheduling a pod.
    +  //
    +  // Must be a DNS subdomain and should end with a DNS domain owned by the
    +  // vendor of the driver.
    +  //
    +  // This is an alpha field and requires enabling the DRAControlPlaneController
    +  // feature gate.
    +  //
    +  // +optional
    +  // +featureGate=DRAControlPlaneController
    +  optional string controller = 2;
    +}
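
To make the claim-side types above concrete, here is a rough sketch of a ResourceClaimSpec requesting one device via a DeviceClass and a CEL selector. It assumes the Go field names mirror the proto fields one-to-one (Devices, Requests, DeviceClassName, CEL, Expression); the driver, class, and attribute names are invented for illustration:

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// One request for exactly one device satisfying a DeviceClass plus a
	// CEL selector; constraints and per-claim config are left empty.
	spec := resourcev1alpha3.ResourceClaimSpec{
		Devices: resourcev1alpha3.DeviceClaim{
			Requests: []resourcev1alpha3.DeviceRequest{{
				Name:            "gpu",             // referenced from pod.spec.containers[].resources.claims
				DeviceClassName: "gpu.example.com", // hypothetical DeviceClass
				AllocationMode:  "ExactCount",
				Count:           1,
				Selectors: []resourcev1alpha3.DeviceSelector{{
					CEL: &resourcev1alpha3.CELDeviceSelector{
						Expression: `device.attributes["gpu.example.com"].model == "a100"`,
					},
				}},
			}},
		},
	}
	fmt.Printf("request %q targets class %q\n",
		spec.Devices.Requests[0].Name, spec.Devices.Requests[0].DeviceClassName)
}
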
    +
    +// ResourceClaimStatus tracks whether the resource has been allocated and what
    +// the result of that was.
    +message ResourceClaimStatus {
    +  // Allocation is set once the claim has been allocated successfully.
    +  //
    +  // +optional
    +  optional AllocationResult allocation = 1;
    +
    +  // ReservedFor indicates which entities are currently allowed to use
    +  // the claim. A Pod which references a ResourceClaim which is not
    +  // reserved for that Pod will not be started. A claim that is in
    +  // use or might be in use because it has been reserved must not get
    +  // deallocated.
    +  //
    +  // In a cluster with multiple scheduler instances, two pods might get
    +  // scheduled concurrently by different schedulers. When they reference
    +  // the same ResourceClaim which already has reached its maximum number
    +  // of consumers, only one pod can be scheduled.
    +  //
    +  // Both schedulers try to add their pod to the claim.status.reservedFor
    +  // field, but only the update that reaches the API server first gets
    +  // stored. The other one fails with an error and the scheduler
    +  // which issued it knows that it must put the pod back into the queue,
    +  // waiting for the ResourceClaim to become usable again.
    +  //
    +  // There can be at most 32 such reservations. This may get increased in
    +  // the future, but not reduced.
    +  //
    +  // +optional
    +  // +listType=map
    +  // +listMapKey=uid
    +  // +patchStrategy=merge
    +  // +patchMergeKey=uid
    +  repeated ResourceClaimConsumerReference reservedFor = 2;
    +
    +  // Indicates that a claim is to be deallocated. While this is set,
    +  // no new consumers may be added to ReservedFor.
    +  //
    +  // This is only used if the claim needs to be deallocated by a DRA driver.
    +  // That driver then must deallocate this claim and reset the field
    +  // together with clearing the Allocation field.
    +  //
    +  // This is an alpha field and requires enabling the DRAControlPlaneController
    +  // feature gate.
    +  //
    +  // +optional
    +  // +featureGate=DRAControlPlaneController
    +  optional bool deallocationRequested = 3;
    +}
    +
    +// ResourceClaimTemplate is used to produce ResourceClaim objects.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +message ResourceClaimTemplate {
    +  // Standard object metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Describes the ResourceClaim that is to be generated.
    +  //
    +  // This field is immutable. A ResourceClaim will get created by the
    +  // control plane for a Pod when needed and then not get updated
    +  // anymore.
    +  optional ResourceClaimTemplateSpec spec = 2;
    +}
    +
    +// ResourceClaimTemplateList is a collection of claim templates.
    +message ResourceClaimTemplateList {
    +  // Standard list metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // Items is the list of resource claim templates.
    +  repeated ResourceClaimTemplate items = 2;
    +}
    +
    +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
    +message ResourceClaimTemplateSpec {
+  // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
    +  // when creating it. No other fields are allowed and will be rejected during
    +  // validation.
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Spec for the ResourceClaim. The entire content is copied unchanged
    +  // into the ResourceClaim that gets created from this template. The
    +  // same fields as in a ResourceClaim are also valid here.
    +  optional ResourceClaimSpec spec = 2;
    +}
    +
    +// ResourcePool describes the pool that ResourceSlices belong to.
    +message ResourcePool {
    +  // Name is used to identify the pool. For node-local devices, this
    +  // is often the node name, but this is not required.
    +  //
    +  // It must not be longer than 253 characters and must consist of one or more DNS sub-domains
    +  // separated by slashes. This field is immutable.
    +  //
    +  // +required
    +  optional string name = 1;
    +
    +  // Generation tracks the change in a pool over time. Whenever a driver
    +  // changes something about one or more of the resources in a pool, it
    +  // must change the generation in all ResourceSlices which are part of
    +  // that pool. Consumers of ResourceSlices should only consider
    +  // resources from the pool with the highest generation number. The
    +  // generation may be reset by drivers, which should be fine for
    +  // consumers, assuming that all ResourceSlices in a pool are updated to
    +  // match or deleted.
    +  //
    +  // Combined with ResourceSliceCount, this mechanism enables consumers to
    +  // detect pools which are comprised of multiple ResourceSlices and are
    +  // in an incomplete state.
    +  //
    +  // +required
    +  optional int64 generation = 2;
    +
    +  // ResourceSliceCount is the total number of ResourceSlices in the pool at this
    +  // generation number. Must be greater than zero.
    +  //
    +  // Consumers can use this to check whether they have seen all ResourceSlices
    +  // belonging to the same pool.
    +  //
    +  // +required
    +  optional int64 resourceSliceCount = 3;
    +}
    +
    +// ResourceSlice represents one or more resources in a pool of similar resources,
    +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
    +// ResourceSlices comprise a pool is determined by the driver.
    +//
    +// At the moment, the only supported resources are devices with attributes and capacities.
    +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name.
    +// The ResourceSlice in which a device gets published may change over time. The unique identifier
+// for a device is the tuple <driver name>, <pool name>, <device name>.
    +//
    +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number
    +// and updates all ResourceSlices with that new number and new resource definitions. A consumer
    +// must only use ResourceSlices with the highest generation number and ignore all others.
    +//
    +// When allocating all resources in a pool matching certain criteria or when
    +// looking for the best solution among several different alternatives, a
    +// consumer should check the number of ResourceSlices in a pool (included in
    +// each ResourceSlice) to determine whether its view of a pool is complete and
    +// if not, should wait until the driver has completed updating the pool.
    +//
    +// For resources that are not local to a node, the node name is not set. Instead,
    +// the driver may use a node selector to specify where the devices are available.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +message ResourceSlice {
    +  // Standard object metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Contains the information published by the driver.
    +  //
    +  // Changing the spec automatically increments the metadata.generation number.
    +  optional ResourceSliceSpec spec = 2;
    +}
    +
    +// ResourceSliceList is a collection of ResourceSlices.
    +message ResourceSliceList {
    +  // Standard list metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
+  // Items is the list of ResourceSlices.
    +  repeated ResourceSlice items = 2;
    +}
    +
    +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice.
    +message ResourceSliceSpec {
    +  // Driver identifies the DRA driver providing the capacity information.
    +  // A field selector can be used to list only ResourceSlice
    +  // objects with a certain driver name.
    +  //
    +  // Must be a DNS subdomain and should end with a DNS domain owned by the
    +  // vendor of the driver. This field is immutable.
    +  //
    +  // +required
    +  optional string driver = 1;
    +
    +  // Pool describes the pool that this ResourceSlice belongs to.
    +  //
    +  // +required
    +  optional ResourcePool pool = 2;
    +
    +  // NodeName identifies the node which provides the resources in this pool.
    +  // A field selector can be used to list only ResourceSlice
    +  // objects belonging to a certain node.
    +  //
    +  // This field can be used to limit access from nodes to ResourceSlices with
    +  // the same node name. It also indicates to autoscalers that adding
    +  // new nodes of the same type as some old node might also make new
    +  // resources available.
    +  //
    +  // Exactly one of NodeName, NodeSelector and AllNodes must be set.
    +  // This field is immutable.
    +  //
    +  // +optional
    +  // +oneOf=NodeSelection
    +  optional string nodeName = 3;
    +
    +  // NodeSelector defines which nodes have access to the resources in the pool,
    +  // when that pool is not limited to a single node.
    +  //
    +  // Must use exactly one term.
    +  //
    +  // Exactly one of NodeName, NodeSelector and AllNodes must be set.
    +  //
    +  // +optional
    +  // +oneOf=NodeSelection
    +  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4;
    +
    +  // AllNodes indicates that all nodes have access to the resources in the pool.
    +  //
    +  // Exactly one of NodeName, NodeSelector and AllNodes must be set.
    +  //
    +  // +optional
    +  // +oneOf=NodeSelection
    +  optional bool allNodes = 5;
    +
    +  // Devices lists some or all of the devices in this pool.
    +  //
    +  // Must not have more than 128 entries.
    +  //
    +  // +optional
    +  // +listType=atomic
    +  repeated Device devices = 6;
    +}
    +
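
On the publishing side, a node-local ResourceSlice built from the vendored Go types might look roughly like this (a sketch only: the driver, pool, and device names are made up, and per-device attributes/capacity are omitted):

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// One slice of a single-slice, node-local pool, as described by
	// ResourceSliceSpec: exactly one of NodeName/NodeSelector/AllNodes is set.
	slice := resourcev1alpha3.ResourceSlice{
		ObjectMeta: metav1.ObjectMeta{Name: "node-a-gpu.example.com"},
		Spec: resourcev1alpha3.ResourceSliceSpec{
			Driver:   "gpu.example.com", // hypothetical driver name
			NodeName: "node-a",          // node-local pool
			Pool: resourcev1alpha3.ResourcePool{
				Name:               "node-a",
				Generation:         1,
				ResourceSliceCount: 1, // consumers can tell the pool is complete
			},
			Devices: []resourcev1alpha3.Device{
				{Name: "gpu-0"}, // attributes/capacity omitted in this sketch
			},
		},
	}
	fmt.Printf("%s publishes %d device(s) for node %s\n",
		slice.Spec.Driver, len(slice.Spec.Devices), slice.Spec.NodeName)
}
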
    diff --git a/vendor/k8s.io/api/resource/v1alpha3/register.go b/vendor/k8s.io/api/resource/v1alpha3/register.go
    new file mode 100644
    index 0000000000..74044e8cf0
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1alpha3/register.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright 2022 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha3
    +
    +import (
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	"k8s.io/apimachinery/pkg/runtime/schema"
    +)
    +
+// GroupName is the group name used in this package
    +const GroupName = "resource.k8s.io"
    +
+// SchemeGroupVersion is the group version used to register these objects
    +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha3"}
    +
    +// Resource takes an unqualified resource and returns a Group qualified GroupResource
    +func Resource(resource string) schema.GroupResource {
    +	return SchemeGroupVersion.WithResource(resource).GroupResource()
    +}
    +
    +var (
    +	// We only register manually written functions here. The registration of the
    +	// generated functions takes place in the generated files. The separation
    +	// makes the code compile even when the generated files are missing.
    +	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
    +	AddToScheme   = SchemeBuilder.AddToScheme
    +)
    +
    +// Adds the list of known types to the given scheme.
    +func addKnownTypes(scheme *runtime.Scheme) error {
    +	scheme.AddKnownTypes(SchemeGroupVersion,
    +		&DeviceClass{},
    +		&DeviceClassList{},
    +		&ResourceClaim{},
    +		&ResourceClaimList{},
    +		&ResourceClaimTemplate{},
    +		&ResourceClaimTemplateList{},
    +		&PodSchedulingContext{},
    +		&PodSchedulingContextList{},
    +		&ResourceSlice{},
    +		&ResourceSliceList{},
    +	)
    +
    +	// Add the watch version that applies
    +	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    +	return nil
    +}
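
The SchemeBuilder above is consumed in the usual way; a minimal sketch of wiring the group into a runtime.Scheme (illustrative only, using just the vendored packages):

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers ResourceSlice, ResourceClaim, DeviceClass, etc.
	// under resource.k8s.io/v1alpha3, plus the shared meta/v1 list options.
	if err := resourcev1alpha3.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(
		resourcev1alpha3.SchemeGroupVersion.WithKind("ResourceSlice")))
	// Output: true
}
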
    diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go
    new file mode 100644
    index 0000000000..4efd2491de
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go
    @@ -0,0 +1,1048 @@
    +/*
    +Copyright 2022 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha3
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/api/resource"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	"k8s.io/apimachinery/pkg/types"
    +	"k8s.io/apimachinery/pkg/util/validation"
    +)
    +
    +const (
    +	// Finalizer is the finalizer that gets set for claims
    +	// which were allocated through a builtin controller.
    +	// Reserved for use by Kubernetes, DRA driver controllers must
    +	// use their own finalizer.
    +	Finalizer = "resource.kubernetes.io/delete-protection"
    +)
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// ResourceSlice represents one or more resources in a pool of similar resources,
    +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
    +// ResourceSlices comprise a pool is determined by the driver.
    +//
    +// At the moment, the only supported resources are devices with attributes and capacities.
    +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name.
    +// The ResourceSlice in which a device gets published may change over time. The unique identifier
+// for a device is the tuple <driver name>, <pool name>, <device name>.
    +//
    +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number
    +// and updates all ResourceSlices with that new number and new resource definitions. A consumer
    +// must only use ResourceSlices with the highest generation number and ignore all others.
    +//
    +// When allocating all resources in a pool matching certain criteria or when
    +// looking for the best solution among several different alternatives, a
    +// consumer should check the number of ResourceSlices in a pool (included in
    +// each ResourceSlice) to determine whether its view of a pool is complete and
    +// if not, should wait until the driver has completed updating the pool.
    +//
    +// For resources that are not local to a node, the node name is not set. Instead,
    +// the driver may use a node selector to specify where the devices are available.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +type ResourceSlice struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Contains the information published by the driver.
    +	//
    +	// Changing the spec automatically increments the metadata.generation number.
    +	Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    +}
    +
    +const (
    +	// ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions]
    +	// field selector to filter based on [ResourceSliceSpec.NodeName].
    +	ResourceSliceSelectorNodeName = "spec.nodeName"
    +	// ResourceSliceSelectorDriver can be used in a [metav1.ListOptions]
    +	// field selector to filter based on [ResourceSliceSpec.Driver].
    +	ResourceSliceSelectorDriver = "spec.driver"
    +)
    +
    +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice.
    +type ResourceSliceSpec struct {
    +	// Driver identifies the DRA driver providing the capacity information.
    +	// A field selector can be used to list only ResourceSlice
    +	// objects with a certain driver name.
    +	//
    +	// Must be a DNS subdomain and should end with a DNS domain owned by the
    +	// vendor of the driver. This field is immutable.
    +	//
    +	// +required
    +	Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
    +
    +	// Pool describes the pool that this ResourceSlice belongs to.
    +	//
    +	// +required
    +	Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"`
    +
    +	// NodeName identifies the node which provides the resources in this pool.
    +	// A field selector can be used to list only ResourceSlice
    +	// objects belonging to a certain node.
    +	//
    +	// This field can be used to limit access from nodes to ResourceSlices with
    +	// the same node name. It also indicates to autoscalers that adding
    +	// new nodes of the same type as some old node might also make new
    +	// resources available.
    +	//
    +	// Exactly one of NodeName, NodeSelector and AllNodes must be set.
    +	// This field is immutable.
    +	//
    +	// +optional
    +	// +oneOf=NodeSelection
    +	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"`
    +
    +	// NodeSelector defines which nodes have access to the resources in the pool,
    +	// when that pool is not limited to a single node.
    +	//
    +	// Must use exactly one term.
    +	//
    +	// Exactly one of NodeName, NodeSelector and AllNodes must be set.
    +	//
    +	// +optional
    +	// +oneOf=NodeSelection
    +	NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"`
    +
    +	// AllNodes indicates that all nodes have access to the resources in the pool.
    +	//
    +	// Exactly one of NodeName, NodeSelector and AllNodes must be set.
    +	//
    +	// +optional
    +	// +oneOf=NodeSelection
    +	AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"`
    +
    +	// Devices lists some or all of the devices in this pool.
    +	//
    +	// Must not have more than 128 entries.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"`
    +}
    +
    +// ResourcePool describes the pool that ResourceSlices belong to.
    +type ResourcePool struct {
    +	// Name is used to identify the pool. For node-local devices, this
    +	// is often the node name, but this is not required.
    +	//
    +	// It must not be longer than 253 characters and must consist of one or more DNS sub-domains
    +	// separated by slashes. This field is immutable.
    +	//
    +	// +required
    +	Name string `json:"name" protobuf:"bytes,1,name=name"`
    +
    +	// Generation tracks the change in a pool over time. Whenever a driver
    +	// changes something about one or more of the resources in a pool, it
    +	// must change the generation in all ResourceSlices which are part of
    +	// that pool. Consumers of ResourceSlices should only consider
    +	// resources from the pool with the highest generation number. The
    +	// generation may be reset by drivers, which should be fine for
    +	// consumers, assuming that all ResourceSlices in a pool are updated to
    +	// match or deleted.
    +	//
    +	// Combined with ResourceSliceCount, this mechanism enables consumers to
    +	// detect pools which are comprised of multiple ResourceSlices and are
    +	// in an incomplete state.
    +	//
    +	// +required
    +	Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"`
    +
    +	// ResourceSliceCount is the total number of ResourceSlices in the pool at this
    +	// generation number. Must be greater than zero.
    +	//
    +	// Consumers can use this to check whether they have seen all ResourceSlices
    +	// belonging to the same pool.
    +	//
    +	// +required
    +	ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"`
    +}
    +
    +const ResourceSliceMaxSharedCapacity = 128
    +const ResourceSliceMaxDevices = 128
    +const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name.
    +
    +// Device represents one individual hardware instance that can be selected based
    +// on its attributes. Besides the name, exactly one field must be set.
    +type Device struct {
+	// Name is a unique identifier among all devices managed by
    +	// the driver in the pool. It must be a DNS label.
    +	//
    +	// +required
    +	Name string `json:"name" protobuf:"bytes,1,name=name"`
    +
    +	// Basic defines one device instance.
    +	//
    +	// +optional
    +	// +oneOf=deviceType
    +	Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"`
    +}
    +
    +// BasicDevice defines one device instance.
    +type BasicDevice struct {
    +	// Attributes defines the set of attributes for this device.
    +	// The name of each attribute must be unique in that set.
    +	//
    +	// The maximum number of attributes and capacities combined is 32.
    +	//
    +	// +optional
    +	Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"`
    +
    +	// Capacity defines the set of capacities for this device.
    +	// The name of each capacity must be unique in that set.
    +	//
    +	// The maximum number of attributes and capacities combined is 32.
    +	//
    +	// +optional
    +	Capacity map[QualifiedName]resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"`
    +}
    +
+// Limit for the sum of the number of entries in a device's attributes and capacity maps.
    +const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32
    +
    +// QualifiedName is the name of a device attribute or capacity.
    +//
    +// Attributes and capacities are defined either by the owner of the specific
    +// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes
    +// project). Because they are sometimes compared across devices, a given name
    +// is expected to mean the same thing and have the same type on all devices.
    +//
    +// Names must be either a C identifier (e.g. "theName") or a DNS subdomain
    +// followed by a slash ("/") followed by a C identifier
    +// (e.g. "dra.example.com/theName"). Names which do not include the
    +// domain prefix are assumed to be part of the driver's domain. Attributes
    +// or capacities defined by 3rd parties must include the domain prefix.
    +//
    +// The maximum length for the DNS subdomain is 63 characters (same as
    +// for driver names) and the maximum length of the C identifier
    +// is 32.
    +type QualifiedName string
    +
    +// FullyQualifiedName is a QualifiedName where the domain is set.
    +type FullyQualifiedName string
    +
+// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`<domain>/<ID>`).
    +const DeviceMaxIDLength = 32
    +
    +// DeviceAttribute must have exactly one field set.
    +type DeviceAttribute struct {
    +	// The Go field names below have a Value suffix to avoid a conflict between the
    +	// field "String" and the corresponding method. That method is required.
    +	// The Kubernetes API is defined without that suffix to keep it more natural.
    +
    +	// IntValue is a number.
    +	//
    +	// +optional
    +	// +oneOf=ValueType
    +	IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"`
    +
    +	// BoolValue is a true/false value.
    +	//
    +	// +optional
    +	// +oneOf=ValueType
    +	BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"`
    +
    +	// StringValue is a string. Must not be longer than 64 characters.
    +	//
    +	// +optional
    +	// +oneOf=ValueType
    +	StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"`
    +
    +	// VersionValue is a semantic version according to semver.org spec 2.0.0.
    +	// Must not be longer than 64 characters.
    +	//
    +	// +optional
    +	// +oneOf=ValueType
    +	VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"`
    +}
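
The relationship between ResourceSlice, ResourcePool, Device and the attribute/capacity maps above is easiest to see with a concrete value. The following is only an illustrative sketch: the driver name "dra.example.com", the node and device names, and the "model"/"memory" entries are hypothetical, not part of the API.

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleSlice sketches a node-local pool published as a single ResourceSlice.
func exampleSlice() resourceapi.ResourceSlice {
	model := "a100" // hypothetical attribute value
	return resourceapi.ResourceSlice{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1-dra-example-com-0"},
		Spec: resourceapi.ResourceSliceSpec{
			Driver:   "dra.example.com", // hypothetical driver name
			NodeName: "node-1",          // exactly one of NodeName, NodeSelector, AllNodes is set
			Pool: resourceapi.ResourcePool{
				Name:               "node-1",
				Generation:         1, // incremented whenever the pool changes
				ResourceSliceCount: 1, // lets consumers detect an incomplete view of the pool
			},
			Devices: []resourceapi.Device{{
				Name: "gpu-0",
				Basic: &resourceapi.BasicDevice{
					Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
						"model": {StringValue: &model},
					},
					Capacity: map[resourceapi.QualifiedName]resource.Quantity{
						"memory": resource.MustParse("40Gi"),
					},
				},
			}},
		},
	}
}

Keeping Pool.Generation and ResourceSliceCount accurate across updates is what allows consumers to ignore stale slices and to notice when their view of a pool is still incomplete.
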
    +
    +// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value.
    +const DeviceAttributeMaxValueLength = 64
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// ResourceSliceList is a collection of ResourceSlices.
    +type ResourceSliceList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
+	// Items is the list of ResourceSlices.
    +	Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// +genclient
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.26
    +
    +// ResourceClaim describes a request for access to resources in the cluster,
    +// for use by workloads. For example, if a workload needs an accelerator device
    +// with specific properties, this is how that request is expressed. The status
    +// stanza tracks whether this claim has been satisfied and what specific
    +// resources have been allocated.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +type ResourceClaim struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Spec describes what is being requested and how to configure it.
    +	// The spec is immutable.
    +	Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    +
    +	// Status describes whether the claim is ready to use and what has been allocated.
    +	// +optional
    +	Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    +}
    +
    +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
    +type ResourceClaimSpec struct {
    +	// Devices defines how to request devices.
    +	//
    +	// +optional
    +	Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"`
    +
    +	// Controller is the name of the DRA driver that is meant
    +	// to handle allocation of this claim. If empty, allocation is handled
    +	// by the scheduler while scheduling a pod.
    +	//
    +	// Must be a DNS subdomain and should end with a DNS domain owned by the
    +	// vendor of the driver.
    +	//
    +	// This is an alpha field and requires enabling the DRAControlPlaneController
    +	// feature gate.
    +	//
    +	// +optional
    +	// +featureGate=DRAControlPlaneController
    +	Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"`
    +}
    +
    +// DeviceClaim defines how to request devices with a ResourceClaim.
    +type DeviceClaim struct {
    +	// Requests represent individual requests for distinct devices which
    +	// must all be satisfied. If empty, nothing needs to be allocated.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"`
    +
    +	// These constraints must be satisfied by the set of devices that get
    +	// allocated for the claim.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"`
    +
    +	// This field holds configuration for multiple potential drivers which
    +	// could satisfy requests in this claim. It is ignored while allocating
    +	// the claim.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"`
    +}
    +
    +const (
    +	DeviceRequestsMaxSize    = AllocationResultsMaxSize
    +	DeviceConstraintsMaxSize = 32
    +	DeviceConfigMaxSize      = 32
    +)
    +
    +// DeviceRequest is a request for devices required for a claim.
    +// This is typically a request for a single resource like a device, but can
    +// also ask for several identical devices.
    +//
    +// A DeviceClassName is currently required. Clients must check that it is
+// indeed set. Its absence indicates that something changed in a way that
    +// is not supported by the client yet, in which case it must refuse to
    +// handle the request.
    +type DeviceRequest struct {
    +	// Name can be used to reference this request in a pod.spec.containers[].resources.claims
    +	// entry and in a constraint of the claim.
    +	//
    +	// Must be a DNS label.
    +	//
    +	// +required
    +	Name string `json:"name" protobuf:"bytes,1,name=name"`
    +
    +	// DeviceClassName references a specific DeviceClass, which can define
    +	// additional configuration and selectors to be inherited by this
    +	// request.
    +	//
    +	// A class is required. Which classes are available depends on the cluster.
    +	//
    +	// Administrators may use this to restrict which devices may get
    +	// requested by only installing classes with selectors for permitted
    +	// devices. If users are free to request anything without restrictions,
    +	// then administrators can create an empty DeviceClass for users
    +	// to reference.
    +	//
    +	// +required
    +	DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"`
    +
    +	// Selectors define criteria which must be satisfied by a specific
    +	// device in order for that device to be considered for this
    +	// request. All selectors must be satisfied for a device to be
    +	// considered.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"`
    +
    +	// AllocationMode and its related fields define how devices are allocated
    +	// to satisfy this request. Supported values are:
    +	//
    +	// - ExactCount: This request is for a specific number of devices.
    +	//   This is the default. The exact number is provided in the
    +	//   count field.
    +	//
    +	// - All: This request is for all of the matching devices in a pool.
    +	//   Allocation will fail if some devices are already allocated,
    +	//   unless adminAccess is requested.
    +	//
+	// If AllocationMode is not specified, the default mode is ExactCount. If
    +	// the mode is ExactCount and count is not specified, the default count is
    +	// one. Any other requests must specify this field.
    +	//
    +	// More modes may get added in the future. Clients must refuse to handle
    +	// requests with unknown modes.
    +	//
    +	// +optional
    +	AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"`
    +
    +	// Count is used only when the count mode is "ExactCount". Must be greater than zero.
    +	// If AllocationMode is ExactCount and this field is not specified, the default is one.
    +	//
    +	// +optional
    +	// +oneOf=AllocationMode
    +	Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"`
    +
    +	// AdminAccess indicates that this is a claim for administrative access
    +	// to the device(s). Claims with AdminAccess are expected to be used for
    +	// monitoring or other management services for a device.  They ignore
    +	// all ordinary claims to the device with respect to access modes and
    +	// any resource allocations.
    +	//
    +	// +optional
    +	// +default=false
    +	AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"`
    +}
    +
    +const (
    +	DeviceSelectorsMaxSize = 32
    +)
    +
    +type DeviceAllocationMode string
    +
+// Valid [DeviceRequest.AllocationMode] values.
    +const (
    +	DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount")
    +	DeviceAllocationModeAll        = DeviceAllocationMode("All")
    +)
    +
    +// DeviceSelector must have exactly one field set.
    +type DeviceSelector struct {
    +	// CEL contains a CEL expression for selecting a device.
    +	//
    +	// +optional
    +	// +oneOf=SelectorType
    +	CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"`
    +}
    +
    +// CELDeviceSelector contains a CEL expression for selecting a device.
    +type CELDeviceSelector struct {
    +	// Expression is a CEL expression which evaluates a single device. It
    +	// must evaluate to true when the device under consideration satisfies
    +	// the desired criteria, and false when it does not. Any other result
    +	// is an error and causes allocation of devices to abort.
    +	//
    +	// The expression's input is an object named "device", which carries
    +	// the following properties:
    +	//  - driver (string): the name of the driver which defines this device.
    +	//  - attributes (map[string]object): the device's attributes, grouped by prefix
    +	//    (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+	//    of the attributes which were prefixed by "dra.example.com").
    +	//  - capacity (map[string]object): the device's capacities, grouped by prefix.
    +	//
    +	// Example: Consider a device with driver="dra.example.com", which exposes
    +	// two attributes named "model" and "ext.example.com/family" and which
    +	// exposes one capacity named "modules". This input to this expression
    +	// would have the following fields:
    +	//
    +	//     device.driver
    +	//     device.attributes["dra.example.com"].model
    +	//     device.attributes["ext.example.com"].family
    +	//     device.capacity["dra.example.com"].modules
    +	//
    +	// The device.driver field can be used to check for a specific driver,
    +	// either as a high-level precondition (i.e. you only want to consider
    +	// devices from this driver) or as part of a multi-clause expression
    +	// that is meant to consider devices from different drivers.
    +	//
    +	// The value type of each attribute is defined by the device
    +	// definition, and users who write these expressions must consult the
    +	// documentation for their specific drivers. The value type of each
    +	// capacity is Quantity.
    +	//
    +	// If an unknown prefix is used as a lookup in either device.attributes
    +	// or device.capacity, an empty map will be returned. Any reference to
    +	// an unknown field will cause an evaluation error and allocation to
    +	// abort.
    +	//
    +	// A robust expression should check for the existence of attributes
    +	// before referencing them.
    +	//
    +	// For ease of use, the cel.bind() function is enabled, and can be used
    +	// to simplify expressions that access multiple attributes with the
    +	// same domain. For example:
    +	//
    +	//     cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
    +	//
    +	// +required
    +	Expression string `json:"expression" protobuf:"bytes,1,name=expression"`
    +}
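
As a small, illustrative example of the expression format described above (the driver and attribute names are hypothetical), such an expression might be wrapped in a DeviceSelector like this; the leading driver check also acts as a guard so the attribute lookup is only evaluated for devices of that driver:

package example

import resourceapi "k8s.io/api/resource/v1alpha3"

// gpuSelector matches devices from one hypothetical driver whose "model"
// attribute equals "a100"; devices from other drivers short-circuit to false.
var gpuSelector = resourceapi.DeviceSelector{
	CEL: &resourceapi.CELDeviceSelector{
		Expression: `device.driver == "dra.example.com" && device.attributes["dra.example.com"].model == "a100"`,
	},
}
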
    +
    +// DeviceConstraint must have exactly one field set besides Requests.
    +type DeviceConstraint struct {
    +	// Requests is a list of the one or more requests in this claim which
    +	// must co-satisfy this constraint. If a request is fulfilled by
    +	// multiple devices, then all of the devices must satisfy the
    +	// constraint. If this is not specified, this constraint applies to all
    +	// requests in this claim.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
    +
    +	// MatchAttribute requires that all devices in question have this
    +	// attribute and that its type and value are the same across those
    +	// devices.
    +	//
    +	// For example, if you specified "dra.example.com/numa" (a hypothetical example!),
    +	// then only devices in the same NUMA node will be chosen. A device which
    +	// does not have that attribute will not be chosen. All devices should
    +	// use a value of the same type for this attribute because that is part of
    +	// its specification, but if one device doesn't, then it also will not be
    +	// chosen.
    +	//
    +	// Must include the domain qualifier.
    +	//
    +	// +optional
    +	// +oneOf=ConstraintType
    +	MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"`
    +}
    +
    +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
    +type DeviceClaimConfiguration struct {
    +	// Requests lists the names of requests where the configuration applies.
    +	// If empty, it applies to all requests.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
    +
    +	DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"`
    +}
    +
    +// DeviceConfiguration must have exactly one field set. It gets embedded
    +// inline in some other structs which have other fields, so field names must
    +// not conflict with those.
    +type DeviceConfiguration struct {
    +	// Opaque provides driver-specific configuration parameters.
    +	//
    +	// +optional
    +	// +oneOf=ConfigurationType
    +	Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"`
    +}
    +
    +// OpaqueDeviceConfiguration contains configuration parameters for a driver
    +// in a format defined by the driver vendor.
    +type OpaqueDeviceConfiguration struct {
    +	// Driver is used to determine which kubelet plugin needs
    +	// to be passed these configuration parameters.
    +	//
    +	// An admission policy provided by the driver developer could use this
    +	// to decide whether it needs to validate them.
    +	//
    +	// Must be a DNS subdomain and should end with a DNS domain owned by the
    +	// vendor of the driver.
    +	//
    +	// +required
    +	Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
    +
    +	// Parameters can contain arbitrary data. It is the responsibility of
    +	// the driver developer to handle validation and versioning. Typically this
    +	// includes self-identification and a version ("kind" + "apiVersion" for
    +	// Kubernetes types), with conversion between different versions.
    +	//
    +	// +required
    +	Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"`
    +}
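
Putting the claim-side types together, a ResourceClaim that asks for two identical devices, constrains them to share one attribute, and attaches opaque driver configuration could be sketched as below. The class name "example-gpu", the attribute "dra.example.com/numa", and the JSON parameters are hypothetical.

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// exampleClaim sketches a claim for two devices from one hypothetical class.
func exampleClaim() resourceapi.ResourceClaim {
	numa := resourceapi.FullyQualifiedName("dra.example.com/numa")
	return resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "two-gpus", Namespace: "default"},
		Spec: resourceapi.ResourceClaimSpec{
			Devices: resourceapi.DeviceClaim{
				Requests: []resourceapi.DeviceRequest{{
					Name:            "gpus",
					DeviceClassName: "example-gpu", // hypothetical DeviceClass
					AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
					Count:           2,
				}},
				Constraints: []resourceapi.DeviceConstraint{{
					Requests:       []string{"gpus"},
					MatchAttribute: &numa, // all allocated devices must agree on this attribute
				}},
				Config: []resourceapi.DeviceClaimConfiguration{{
					Requests: []string{"gpus"},
					DeviceConfiguration: resourceapi.DeviceConfiguration{
						Opaque: &resourceapi.OpaqueDeviceConfiguration{
							Driver:     "dra.example.com",
							Parameters: runtime.RawExtension{Raw: []byte(`{"sharing":"exclusive"}`)},
						},
					},
				}},
			},
		},
	}
}
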
    +
    +// ResourceClaimStatus tracks whether the resource has been allocated and what
    +// the result of that was.
    +type ResourceClaimStatus struct {
    +	// Allocation is set once the claim has been allocated successfully.
    +	//
    +	// +optional
    +	Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"`
    +
    +	// ReservedFor indicates which entities are currently allowed to use
    +	// the claim. A Pod which references a ResourceClaim which is not
    +	// reserved for that Pod will not be started. A claim that is in
    +	// use or might be in use because it has been reserved must not get
    +	// deallocated.
    +	//
    +	// In a cluster with multiple scheduler instances, two pods might get
    +	// scheduled concurrently by different schedulers. When they reference
    +	// the same ResourceClaim which already has reached its maximum number
    +	// of consumers, only one pod can be scheduled.
    +	//
    +	// Both schedulers try to add their pod to the claim.status.reservedFor
    +	// field, but only the update that reaches the API server first gets
    +	// stored. The other one fails with an error and the scheduler
    +	// which issued it knows that it must put the pod back into the queue,
    +	// waiting for the ResourceClaim to become usable again.
    +	//
    +	// There can be at most 32 such reservations. This may get increased in
    +	// the future, but not reduced.
    +	//
    +	// +optional
    +	// +listType=map
    +	// +listMapKey=uid
    +	// +patchStrategy=merge
    +	// +patchMergeKey=uid
    +	ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"`
    +
    +	// Indicates that a claim is to be deallocated. While this is set,
    +	// no new consumers may be added to ReservedFor.
    +	//
    +	// This is only used if the claim needs to be deallocated by a DRA driver.
    +	// That driver then must deallocate this claim and reset the field
    +	// together with clearing the Allocation field.
    +	//
    +	// This is an alpha field and requires enabling the DRAControlPlaneController
    +	// feature gate.
    +	//
    +	// +optional
    +	// +featureGate=DRAControlPlaneController
    +	DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"`
    +}
    +
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
    +// claim.status.reservedFor.
    +const ResourceClaimReservedForMaxSize = 32
    +
    +// ResourceClaimConsumerReference contains enough information to let you
    +// locate the consumer of a ResourceClaim. The user must be a resource in the same
    +// namespace as the ResourceClaim.
    +type ResourceClaimConsumerReference struct {
    +	// APIGroup is the group for the resource being referenced. It is
    +	// empty for the core API. This matches the group in the APIVersion
    +	// that is used when creating the resources.
    +	// +optional
    +	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
    +	// Resource is the type of resource being referenced, for example "pods".
    +	// +required
    +	Resource string `json:"resource" protobuf:"bytes,3,name=resource"`
    +	// Name is the name of resource being referenced.
    +	// +required
    +	Name string `json:"name" protobuf:"bytes,4,name=name"`
    +	// UID identifies exactly one incarnation of the resource.
    +	// +required
    +	UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"`
    +}
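
For illustration, the entry a scheduler appends to claim.status.reservedFor when it reserves the claim for a Pod could look like the sketch below; the pod name and UID are hypothetical, and APIGroup stays empty because Pod is a core resource.

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/types"
)

// podReservation is the kind of consumer reference stored in
// claim.Status.ReservedFor; at most 32 such entries may exist per claim.
var podReservation = resourceapi.ResourceClaimConsumerReference{
	Resource: "pods",
	Name:     "workload-0",
	UID:      types.UID("5d7e9c2a-1a56-4c6e-b4f7-0d7e6f0c1a2b"),
}
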
    +
    +// AllocationResult contains attributes of an allocated resource.
    +type AllocationResult struct {
    +	// Devices is the result of allocating devices.
    +	//
    +	// +optional
    +	Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"`
    +
    +	// NodeSelector defines where the allocated resources are available. If
    +	// unset, they are available everywhere.
    +	//
    +	// +optional
    +	NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"`
    +
    +	// Controller is the name of the DRA driver which handled the
    +	// allocation. That driver is also responsible for deallocating the
    +	// claim. It is empty when the claim can be deallocated without
    +	// involving a driver.
    +	//
    +	// A driver may allocate devices provided by other drivers, so this
    +	// driver name here can be different from the driver names listed for
    +	// the results.
    +	//
    +	// This is an alpha field and requires enabling the DRAControlPlaneController
    +	// feature gate.
    +	//
    +	// +optional
    +	// +featureGate=DRAControlPlaneController
    +	Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"`
    +}
    +
    +// DeviceAllocationResult is the result of allocating devices.
    +type DeviceAllocationResult struct {
    +	// Results lists all allocated devices.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"`
    +
    +	// This field is a combination of all the claim and class configuration parameters.
    +	// Drivers can distinguish between those based on a flag.
    +	//
    +	// This includes configuration parameters for drivers which have no allocated
    +	// devices in the result because it is up to the drivers which configuration
    +	// parameters they support. They can silently ignore unknown configuration
    +	// parameters.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
    +}
    +
    +// AllocationResultsMaxSize represents the maximum number of
    +// entries in allocation.devices.results.
    +const AllocationResultsMaxSize = 32
    +
    +// DeviceRequestAllocationResult contains the allocation result for one request.
    +type DeviceRequestAllocationResult struct {
    +	// Request is the name of the request in the claim which caused this
    +	// device to be allocated. Multiple devices may have been allocated
    +	// per request.
    +	//
    +	// +required
    +	Request string `json:"request" protobuf:"bytes,1,name=request"`
    +
    +	// Driver specifies the name of the DRA driver whose kubelet
    +	// plugin should be invoked to process the allocation once the claim is
    +	// needed on a node.
    +	//
    +	// Must be a DNS subdomain and should end with a DNS domain owned by the
    +	// vendor of the driver.
    +	//
    +	// +required
    +	Driver string `json:"driver" protobuf:"bytes,2,name=driver"`
    +
    +	// This name together with the driver name and the device name field
+	// identify which device was allocated (`<driver name>/<pool name>/<device name>`).
    +	//
    +	// Must not be longer than 253 characters and may contain one or more
    +	// DNS sub-domains separated by slashes.
    +	//
    +	// +required
    +	Pool string `json:"pool" protobuf:"bytes,3,name=pool"`
    +
    +	// Device references one device instance via its name in the driver's
    +	// resource pool. It must be a DNS label.
    +	//
    +	// +required
    +	Device string `json:"device" protobuf:"bytes,4,name=device"`
    +}
    +
    +// DeviceAllocationConfiguration gets embedded in an AllocationResult.
    +type DeviceAllocationConfiguration struct {
+	// Source records whether the configuration comes from a class (and thus
+	// is not something that a normal user would have been able to set) or
+	// from a claim.
    +	//
    +	// +required
    +	Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"`
    +
    +	// Requests lists the names of requests where the configuration applies.
+	// If empty, it applies to all requests.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"`
    +
    +	DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"`
    +}
    +
    +type AllocationConfigSource string
    +
    +// Valid [DeviceAllocationConfiguration.Source] values.
    +const (
    +	AllocationConfigSourceClass = "FromClass"
    +	AllocationConfigSourceClaim = "FromClaim"
    +)
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.26
    +
    +// ResourceClaimList is a collection of claims.
    +type ResourceClaimList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Items is the list of resource claims.
    +	Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// +genclient
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.26
    +
    +// PodSchedulingContext objects hold information that is needed to schedule
    +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
    +// mode.
    +//
    +// This is an alpha type and requires enabling the DRAControlPlaneController
    +// feature gate.
    +type PodSchedulingContext struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Spec describes where resources for the Pod are needed.
    +	Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    +
    +	// Status describes where resources for the Pod can be allocated.
    +	//
    +	// +optional
    +	Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
    +}
    +
    +// PodSchedulingContextSpec describes where resources for the Pod are needed.
    +type PodSchedulingContextSpec struct {
    +	// SelectedNode is the node for which allocation of ResourceClaims that
    +	// are referenced by the Pod and that use "WaitForFirstConsumer"
    +	// allocation is to be attempted.
    +	//
    +	// +optional
    +	SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"`
    +
    +	// PotentialNodes lists nodes where the Pod might be able to run.
    +	//
    +	// The size of this field is limited to 128. This is large enough for
    +	// many clusters. Larger clusters may need more attempts to find a node
    +	// that suits all pending resources. This may get increased in the
    +	// future, but not reduced.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
    +}
    +
    +// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
    +type PodSchedulingContextStatus struct {
    +	// ResourceClaims describes resource availability for each
+	// pod.spec.resourceClaims entry where the corresponding ResourceClaim
    +	// uses "WaitForFirstConsumer" allocation mode.
    +	//
    +	// +listType=map
    +	// +listMapKey=name
    +	// +optional
    +	ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"`
    +
    +	// If there ever is a need to support other kinds of resources
    +	// than ResourceClaim, then new fields could get added here
    +	// for those other resources.
    +}
    +
    +// ResourceClaimSchedulingStatus contains information about one particular
    +// ResourceClaim with "WaitForFirstConsumer" allocation mode.
    +type ResourceClaimSchedulingStatus struct {
    +	// Name matches the pod.spec.resourceClaims[*].Name field.
    +	//
    +	// +required
    +	Name string `json:"name" protobuf:"bytes,1,name=name"`
    +
    +	// UnsuitableNodes lists nodes that the ResourceClaim cannot be
    +	// allocated for.
    +	//
    +	// The size of this field is limited to 128, the same as for
+	// PodSchedulingContextSpec.PotentialNodes. This may get increased in the
    +	// future, but not reduced.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"`
    +}
    +
    +// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
    +// node lists that are stored in PodSchedulingContext objects. This limit is part
    +// of the API.
    +const PodSchedulingNodeListMaxSize = 128
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.26
    +
    +// PodSchedulingContextList is a collection of Pod scheduling objects.
    +type PodSchedulingContextList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Items is the list of PodSchedulingContext objects.
    +	Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +
    +// DeviceClass is a vendor- or admin-provided resource that contains
    +// device configuration and selectors. It can be referenced in
    +// the device requests of a claim to apply these presets.
    +// Cluster scoped.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +type DeviceClass struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Spec defines what can be allocated and how to configure it.
    +	//
    +	// This is mutable. Consumers have to be prepared for classes changing
    +	// at any time, either because they get updated or replaced. Claim
    +	// allocations are done once based on whatever was set in classes at
    +	// the time of allocation.
    +	//
    +	// Changing the spec automatically increments the metadata.generation number.
    +	Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    +}
    +
    +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
    +// and how to configure it.
    +type DeviceClassSpec struct {
    +	// Each selector must be satisfied by a device which is claimed via this class.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"`
    +
    +	// Config defines configuration parameters that apply to each device that is claimed via this class.
+	// Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
    +	// configuration applies to exactly one driver.
    +	//
    +	// They are passed to the driver, but are not considered while allocating the claim.
    +	//
    +	// +optional
    +	// +listType=atomic
    +	Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
    +
    +	// Only nodes matching the selector will be considered by the scheduler
    +	// when trying to find a Node that fits a Pod when that Pod uses
    +	// a claim that has not been allocated yet *and* that claim
    +	// gets allocated through a control plane controller. It is ignored
    +	// when the claim does not use a control plane controller
    +	// for allocation.
    +	//
    +	// Setting this field is optional. If unset, all Nodes are candidates.
    +	//
    +	// This is an alpha field and requires enabling the DRAControlPlaneController
    +	// feature gate.
    +	//
    +	// +optional
    +	// +featureGate=DRAControlPlaneController
    +	SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"`
    +}
    +
    +// DeviceClassConfiguration is used in DeviceClass.
    +type DeviceClassConfiguration struct {
    +	DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"`
    +}
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.26
    +
    +// DeviceClassList is a collection of classes.
    +type DeviceClassList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Items is the list of resource classes.
    +	Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    +
    +// +genclient
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.26
    +
    +// ResourceClaimTemplate is used to produce ResourceClaim objects.
    +//
    +// This is an alpha type and requires enabling the DynamicResourceAllocation
    +// feature gate.
    +type ResourceClaimTemplate struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard object metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Describes the ResourceClaim that is to be generated.
    +	//
    +	// This field is immutable. A ResourceClaim will get created by the
    +	// control plane for a Pod when needed and then not get updated
    +	// anymore.
    +	Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    +}
    +
    +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
    +type ResourceClaimTemplateSpec struct {
+	// ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
    +	// when creating it. No other fields are allowed and will be rejected during
    +	// validation.
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Spec for the ResourceClaim. The entire content is copied unchanged
    +	// into the ResourceClaim that gets created from this template. The
    +	// same fields as in a ResourceClaim are also valid here.
    +	Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
    +}
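
As a sketch of how this is meant to be used, the inner metadata and spec of the template below are what gets copied verbatim into every ResourceClaim generated for a Pod that references the template; the template name, label, and class name are hypothetical.

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// gpuTemplate produces per-Pod claims that each request one device from a
// hypothetical "example-gpu" class and carry the given label.
var gpuTemplate = resourceapi.ResourceClaimTemplate{
	ObjectMeta: metav1.ObjectMeta{Name: "gpu-template", Namespace: "default"},
	Spec: resourceapi.ResourceClaimTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"team": "ml"}},
		Spec: resourceapi.ResourceClaimSpec{
			Devices: resourceapi.DeviceClaim{
				Requests: []resourceapi.DeviceRequest{{
					Name:            "gpu",
					DeviceClassName: "example-gpu",
				}},
			},
		},
	},
}
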
    +
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.26
    +
    +// ResourceClaimTemplateList is a collection of claim templates.
    +type ResourceClaimTemplateList struct {
    +	metav1.TypeMeta `json:",inline"`
    +	// Standard list metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Items is the list of resource claim templates.
    +	Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
    diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
    new file mode 100644
    index 0000000000..1a44a971db
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
    @@ -0,0 +1,404 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1alpha3
    +
    +// This file contains a collection of methods that can be used from go-restful to
    +// generate Swagger API documentation for its models. Please read this PR for more
    +// information on the implementation: https://github.com/emicklei/go-restful/pull/215
    +//
    +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
    +// they are on one line! For multiple line or blocks that you want to ignore use ---.
    +// Any context after a --- is ignored.
    +//
    +// Those methods can be generated by using hack/update-codegen.sh
    +
    +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
    +var map_AllocationResult = map[string]string{
    +	"":             "AllocationResult contains attributes of an allocated resource.",
    +	"devices":      "Devices is the result of allocating devices.",
    +	"nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.",
    +	"controller":   "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
    +}
    +
    +func (AllocationResult) SwaggerDoc() map[string]string {
    +	return map_AllocationResult
    +}
    +
    +var map_BasicDevice = map[string]string{
    +	"":           "BasicDevice defines one device instance.",
    +	"attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
    +	"capacity":   "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
    +}
    +
    +func (BasicDevice) SwaggerDoc() map[string]string {
    +	return map_BasicDevice
    +}
    +
    +var map_CELDeviceSelector = map[string]string{
    +	"":           "CELDeviceSelector contains a CEL expression for selecting a device.",
    +	"expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n   (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n   of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n    device.driver\n    device.attributes[\"dra.example.com\"].model\n    device.attributes[\"ext.example.com\"].family\n    device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n    cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)",
    +}
    +
    +func (CELDeviceSelector) SwaggerDoc() map[string]string {
    +	return map_CELDeviceSelector
    +}
    +
    +var map_Device = map[string]string{
    +	"":      "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.",
    +	"name":  "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.",
    +	"basic": "Basic defines one device instance.",
    +}
    +
    +func (Device) SwaggerDoc() map[string]string {
    +	return map_Device
    +}
    +
    +var map_DeviceAllocationConfiguration = map[string]string{
    +	"":         "DeviceAllocationConfiguration gets embedded in an AllocationResult.",
    +	"source":   "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.",
    +	"requests": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.",
    +}
    +
    +func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string {
    +	return map_DeviceAllocationConfiguration
    +}
    +
    +var map_DeviceAllocationResult = map[string]string{
    +	"":        "DeviceAllocationResult is the result of allocating devices.",
    +	"results": "Results lists all allocated devices.",
    +	"config":  "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.",
    +}
    +
    +func (DeviceAllocationResult) SwaggerDoc() map[string]string {
    +	return map_DeviceAllocationResult
    +}
    +
    +var map_DeviceAttribute = map[string]string{
    +	"":        "DeviceAttribute must have exactly one field set.",
    +	"int":     "IntValue is a number.",
    +	"bool":    "BoolValue is a true/false value.",
    +	"string":  "StringValue is a string. Must not be longer than 64 characters.",
    +	"version": "VersionValue is a semantic version according to semver.org spec 2.0.0. Must not be longer than 64 characters.",
    +}
    +
    +func (DeviceAttribute) SwaggerDoc() map[string]string {
    +	return map_DeviceAttribute
    +}
    +
    +var map_DeviceClaim = map[string]string{
    +	"":            "DeviceClaim defines how to request devices with a ResourceClaim.",
    +	"requests":    "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.",
    +	"constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.",
    +	"config":      "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.",
    +}
    +
    +func (DeviceClaim) SwaggerDoc() map[string]string {
    +	return map_DeviceClaim
    +}
    +
    +var map_DeviceClaimConfiguration = map[string]string{
    +	"":         "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.",
    +	"requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.",
    +}
    +
    +func (DeviceClaimConfiguration) SwaggerDoc() map[string]string {
    +	return map_DeviceClaimConfiguration
    +}
    +
    +var map_DeviceClass = map[string]string{
    +	"":         "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
    +	"metadata": "Standard object metadata",
    +	"spec":     "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.",
    +}
    +
    +func (DeviceClass) SwaggerDoc() map[string]string {
    +	return map_DeviceClass
    +}
    +
    +var map_DeviceClassConfiguration = map[string]string{
    +	"": "DeviceClassConfiguration is used in DeviceClass.",
    +}
    +
    +func (DeviceClassConfiguration) SwaggerDoc() map[string]string {
    +	return map_DeviceClassConfiguration
    +}
    +
    +var map_DeviceClassList = map[string]string{
    +	"":         "DeviceClassList is a collection of classes.",
    +	"metadata": "Standard list metadata",
    +	"items":    "Items is the list of resource classes.",
    +}
    +
    +func (DeviceClassList) SwaggerDoc() map[string]string {
    +	return map_DeviceClassList
    +}
    +
    +var map_DeviceClassSpec = map[string]string{
    +	"":              "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.",
    +	"selectors":     "Each selector must be satisfied by a device which is claimed via this class.",
    +	"config":        "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
    +	"suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
    +}
    +
    +func (DeviceClassSpec) SwaggerDoc() map[string]string {
    +	return map_DeviceClassSpec
    +}
    +
    +var map_DeviceConfiguration = map[string]string{
    +	"":       "DeviceConfiguration must have exactly one field set. It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.",
    +	"opaque": "Opaque provides driver-specific configuration parameters.",
    +}
    +
    +func (DeviceConfiguration) SwaggerDoc() map[string]string {
    +	return map_DeviceConfiguration
    +}
    +
    +var map_DeviceConstraint = map[string]string{
    +	"":               "DeviceConstraint must have exactly one field set besides Requests.",
    +	"requests":       "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.",
    +	"matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.",
    +}
    +
    +func (DeviceConstraint) SwaggerDoc() map[string]string {
    +	return map_DeviceConstraint
    +}
    +
    +var map_DeviceRequest = map[string]string{
    +	"":                "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.",
    +	"name":            "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.",
    +	"deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.",
    +	"selectors":       "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.",
    +	"allocationMode":  "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n  This is the default. The exact number is provided in the\n  count field.\n\n- All: This request is for all of the matching devices in a pool.\n  Allocation will fail if some devices are already allocated,\n  unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.",
    +	"count":           "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.",
    +	"adminAccess":     "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device.  They ignore all ordinary claims to the device with respect to access modes and any resource allocations.",
    +}
    +
    +func (DeviceRequest) SwaggerDoc() map[string]string {
    +	return map_DeviceRequest
    +}
    +
    +var map_DeviceRequestAllocationResult = map[string]string{
    +	"":        "DeviceRequestAllocationResult contains the allocation result for one request.",
    +	"request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.",
    +	"driver":  "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
    +	"pool":    "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
    +	"device":  "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
    +}
    +
    +func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string {
    +	return map_DeviceRequestAllocationResult
    +}
    +
    +var map_DeviceSelector = map[string]string{
    +	"":    "DeviceSelector must have exactly one field set.",
    +	"cel": "CEL contains a CEL expression for selecting a device.",
    +}
    +
    +func (DeviceSelector) SwaggerDoc() map[string]string {
    +	return map_DeviceSelector
    +}
    +
    +var map_OpaqueDeviceConfiguration = map[string]string{
    +	"":           "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.",
    +	"driver":     "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
    +	"parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.",
    +}
    +
    +func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string {
    +	return map_OpaqueDeviceConfiguration
    +}
    +
    +var map_PodSchedulingContext = map[string]string{
    +	"":         "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.",
    +	"metadata": "Standard object metadata",
    +	"spec":     "Spec describes where resources for the Pod are needed.",
    +	"status":   "Status describes where resources for the Pod can be allocated.",
    +}
    +
    +func (PodSchedulingContext) SwaggerDoc() map[string]string {
    +	return map_PodSchedulingContext
    +}
    +
    +var map_PodSchedulingContextList = map[string]string{
    +	"":         "PodSchedulingContextList is a collection of Pod scheduling objects.",
    +	"metadata": "Standard list metadata",
    +	"items":    "Items is the list of PodSchedulingContext objects.",
    +}
    +
    +func (PodSchedulingContextList) SwaggerDoc() map[string]string {
    +	return map_PodSchedulingContextList
    +}
    +
    +var map_PodSchedulingContextSpec = map[string]string{
    +	"":               "PodSchedulingContextSpec describes where resources for the Pod are needed.",
    +	"selectedNode":   "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.",
    +	"potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.",
    +}
    +
    +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string {
    +	return map_PodSchedulingContextSpec
    +}
    +
    +var map_PodSchedulingContextStatus = map[string]string{
    +	"":               "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
    +	"resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.",
    +}
    +
    +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string {
    +	return map_PodSchedulingContextStatus
    +}
    +
    +var map_ResourceClaim = map[string]string{
    +	"":         "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
    +	"metadata": "Standard object metadata",
    +	"spec":     "Spec describes what is being requested and how to configure it. The spec is immutable.",
    +	"status":   "Status describes whether the claim is ready to use and what has been allocated.",
    +}
    +
    +func (ResourceClaim) SwaggerDoc() map[string]string {
    +	return map_ResourceClaim
    +}
    +
    +var map_ResourceClaimConsumerReference = map[string]string{
    +	"":         "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.",
    +	"apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.",
    +	"resource": "Resource is the type of resource being referenced, for example \"pods\".",
    +	"name":     "Name is the name of resource being referenced.",
    +	"uid":      "UID identifies exactly one incarnation of the resource.",
    +}
    +
    +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimConsumerReference
    +}
    +
    +var map_ResourceClaimList = map[string]string{
    +	"":         "ResourceClaimList is a collection of claims.",
    +	"metadata": "Standard list metadata",
    +	"items":    "Items is the list of resource claims.",
    +}
    +
    +func (ResourceClaimList) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimList
    +}
    +
    +var map_ResourceClaimSchedulingStatus = map[string]string{
    +	"":                "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.",
    +	"name":            "Name matches the pod.spec.resourceClaims[*].Name field.",
    +	"unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.",
    +}
    +
    +func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimSchedulingStatus
    +}
    +
    +var map_ResourceClaimSpec = map[string]string{
    +	"":           "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.",
    +	"devices":    "Devices defines how to request devices.",
    +	"controller": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
    +}
    +
    +func (ResourceClaimSpec) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimSpec
    +}
    +
    +var map_ResourceClaimStatus = map[string]string{
    +	"":                      "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
    +	"allocation":            "Allocation is set once the claim has been allocated successfully.",
    +	"reservedFor":           "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
    +	"deallocationRequested": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
    +}
    +
    +func (ResourceClaimStatus) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimStatus
    +}
    +
    +var map_ResourceClaimTemplate = map[string]string{
    +	"":         "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
    +	"metadata": "Standard object metadata",
    +	"spec":     "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.",
    +}
    +
    +func (ResourceClaimTemplate) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimTemplate
    +}
    +
    +var map_ResourceClaimTemplateList = map[string]string{
    +	"":         "ResourceClaimTemplateList is a collection of claim templates.",
    +	"metadata": "Standard list metadata",
    +	"items":    "Items is the list of resource claim templates.",
    +}
    +
    +func (ResourceClaimTemplateList) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimTemplateList
    +}
    +
    +var map_ResourceClaimTemplateSpec = map[string]string{
    +	"":         "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.",
    +	"metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.",
    +	"spec":     "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.",
    +}
    +
    +func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string {
    +	return map_ResourceClaimTemplateSpec
    +}
    +
    +var map_ResourcePool = map[string]string{
    +	"":                   "ResourcePool describes the pool that ResourceSlices belong to.",
    +	"name":               "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.",
    +	"generation":         "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.",
    +	"resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.",
    +}
    +
    +func (ResourcePool) SwaggerDoc() map[string]string {
    +	return map_ResourcePool
    +}
    +
    +var map_ResourceSlice = map[string]string{
    +	"":         "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple , , .\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
    +	"metadata": "Standard object metadata",
    +	"spec":     "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.",
    +}
    +
    +func (ResourceSlice) SwaggerDoc() map[string]string {
    +	return map_ResourceSlice
    +}
    +
    +var map_ResourceSliceList = map[string]string{
    +	"":         "ResourceSliceList is a collection of ResourceSlices.",
    +	"metadata": "Standard list metadata",
    +	"items":    "Items is the list of resource ResourceSlices.",
    +}
    +
    +func (ResourceSliceList) SwaggerDoc() map[string]string {
    +	return map_ResourceSliceList
    +}
    +
    +var map_ResourceSliceSpec = map[string]string{
    +	"":             "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.",
    +	"driver":       "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.",
    +	"pool":         "Pool describes the pool that this ResourceSlice belongs to.",
    +	"nodeName":     "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. This field is immutable.",
    +	"nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.",
    +	"allNodes":     "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.",
    +	"devices":      "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.",
    +}
    +
    +func (ResourceSliceSpec) SwaggerDoc() map[string]string {
    +	return map_ResourceSliceSpec
    +}
    +
    +// AUTO-GENERATED FUNCTIONS END HERE
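The generated SwaggerDoc maps above follow a fixed convention: each exported type gets a value-receiver SwaggerDoc() method that returns a map keyed by JSON field name, with the empty key holding the type-level description. A minimal sketch of how such a map might be consumed (not part of the vendored patch; the main package and import alias are assumptions, the import path is the one shown in this diff):

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// SwaggerDoc uses a value receiver, so it can be called on a zero value.
	docs := resourcev1alpha3.DeviceRequest{}.SwaggerDoc()

	// The empty key carries the type-level description; every other key is a JSON field name.
	fmt.Println(docs[""])
	fmt.Println(docs["deviceClassName"])
}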
    diff --git a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
    new file mode 100644
    index 0000000000..58171df1f2
    --- /dev/null
    +++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
    @@ -0,0 +1,927 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by deepcopy-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	resource "k8s.io/apimachinery/pkg/api/resource"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +)
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
    +	*out = *in
    +	in.Devices.DeepCopyInto(&out.Devices)
    +	if in.NodeSelector != nil {
    +		in, out := &in.NodeSelector, &out.NodeSelector
    +		*out = new(v1.NodeSelector)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult.
    +func (in *AllocationResult) DeepCopy() *AllocationResult {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(AllocationResult)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *BasicDevice) DeepCopyInto(out *BasicDevice) {
    +	*out = *in
    +	if in.Attributes != nil {
    +		in, out := &in.Attributes, &out.Attributes
    +		*out = make(map[QualifiedName]DeviceAttribute, len(*in))
    +		for key, val := range *in {
    +			(*out)[key] = *val.DeepCopy()
    +		}
    +	}
    +	if in.Capacity != nil {
    +		in, out := &in.Capacity, &out.Capacity
    +		*out = make(map[QualifiedName]resource.Quantity, len(*in))
    +		for key, val := range *in {
    +			(*out)[key] = val.DeepCopy()
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice.
    +func (in *BasicDevice) DeepCopy() *BasicDevice {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(BasicDevice)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector.
    +func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(CELDeviceSelector)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *Device) DeepCopyInto(out *Device) {
    +	*out = *in
    +	if in.Basic != nil {
    +		in, out := &in.Basic, &out.Basic
    +		*out = new(BasicDevice)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
    +func (in *Device) DeepCopy() *Device {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(Device)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) {
    +	*out = *in
    +	if in.Requests != nil {
    +		in, out := &in.Requests, &out.Requests
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration.
    +func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceAllocationConfiguration)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) {
    +	*out = *in
    +	if in.Results != nil {
    +		in, out := &in.Results, &out.Results
    +		*out = make([]DeviceRequestAllocationResult, len(*in))
    +		copy(*out, *in)
    +	}
    +	if in.Config != nil {
    +		in, out := &in.Config, &out.Config
    +		*out = make([]DeviceAllocationConfiguration, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult.
    +func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceAllocationResult)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) {
    +	*out = *in
    +	if in.IntValue != nil {
    +		in, out := &in.IntValue, &out.IntValue
    +		*out = new(int64)
    +		**out = **in
    +	}
    +	if in.BoolValue != nil {
    +		in, out := &in.BoolValue, &out.BoolValue
    +		*out = new(bool)
    +		**out = **in
    +	}
    +	if in.StringValue != nil {
    +		in, out := &in.StringValue, &out.StringValue
    +		*out = new(string)
    +		**out = **in
    +	}
    +	if in.VersionValue != nil {
    +		in, out := &in.VersionValue, &out.VersionValue
    +		*out = new(string)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute.
    +func (in *DeviceAttribute) DeepCopy() *DeviceAttribute {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceAttribute)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) {
    +	*out = *in
    +	if in.Requests != nil {
    +		in, out := &in.Requests, &out.Requests
    +		*out = make([]DeviceRequest, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	if in.Constraints != nil {
    +		in, out := &in.Constraints, &out.Constraints
    +		*out = make([]DeviceConstraint, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	if in.Config != nil {
    +		in, out := &in.Config, &out.Config
    +		*out = make([]DeviceClaimConfiguration, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim.
    +func (in *DeviceClaim) DeepCopy() *DeviceClaim {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceClaim)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) {
    +	*out = *in
    +	if in.Requests != nil {
    +		in, out := &in.Requests, &out.Requests
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration.
    +func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceClaimConfiguration)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceClass) DeepCopyInto(out *DeviceClass) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass.
    +func (in *DeviceClass) DeepCopy() *DeviceClass {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceClass)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *DeviceClass) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) {
    +	*out = *in
    +	in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration.
    +func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceClassConfiguration)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]DeviceClass, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList.
    +func (in *DeviceClassList) DeepCopy() *DeviceClassList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceClassList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *DeviceClassList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) {
    +	*out = *in
    +	if in.Selectors != nil {
    +		in, out := &in.Selectors, &out.Selectors
    +		*out = make([]DeviceSelector, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	if in.Config != nil {
    +		in, out := &in.Config, &out.Config
    +		*out = make([]DeviceClassConfiguration, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	if in.SuitableNodes != nil {
    +		in, out := &in.SuitableNodes, &out.SuitableNodes
    +		*out = new(v1.NodeSelector)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec.
    +func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceClassSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) {
    +	*out = *in
    +	if in.Opaque != nil {
    +		in, out := &in.Opaque, &out.Opaque
    +		*out = new(OpaqueDeviceConfiguration)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration.
    +func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceConfiguration)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) {
    +	*out = *in
    +	if in.Requests != nil {
    +		in, out := &in.Requests, &out.Requests
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	if in.MatchAttribute != nil {
    +		in, out := &in.MatchAttribute, &out.MatchAttribute
    +		*out = new(FullyQualifiedName)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint.
    +func (in *DeviceConstraint) DeepCopy() *DeviceConstraint {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceConstraint)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) {
    +	*out = *in
    +	if in.Selectors != nil {
    +		in, out := &in.Selectors, &out.Selectors
    +		*out = make([]DeviceSelector, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest.
    +func (in *DeviceRequest) DeepCopy() *DeviceRequest {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceRequest)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult.
    +func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceRequestAllocationResult)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) {
    +	*out = *in
    +	if in.CEL != nil {
    +		in, out := &in.CEL, &out.CEL
    +		*out = new(CELDeviceSelector)
    +		**out = **in
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector.
    +func (in *DeviceSelector) DeepCopy() *DeviceSelector {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(DeviceSelector)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) {
    +	*out = *in
    +	in.Parameters.DeepCopyInto(&out.Parameters)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration.
    +func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(OpaqueDeviceConfiguration)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	in.Status.DeepCopyInto(&out.Status)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext.
    +func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodSchedulingContext)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *PodSchedulingContext) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]PodSchedulingContext, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList.
    +func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodSchedulingContextList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) {
    +	*out = *in
    +	if in.PotentialNodes != nil {
    +		in, out := &in.PotentialNodes, &out.PotentialNodes
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec.
    +func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodSchedulingContextSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) {
    +	*out = *in
    +	if in.ResourceClaims != nil {
    +		in, out := &in.ResourceClaims, &out.ResourceClaims
    +		*out = make([]ResourceClaimSchedulingStatus, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus.
    +func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(PodSchedulingContextStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	in.Status.DeepCopyInto(&out.Status)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
    +func (in *ResourceClaim) DeepCopy() *ResourceClaim {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaim)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ResourceClaim) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference.
    +func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimConsumerReference)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]ResourceClaim, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList.
    +func (in *ResourceClaimList) DeepCopy() *ResourceClaimList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) {
    +	*out = *in
    +	if in.UnsuitableNodes != nil {
    +		in, out := &in.UnsuitableNodes, &out.UnsuitableNodes
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus.
    +func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimSchedulingStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) {
    +	*out = *in
    +	in.Devices.DeepCopyInto(&out.Devices)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec.
    +func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) {
    +	*out = *in
    +	if in.Allocation != nil {
    +		in, out := &in.Allocation, &out.Allocation
    +		*out = new(AllocationResult)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	if in.ReservedFor != nil {
    +		in, out := &in.ReservedFor, &out.ReservedFor
    +		*out = make([]ResourceClaimConsumerReference, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus.
    +func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimStatus)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate.
    +func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimTemplate)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]ResourceClaimTemplate, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList.
    +func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimTemplateList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) {
    +	*out = *in
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec.
    +func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceClaimTemplateSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) {
    +	*out = *in
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool.
    +func (in *ResourcePool) DeepCopy() *ResourcePool {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourcePool)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	in.Spec.DeepCopyInto(&out.Spec)
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
    +func (in *ResourceSlice) DeepCopy() *ResourceSlice {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceSlice)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ResourceSlice) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]ResourceSlice, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
    +func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceSliceList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) {
    +	*out = *in
    +	out.Pool = in.Pool
    +	if in.NodeSelector != nil {
    +		in, out := &in.NodeSelector, &out.NodeSelector
    +		*out = new(v1.NodeSelector)
    +		(*in).DeepCopyInto(*out)
    +	}
    +	if in.Devices != nil {
    +		in, out := &in.Devices, &out.Devices
    +		*out = make([]Device, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec.
    +func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(ResourceSliceSpec)
    +	in.DeepCopyInto(out)
    +	return out
    +}
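The deepcopy functions above all follow the same shape: DeepCopyInto writes into an existing value, DeepCopy allocates and returns a fresh one, and DeepCopyObject satisfies runtime.Object for the top-level kinds. A minimal sketch of the copy semantics, assuming the vendored import path from this diff (the alias and main package are illustrative):

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	original := &resourcev1alpha3.ResourceClaim{}
	original.Name = "claim-a" // Name is promoted from the embedded ObjectMeta

	// DeepCopy returns an independent object; mutating the clone leaves the original untouched.
	clone := original.DeepCopy()
	clone.Name = "claim-b"

	fmt.Println(original.Name, clone.Name) // claim-a claim-b
}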
    diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go
    index 76c4da002e..ee3c668471 100644
    --- a/vendor/k8s.io/api/scheduling/v1/doc.go
    +++ b/vendor/k8s.io/api/scheduling/v1/doc.go
    @@ -17,7 +17,7 @@ limitations under the License.
     // +k8s:deepcopy-gen=package
     // +k8s:protobuf-gen=package
     // +k8s:openapi-gen=true
    -
    +// +k8s:prerelease-lifecycle-gen=true
     // +groupName=scheduling.k8s.io
     
     package v1 // import "k8s.io/api/scheduling/v1"
    diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto
    index c1a27e8baa..374e68238b 100644
    --- a/vendor/k8s.io/api/scheduling/v1/generated.proto
    +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto
    @@ -35,7 +35,7 @@ message PriorityClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // value represents the integer value of this priority class. This is the actual priority that pods
       // receive when they have the name of this class in their pod spec.
    @@ -66,7 +66,7 @@ message PriorityClassList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of PriorityClasses
       repeated PriorityClass items = 2;
    diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go
    index 146bae40d3..019dbcd00e 100644
    --- a/vendor/k8s.io/api/scheduling/v1/types.go
    +++ b/vendor/k8s.io/api/scheduling/v1/types.go
    @@ -24,6 +24,7 @@ import (
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.14
     
     // PriorityClass defines mapping from a priority class name to the priority
     // integer value. The value can be any valid integer.
    @@ -59,6 +60,7 @@ type PriorityClass struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.14
     
     // PriorityClassList is a collection of priority classes.
     type PriorityClassList struct {
    diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..a4a432a64f
    --- /dev/null
    +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,34 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PriorityClass) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 14
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *PriorityClassList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 14
    +}
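The prerelease-lifecycle methods generated here expose the release recorded in the +k8s:prerelease-lifecycle-gen tags at runtime. A small sketch of how they might be queried, assuming the vendored scheduling/v1 import path (alias and main package are illustrative):

package main

import (
	"fmt"

	schedulingv1 "k8s.io/api/scheduling/v1"
)

func main() {
	var pc schedulingv1.PriorityClass
	major, minor := pc.APILifecycleIntroduced() // pointer receiver; pc is addressable
	fmt.Printf("scheduling/v1 PriorityClass was introduced in %d.%d\n", major, minor) // 1.14
}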
    diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
    index f0878fb16e..e42dccc688 100644
    --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
    @@ -36,7 +36,7 @@ message PriorityClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // value represents the integer value of this priority class. This is the actual priority that pods
       // receive when they have the name of this class in their pod spec.
    @@ -67,7 +67,7 @@ message PriorityClassList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of PriorityClasses
       repeated PriorityClass items = 2;
    diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
    index 43878184d6..7f77b01753 100644
    --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
    @@ -36,7 +36,7 @@ message PriorityClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // value represents the integer value of this priority class. This is the actual priority that pods
       // receive when they have the name of this class in their pod spec.
    @@ -67,7 +67,7 @@ message PriorityClassList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of PriorityClasses
       repeated PriorityClass items = 2;
    diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go
    index 75a6489da2..e2310dac23 100644
    --- a/vendor/k8s.io/api/storage/v1/doc.go
    +++ b/vendor/k8s.io/api/storage/v1/doc.go
    @@ -18,5 +18,6 @@ limitations under the License.
     // +k8s:protobuf-gen=package
     // +groupName=storage.k8s.io
     // +k8s:openapi-gen=true
    +// +k8s:prerelease-lifecycle-gen=true
     
     package v1 // import "k8s.io/api/storage/v1"
    diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto
    index 06bbe3d5cf..ec2beac468 100644
    --- a/vendor/k8s.io/api/storage/v1/generated.proto
    +++ b/vendor/k8s.io/api/storage/v1/generated.proto
    @@ -44,7 +44,7 @@ message CSIDriver {
       // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
       // alphanumerics between.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec represents the specification of the CSI Driver.
       optional CSIDriverSpec spec = 2;
    @@ -55,7 +55,7 @@ message CSIDriverList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CSIDriver
       repeated CSIDriver items = 2;
    @@ -226,7 +226,7 @@ message CSIDriverSpec {
     message CSINode {
       // Standard object's metadata.
       // metadata.name must be the Kubernetes node name.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the specification of CSINode
       optional CSINodeSpec spec = 2;
    @@ -275,7 +275,7 @@ message CSINodeList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CSINode
       repeated CSINode items = 2;
    @@ -327,7 +327,7 @@ message CSIStorageCapacity {
       //
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // nodeTopology defines which nodes have access to the storage
       // for which capacity was reported. If not set, the storage is
    @@ -336,7 +336,7 @@ message CSIStorageCapacity {
       // immutable.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
     
       // storageClassName represents the name of the StorageClass that the reported capacity applies to.
       // It must meet the same requirements as the name of a StorageClass
    @@ -356,7 +356,7 @@ message CSIStorageCapacity {
       // unavailable.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
     
       // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
       // for a GetCapacityRequest with topology and parameters that match the
    @@ -370,7 +370,7 @@ message CSIStorageCapacity {
       // API is ResourceRequirements.Requests in a volume claim.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
     }
     
     // CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
    @@ -378,7 +378,7 @@ message CSIStorageCapacityList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CSIStorageCapacity objects.
       repeated CSIStorageCapacity items = 2;
    @@ -393,7 +393,7 @@ message StorageClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // provisioner indicates the type of the provisioner.
       optional string provisioner = 2;
    @@ -431,7 +431,7 @@ message StorageClass {
       // This field is only honored by servers that enable the VolumeScheduling feature.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
    +  repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
     }
     
     // StorageClassList is a collection of storage classes.
    @@ -439,7 +439,7 @@ message StorageClassList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of StorageClasses
       repeated StorageClass items = 2;
    @@ -466,7 +466,7 @@ message VolumeAttachment {
       // Standard object metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec represents specification of the desired attach/detach volume behavior.
       // Populated by the Kubernetes system.
    @@ -484,7 +484,7 @@ message VolumeAttachmentList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of VolumeAttachments
       repeated VolumeAttachment items = 2;
    @@ -506,7 +506,7 @@ message VolumeAttachmentSource {
       // PersistentVolumeSpec. This field is beta-level and is only
       // honored by servers that enabled the CSIMigration feature.
       // +optional
    -  optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
    +  optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
     }
     
     // VolumeAttachmentSpec is the specification of a VolumeAttachment request.
    @@ -554,7 +554,7 @@ message VolumeAttachmentStatus {
     message VolumeError {
       // time represents the time the error was encountered.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
     
       // message represents the error encountered during Attach or Detach operation.
       // This string may be logged, so it should not contain sensitive
    diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go
    index a94c7f44c5..de2bbc2e06 100644
    --- a/vendor/k8s.io/api/storage/v1/types.go
    +++ b/vendor/k8s.io/api/storage/v1/types.go
    @@ -25,6 +25,7 @@ import (
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.6
     
     // StorageClass describes the parameters for a class of storage for
     // which PersistentVolumes can be dynamically provisioned.
    @@ -79,6 +80,7 @@ type StorageClass struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.6
     
     // StorageClassList is a collection of storage classes.
     type StorageClassList struct {
    @@ -112,6 +114,7 @@ const (
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.13
     
     // VolumeAttachment captures the intent to attach or detach the specified volume
     // to/from the specified node.
    @@ -137,6 +140,7 @@ type VolumeAttachment struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.13
     
     // VolumeAttachmentList is a collection of VolumeAttachment objects.
     type VolumeAttachmentList struct {
    @@ -227,6 +231,7 @@ type VolumeError struct {
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.18
     
     // CSIDriver captures information about a Container Storage Interface (CSI)
     // volume driver deployed on the cluster.
    @@ -251,6 +256,7 @@ type CSIDriver struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.18
     
     // CSIDriverList is a collection of CSIDriver objects.
     type CSIDriverList struct {
    @@ -491,6 +497,7 @@ const (
     // +genclient
     // +genclient:nonNamespaced
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.17
     
     // CSINode holds information about all CSI drivers installed on a node.
     // CSI drivers do not need to create the CSINode object directly. As long as
    @@ -572,6 +579,7 @@ type VolumeNodeResources struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.17
     
     // CSINodeList is a collection of CSINode objects.
     type CSINodeList struct {
    @@ -588,6 +596,7 @@ type CSINodeList struct {
     
     // +genclient
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.24
     
     // CSIStorageCapacity stores the result of one CSI GetCapacity call.
     // For a given StorageClass, this describes the available capacity in a
    @@ -673,6 +682,7 @@ type CSIStorageCapacity struct {
     }
     
     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +// +k8s:prerelease-lifecycle-gen:introduced=1.24
     
     // CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
     type CSIStorageCapacityList struct {
    diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go
    new file mode 100644
    index 0000000000..a44c1181ad
    --- /dev/null
    +++ b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go
    @@ -0,0 +1,82 @@
    +//go:build !ignore_autogenerated
    +// +build !ignore_autogenerated
    +
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CSIDriver) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 18
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CSIDriverList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 18
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CSINode) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 17
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CSINodeList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 17
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CSIStorageCapacity) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 24
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *CSIStorageCapacityList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 24
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *StorageClass) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 6
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *StorageClassList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 6
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *VolumeAttachment) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 13
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *VolumeAttachmentList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 13
    +}
    diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto
    index 93aefd933a..380adbf66e 100644
    --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto
    @@ -65,7 +65,7 @@ message CSIStorageCapacity {
       //
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // nodeTopology defines which nodes have access to the storage
       // for which capacity was reported. If not set, the storage is
    @@ -74,7 +74,7 @@ message CSIStorageCapacity {
       // immutable.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
     
       // storageClassName represents the name of the StorageClass that the reported capacity applies to.
       // It must meet the same requirements as the name of a StorageClass
    @@ -94,7 +94,7 @@ message CSIStorageCapacity {
       // unavailable.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
     
       // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
       // for a GetCapacityRequest with topology and parameters that match the
    @@ -108,7 +108,7 @@ message CSIStorageCapacity {
       // API is ResourceRequirements.Requests in a volume claim.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
     }
     
     // CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
    @@ -116,7 +116,7 @@ message CSIStorageCapacityList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CSIStorageCapacity objects.
       repeated CSIStorageCapacity items = 2;
    @@ -130,7 +130,7 @@ message VolumeAttachment {
       // Standard object metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec represents specification of the desired attach/detach volume behavior.
       // Populated by the Kubernetes system.
    @@ -148,7 +148,7 @@ message VolumeAttachmentList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of VolumeAttachments
       repeated VolumeAttachment items = 2;
    @@ -170,7 +170,7 @@ message VolumeAttachmentSource {
       // PersistentVolumeSpec. This field is alpha-level and is only
       // honored by servers that enabled the CSIMigration feature.
       // +optional
    -  optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
    +  optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
     }
     
     // VolumeAttachmentSpec is the specification of a VolumeAttachment request.
    @@ -221,7 +221,7 @@ message VolumeAttributesClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Name of the CSI driver
       // This field is immutable.
    @@ -248,7 +248,7 @@ message VolumeAttributesClassList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of VolumeAttributesClass objects.
       repeated VolumeAttributesClass items = 2;
    @@ -258,7 +258,7 @@ message VolumeAttributesClassList {
     message VolumeError {
       // time represents the time the error was encountered.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
     
       // message represents the error encountered during Attach or Detach operation.
       // This string maybe logged, so it should not contain sensitive
    diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
    index c503ec6511..446a40c483 100644
    --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
    +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
    @@ -524,10 +524,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo
     
    +func (m *VolumeAttributesClass) Reset()      { *m = VolumeAttributesClass{} }
    +func (*VolumeAttributesClass) ProtoMessage() {}
    +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_73e4f72503e71065, []int{17}
    +}
    +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_VolumeAttributesClass.Merge(m, src)
    +}
    +func (m *VolumeAttributesClass) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *VolumeAttributesClass) XXX_DiscardUnknown() {
    +	xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo
    +
    +func (m *VolumeAttributesClassList) Reset()      { *m = VolumeAttributesClassList{} }
    +func (*VolumeAttributesClassList) ProtoMessage() {}
    +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_73e4f72503e71065, []int{18}
    +}
    +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_VolumeAttributesClassList.Merge(m, src)
    +}
    +func (m *VolumeAttributesClassList) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() {
    +	xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo
    +
     func (m *VolumeError) Reset()      { *m = VolumeError{} }
     func (*VolumeError) ProtoMessage() {}
     func (*VolumeError) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_73e4f72503e71065, []int{17}
    +	return fileDescriptor_73e4f72503e71065, []int{19}
     }
     func (m *VolumeError) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -555,7 +611,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo
     func (m *VolumeNodeResources) Reset()      { *m = VolumeNodeResources{} }
     func (*VolumeNodeResources) ProtoMessage() {}
     func (*VolumeNodeResources) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_73e4f72503e71065, []int{18}
    +	return fileDescriptor_73e4f72503e71065, []int{20}
     }
     func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -600,6 +656,9 @@ func init() {
     	proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec")
     	proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus")
     	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry")
    +	proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass")
    +	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass.ParametersEntry")
    +	proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClassList")
     	proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError")
     	proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources")
     }
    @@ -609,111 +668,115 @@ func init() {
     }
     
     var fileDescriptor_73e4f72503e71065 = []byte{
    -	// 1655 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4d, 0x6f, 0x1b, 0xc5,
    -	0x1b, 0xcf, 0xc6, 0xce, 0xdb, 0x38, 0x69, 0x92, 0x49, 0xda, 0xbf, 0xff, 0x3e, 0xd8, 0x91, 0x11,
    -	0x34, 0xad, 0xca, 0xba, 0x0d, 0xa5, 0xaa, 0x2a, 0x55, 0x22, 0x9b, 0x04, 0xea, 0x36, 0x4e, 0xd3,
    -	0x71, 0x54, 0x55, 0x15, 0x07, 0xc6, 0xeb, 0x89, 0x33, 0x8d, 0xf7, 0xa5, 0x3b, 0xe3, 0x10, 0x73,
    -	0x82, 0x0b, 0x67, 0xc4, 0x81, 0x4f, 0xc0, 0x57, 0x00, 0x09, 0x2e, 0x1c, 0xa9, 0x84, 0x84, 0x2a,
    -	0x2e, 0xf4, 0x64, 0x51, 0xf3, 0x11, 0x90, 0x38, 0x44, 0x1c, 0xd0, 0xcc, 0x8e, 0xbd, 0x6f, 0x76,
    -	0x93, 0x70, 0xf0, 0xcd, 0xf3, 0xbc, 0xfc, 0x9e, 0x67, 0xe6, 0x79, 0x5d, 0x83, 0xab, 0x87, 0xb7,
    -	0x99, 0x4e, 0x9d, 0x12, 0x76, 0x69, 0x89, 0x71, 0xc7, 0xc3, 0x0d, 0x52, 0x3a, 0xba, 0x51, 0x23,
    -	0x1c, 0xdf, 0x28, 0x35, 0x88, 0x4d, 0x3c, 0xcc, 0x49, 0x5d, 0x77, 0x3d, 0x87, 0x3b, 0x30, 0xe7,
    -	0xcb, 0xea, 0xd8, 0xa5, 0xba, 0x92, 0xd5, 0x95, 0x6c, 0xee, 0xdd, 0x06, 0xe5, 0x07, 0xad, 0x9a,
    -	0x6e, 0x3a, 0x56, 0xa9, 0xe1, 0x34, 0x9c, 0x92, 0x54, 0xa9, 0xb5, 0xf6, 0xe5, 0x49, 0x1e, 0xe4,
    -	0x2f, 0x1f, 0x2a, 0x57, 0x0c, 0x99, 0x35, 0x1d, 0x4f, 0xd8, 0x8c, 0x9b, 0xcb, 0xdd, 0x0c, 0x64,
    -	0x2c, 0x6c, 0x1e, 0x50, 0x9b, 0x78, 0xed, 0x92, 0x7b, 0xd8, 0x90, 0x4a, 0x1e, 0x61, 0x4e, 0xcb,
    -	0x33, 0xc9, 0xb9, 0xb4, 0x58, 0xc9, 0x22, 0x1c, 0x0f, 0xb2, 0x55, 0x1a, 0xa6, 0xe5, 0xb5, 0x6c,
    -	0x4e, 0xad, 0xa4, 0x99, 0x5b, 0xa7, 0x29, 0x30, 0xf3, 0x80, 0x58, 0x38, 0xae, 0x57, 0xfc, 0x51,
    -	0x03, 0x33, 0x1b, 0xd5, 0xf2, 0xa6, 0x47, 0x8f, 0x88, 0x07, 0x3f, 0x01, 0xd3, 0xc2, 0xa3, 0x3a,
    -	0xe6, 0x38, 0xab, 0xad, 0x68, 0xab, 0x99, 0xb5, 0xeb, 0x7a, 0xf0, 0xc8, 0x7d, 0x60, 0xdd, 0x3d,
    -	0x6c, 0x08, 0x02, 0xd3, 0x85, 0xb4, 0x7e, 0x74, 0x43, 0x7f, 0x58, 0x7b, 0x46, 0x4c, 0x5e, 0x21,
    -	0x1c, 0x1b, 0xf0, 0x45, 0xa7, 0x30, 0xd6, 0xed, 0x14, 0x40, 0x40, 0x43, 0x7d, 0x54, 0xf8, 0x00,
    -	0xa4, 0x99, 0x4b, 0xcc, 0xec, 0xb8, 0x44, 0xbf, 0xa2, 0x0f, 0x0f, 0xa1, 0xde, 0x77, 0xab, 0xea,
    -	0x12, 0xd3, 0x98, 0x55, 0xb0, 0x69, 0x71, 0x42, 0x12, 0xa4, 0xf8, 0x83, 0x06, 0xe6, 0xfa, 0x52,
    -	0xdb, 0x94, 0x71, 0xf8, 0x71, 0xe2, 0x02, 0xfa, 0xd9, 0x2e, 0x20, 0xb4, 0xa5, 0xfb, 0x0b, 0xca,
    -	0xce, 0x74, 0x8f, 0x12, 0x72, 0xfe, 0x3e, 0x98, 0xa0, 0x9c, 0x58, 0x2c, 0x3b, 0xbe, 0x92, 0x5a,
    -	0xcd, 0xac, 0xbd, 0x7d, 0x26, 0xef, 0x8d, 0x39, 0x85, 0x38, 0x51, 0x16, 0xba, 0xc8, 0x87, 0x28,
    -	0xfe, 0x9e, 0x0e, 0xf9, 0x2e, 0xee, 0x04, 0xef, 0x80, 0x0b, 0x98, 0x73, 0x6c, 0x1e, 0x20, 0xf2,
    -	0xbc, 0x45, 0x3d, 0x52, 0x97, 0x37, 0x98, 0x36, 0x60, 0xb7, 0x53, 0xb8, 0xb0, 0x1e, 0xe1, 0xa0,
    -	0x98, 0xa4, 0xd0, 0x75, 0x9d, 0x7a, 0xd9, 0xde, 0x77, 0x1e, 0xda, 0x15, 0xa7, 0x65, 0x73, 0xf9,
    -	0xc0, 0x4a, 0x77, 0x37, 0xc2, 0x41, 0x31, 0x49, 0x68, 0x82, 0xe5, 0x23, 0xa7, 0xd9, 0xb2, 0xc8,
    -	0x36, 0xdd, 0x27, 0x66, 0xdb, 0x6c, 0x92, 0x8a, 0x53, 0x27, 0x2c, 0x9b, 0x5a, 0x49, 0xad, 0xce,
    -	0x18, 0xa5, 0x6e, 0xa7, 0xb0, 0xfc, 0x78, 0x00, 0xff, 0xa4, 0x53, 0x58, 0x1a, 0x40, 0x47, 0x03,
    -	0xc1, 0xe0, 0x5d, 0x30, 0xaf, 0x5e, 0x68, 0x03, 0xbb, 0xd8, 0xa4, 0xbc, 0x9d, 0x4d, 0x4b, 0x0f,
    -	0x97, 0xba, 0x9d, 0xc2, 0x7c, 0x35, 0xca, 0x42, 0x71, 0x59, 0x78, 0x0f, 0xcc, 0xed, 0xb3, 0x8f,
    -	0x3c, 0xa7, 0xe5, 0xee, 0x3a, 0x4d, 0x6a, 0xb6, 0xb3, 0x13, 0x2b, 0xda, 0xea, 0x8c, 0x51, 0xec,
    -	0x76, 0x0a, 0x73, 0x1f, 0x56, 0x43, 0x8c, 0x93, 0x38, 0x01, 0x45, 0x15, 0x21, 0x01, 0x73, 0xdc,
    -	0x39, 0x24, 0xb6, 0x78, 0x3a, 0xc2, 0x38, 0xcb, 0x4e, 0xca, 0x58, 0xae, 0xbe, 0x29, 0x96, 0x7b,
    -	0x21, 0x05, 0xe3, 0xa2, 0x0a, 0xe7, 0x5c, 0x98, 0xca, 0x50, 0x14, 0x15, 0x6e, 0x80, 0x45, 0xcf,
    -	0x0f, 0x0e, 0x43, 0xc4, 0x6d, 0xd5, 0x9a, 0x94, 0x1d, 0x64, 0xa7, 0xe4, 0x8d, 0x2f, 0x76, 0x3b,
    -	0x85, 0x45, 0x14, 0x67, 0xa2, 0xa4, 0x3c, 0xbc, 0x09, 0x66, 0x19, 0xd9, 0xa6, 0x76, 0xeb, 0xd8,
    -	0x8f, 0xe9, 0xb4, 0xd4, 0x5f, 0xe8, 0x76, 0x0a, 0xb3, 0xd5, 0xad, 0x80, 0x8e, 0x22, 0x52, 0xc5,
    -	0xef, 0x35, 0x30, 0xb5, 0x51, 0x2d, 0xef, 0x38, 0x75, 0x32, 0x82, 0x82, 0x2e, 0x47, 0x0a, 0xfa,
    -	0xf2, 0x29, 0x25, 0x21, 0x9c, 0x1a, 0x5a, 0xce, 0x7f, 0xf9, 0xe5, 0x2c, 0x64, 0x54, 0x3f, 0x5a,
    -	0x01, 0x69, 0x1b, 0x5b, 0x44, 0xba, 0x3e, 0x13, 0xe8, 0xec, 0x60, 0x8b, 0x20, 0xc9, 0x81, 0xef,
    -	0x80, 0x49, 0xdb, 0xa9, 0x93, 0xf2, 0xa6, 0x74, 0x60, 0xc6, 0xb8, 0xa0, 0x64, 0x26, 0x77, 0x24,
    -	0x15, 0x29, 0xae, 0x78, 0x4a, 0xee, 0xb8, 0x4e, 0xd3, 0x69, 0xb4, 0x1f, 0x90, 0x76, 0x2f, 0xb9,
    -	0xe5, 0x53, 0xee, 0x85, 0xe8, 0x28, 0x22, 0x05, 0x6b, 0x20, 0x83, 0x9b, 0x4d, 0xc7, 0xc4, 0x1c,
    -	0xd7, 0x9a, 0x44, 0x66, 0x6c, 0x66, 0xad, 0xf4, 0xa6, 0x3b, 0xfa, 0x15, 0x21, 0x8c, 0x23, 0x35,
    -	0x11, 0x98, 0x31, 0xdf, 0xed, 0x14, 0x32, 0xeb, 0x01, 0x0e, 0x0a, 0x83, 0x16, 0xbf, 0xd3, 0x40,
    -	0x46, 0xdd, 0x7a, 0x04, 0x2d, 0xec, 0x5e, 0xb4, 0x85, 0xbd, 0x75, 0x86, 0x78, 0x0d, 0x69, 0x60,
    -	0x66, 0xdf, 0x6d, 0xd9, 0xbd, 0xf6, 0xc0, 0x54, 0x5d, 0x06, 0x8d, 0x65, 0x35, 0x09, 0x7d, 0xe5,
    -	0x0c, 0xd0, 0xaa, 0x43, 0xce, 0x2b, 0x03, 0x53, 0xfe, 0x99, 0xa1, 0x1e, 0x54, 0xf1, 0xef, 0x14,
    -	0x80, 0x1b, 0xd5, 0x72, 0xac, 0x3f, 0x8c, 0x20, 0xad, 0x29, 0x98, 0x15, 0x99, 0xd3, 0xcb, 0x0d,
    -	0x95, 0xde, 0xef, 0x9d, 0x31, 0x12, 0xb8, 0x46, 0x9a, 0x55, 0xd2, 0x24, 0x26, 0x77, 0x3c, 0x3f,
    -	0xc9, 0x76, 0x42, 0x60, 0x28, 0x02, 0x0d, 0x37, 0xc1, 0x42, 0xaf, 0xdd, 0x35, 0x31, 0x63, 0x22,
    -	0xb9, 0xb3, 0x29, 0x99, 0xcc, 0x59, 0xe5, 0xe2, 0x42, 0x35, 0xc6, 0x47, 0x09, 0x0d, 0xf8, 0x04,
    -	0x4c, 0x9b, 0xe1, 0xce, 0x7a, 0x4a, 0xda, 0xe8, 0xbd, 0x85, 0x45, 0x7f, 0xd4, 0xc2, 0x36, 0xa7,
    -	0xbc, 0x6d, 0xcc, 0x8a, 0x94, 0xe9, 0xb7, 0xe0, 0x3e, 0x1a, 0x64, 0x60, 0xd1, 0xc2, 0xc7, 0xd4,
    -	0x6a, 0x59, 0x7e, 0x72, 0x57, 0xe9, 0x67, 0x44, 0xf6, 0xdf, 0xf3, 0x9b, 0x90, 0xad, 0xaf, 0x12,
    -	0x07, 0x43, 0x49, 0xfc, 0xe2, 0x2f, 0x1a, 0xb8, 0x94, 0x0c, 0xfc, 0x08, 0x0a, 0xa4, 0x1a, 0x2d,
    -	0x10, 0xfd, 0x94, 0x2c, 0x8e, 0x39, 0x38, 0xa4, 0x56, 0xbe, 0x9e, 0x04, 0xb3, 0xe1, 0x18, 0x8e,
    -	0x20, 0x81, 0xdf, 0x07, 0x19, 0xd7, 0x73, 0x8e, 0x28, 0xa3, 0x8e, 0x4d, 0x3c, 0xd5, 0x1d, 0x97,
    -	0x94, 0x4a, 0x66, 0x37, 0x60, 0xa1, 0xb0, 0x1c, 0x6c, 0x02, 0xe0, 0x62, 0x0f, 0x5b, 0x84, 0x8b,
    -	0x4a, 0x4e, 0xc9, 0x37, 0xb8, 0xfd, 0xa6, 0x37, 0x08, 0x5f, 0x4b, 0xdf, 0xed, 0xab, 0x6e, 0xd9,
    -	0xdc, 0x6b, 0x07, 0x2e, 0x06, 0x0c, 0x14, 0xc2, 0x87, 0x87, 0x60, 0xce, 0x23, 0x66, 0x13, 0x53,
    -	0x4b, 0x8d, 0xf5, 0xb4, 0x74, 0x73, 0x4b, 0x8c, 0x57, 0x14, 0x66, 0x9c, 0x74, 0x0a, 0xd7, 0x93,
    -	0x2b, 0xba, 0xbe, 0x4b, 0x3c, 0x46, 0x19, 0x27, 0x36, 0xf7, 0x53, 0x27, 0xa2, 0x83, 0xa2, 0xd8,
    -	0x62, 0x04, 0x58, 0x62, 0x40, 0x3e, 0x74, 0x39, 0x75, 0x6c, 0x96, 0x9d, 0x08, 0x46, 0x40, 0x25,
    -	0x44, 0x47, 0x11, 0x29, 0xb8, 0x0d, 0x96, 0x45, 0xb7, 0xfe, 0xd4, 0x37, 0xb0, 0x75, 0xec, 0x62,
    -	0x5b, 0x3c, 0x55, 0x76, 0x52, 0xce, 0xe2, 0xac, 0xd8, 0x8e, 0xd6, 0x07, 0xf0, 0xd1, 0x40, 0x2d,
    -	0xf8, 0x04, 0x2c, 0xfa, 0xeb, 0x91, 0x41, 0xed, 0x3a, 0xb5, 0x1b, 0x62, 0x39, 0x92, 0x6b, 0xc1,
    -	0x8c, 0x71, 0x55, 0xd4, 0xc6, 0xe3, 0x38, 0xf3, 0x64, 0x10, 0x11, 0x25, 0x41, 0xe0, 0x73, 0xb0,
    -	0x28, 0x2d, 0x92, 0xba, 0x6a, 0x2c, 0x94, 0xb0, 0xec, 0x74, 0x72, 0xb7, 0x11, 0x4f, 0x27, 0x12,
    -	0xa9, 0xd7, 0x7e, 0x7a, 0x6d, 0x6a, 0x8f, 0x78, 0x96, 0xf1, 0x7f, 0x15, 0xaf, 0xc5, 0xf5, 0x38,
    -	0x14, 0x4a, 0xa2, 0xe7, 0xee, 0x82, 0xf9, 0x58, 0xc0, 0xe1, 0x02, 0x48, 0x1d, 0x92, 0xb6, 0x3f,
    -	0xaf, 0x91, 0xf8, 0x09, 0x97, 0xc1, 0xc4, 0x11, 0x6e, 0xb6, 0x88, 0x9f, 0x81, 0xc8, 0x3f, 0xdc,
    -	0x19, 0xbf, 0xad, 0x15, 0x7f, 0xd2, 0x40, 0xa4, 0xb1, 0x8d, 0xa0, 0xb8, 0x2b, 0xd1, 0xe2, 0x5e,
    -	0x3d, 0x6b, 0x62, 0x0f, 0x29, 0xeb, 0x2f, 0x34, 0x30, 0x1b, 0xde, 0x02, 0xe1, 0x35, 0x30, 0x8d,
    -	0x5b, 0x75, 0x4a, 0x6c, 0xb3, 0xb7, 0xb3, 0xf4, 0xbd, 0x59, 0x57, 0x74, 0xd4, 0x97, 0x10, 0x3b,
    -	0x22, 0x39, 0x76, 0xa9, 0x87, 0x45, 0xa6, 0x55, 0x89, 0xe9, 0xd8, 0x75, 0x26, 0x9f, 0x29, 0xe5,
    -	0x37, 0xca, 0xad, 0x38, 0x13, 0x25, 0xe5, 0x8b, 0xdf, 0x8e, 0x83, 0x05, 0x3f, 0x41, 0xfc, 0x4f,
    -	0x04, 0x8b, 0xd8, 0x7c, 0x04, 0xed, 0x05, 0x45, 0xd6, 0xbe, 0xeb, 0xa7, 0xaf, 0x44, 0x81, 0x77,
    -	0xc3, 0xf6, 0x3f, 0xf8, 0x14, 0x4c, 0x32, 0x8e, 0x79, 0x8b, 0xc9, 0xf1, 0x97, 0x59, 0x5b, 0x3b,
    -	0x17, 0xaa, 0xd4, 0x0c, 0xf6, 0x3f, 0xff, 0x8c, 0x14, 0x62, 0xf1, 0x67, 0x0d, 0x2c, 0xc7, 0x55,
    -	0x46, 0x90, 0x70, 0x8f, 0xa2, 0x09, 0x77, 0xed, 0x3c, 0x37, 0x1a, 0x92, 0x74, 0xbf, 0x69, 0xe0,
    -	0x52, 0xe2, 0xf2, 0x72, 0xce, 0x8a, 0x5e, 0xe5, 0xc6, 0x3a, 0xe2, 0x4e, 0xb0, 0x3e, 0xcb, 0x5e,
    -	0xb5, 0x3b, 0x80, 0x8f, 0x06, 0x6a, 0xc1, 0x67, 0x60, 0x81, 0xda, 0x4d, 0x6a, 0x13, 0x35, 0x96,
    -	0x83, 0x70, 0x0f, 0x6c, 0x28, 0x71, 0x64, 0x19, 0xe6, 0x65, 0xb1, 0xbd, 0x94, 0x63, 0x28, 0x28,
    -	0x81, 0x5b, 0xfc, 0x75, 0x40, 0x78, 0xe4, 0x5a, 0x29, 0x2a, 0x4a, 0x52, 0x88, 0x97, 0xa8, 0x28,
    -	0x45, 0x47, 0x7d, 0x09, 0x99, 0x41, 0xf2, 0x29, 0x94, 0xa3, 0xe7, 0xcb, 0x20, 0xa9, 0x19, 0xca,
    -	0x20, 0x79, 0x46, 0x0a, 0x51, 0x78, 0x22, 0xd6, 0xb6, 0xd0, 0x7a, 0xd6, 0xf7, 0x64, 0x47, 0xd1,
    -	0x51, 0x5f, 0xa2, 0xf8, 0x4f, 0x6a, 0x40, 0x94, 0x64, 0x2a, 0x86, 0xae, 0xd4, 0xfb, 0xc2, 0x8f,
    -	0x5f, 0xa9, 0xde, 0xbf, 0x52, 0x1d, 0x7e, 0xa3, 0x01, 0x88, 0xfb, 0x10, 0x95, 0x5e, 0xaa, 0xfa,
    -	0xf9, 0x74, 0xff, 0xfc, 0x15, 0xa2, 0xaf, 0x27, 0xc0, 0xfc, 0x59, 0x9d, 0x53, 0x4e, 0xc0, 0xa4,
    -	0x00, 0x1a, 0xe0, 0x01, 0xa4, 0x20, 0xe3, 0x53, 0xb7, 0x3c, 0xcf, 0xf1, 0x54, 0xc9, 0x5e, 0x3e,
    -	0xdd, 0x21, 0x29, 0x6e, 0xe4, 0xe5, 0x37, 0x51, 0xa0, 0x7f, 0xd2, 0x29, 0x64, 0x42, 0x7c, 0x14,
    -	0xc6, 0x16, 0xa6, 0xea, 0x24, 0x30, 0x95, 0xfe, 0x0f, 0xa6, 0x36, 0xc9, 0x70, 0x53, 0x21, 0xec,
    -	0xdc, 0x16, 0xf8, 0xdf, 0x90, 0x07, 0x3a, 0xd7, 0x6c, 0xfb, 0x52, 0x03, 0x61, 0x1b, 0x70, 0x1b,
    -	0xa4, 0x39, 0x55, 0x95, 0x98, 0x59, 0xbb, 0x7a, 0xb6, 0x0e, 0xb3, 0x47, 0x2d, 0x12, 0x34, 0x4a,
    -	0x71, 0x42, 0x12, 0x05, 0x5e, 0x01, 0x53, 0x16, 0x61, 0x0c, 0x37, 0x94, 0xe5, 0xe0, 0x03, 0xaa,
    -	0xe2, 0x93, 0x51, 0x8f, 0x5f, 0xbc, 0x05, 0x96, 0x06, 0x7c, 0x92, 0xc2, 0x02, 0x98, 0x30, 0xe5,
    -	0x5f, 0x0a, 0xc2, 0xa1, 0x09, 0x63, 0x46, 0x74, 0x99, 0x0d, 0xf9, 0x5f, 0x82, 0x4f, 0x37, 0x3e,
    -	0x78, 0xf1, 0x3a, 0x3f, 0xf6, 0xf2, 0x75, 0x7e, 0xec, 0xd5, 0xeb, 0xfc, 0xd8, 0xe7, 0xdd, 0xbc,
    -	0xf6, 0xa2, 0x9b, 0xd7, 0x5e, 0x76, 0xf3, 0xda, 0xab, 0x6e, 0x5e, 0xfb, 0xa3, 0x9b, 0xd7, 0xbe,
    -	0xfa, 0x33, 0x3f, 0xf6, 0x34, 0x37, 0xfc, 0xdf, 0xda, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x12,
    -	0x41, 0x18, 0xc9, 0xca, 0x15, 0x00, 0x00,
    +	// 1728 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x23, 0x49,
    +	0x15, 0x4f, 0xc7, 0xce, 0x57, 0x39, 0x99, 0x24, 0x35, 0x99, 0xc5, 0xeb, 0x83, 0x1d, 0x19, 0xc1,
    +	0x66, 0x46, 0x4b, 0x7b, 0x12, 0x96, 0xd5, 0x68, 0xa5, 0x95, 0x48, 0x27, 0x81, 0xf5, 0x6e, 0x9c,
    +	0xc9, 0x96, 0xa3, 0xd1, 0x6a, 0xc5, 0x81, 0x72, 0xbb, 0xe2, 0xd4, 0xc6, 0xfd, 0xb1, 0x5d, 0xd5,
    +	0x21, 0xe6, 0x04, 0x17, 0xce, 0x88, 0x03, 0x7f, 0x01, 0xff, 0x02, 0x48, 0x70, 0xe1, 0xc8, 0x48,
    +	0x48, 0x68, 0xe1, 0xc2, 0x9e, 0x2c, 0xc6, 0xf3, 0x27, 0x20, 0x71, 0x88, 0x38, 0xa0, 0xaa, 0x2e,
    +	0xf7, 0xb7, 0x27, 0x36, 0x2b, 0xf9, 0xe6, 0x7a, 0x1f, 0xbf, 0x7a, 0x55, 0xef, 0xf7, 0x5e, 0xbd,
    +	0x36, 0x78, 0x72, 0xfd, 0x8c, 0xe9, 0xd4, 0x69, 0x60, 0x97, 0x36, 0x18, 0x77, 0x3c, 0xdc, 0x23,
    +	0x8d, 0x9b, 0xfd, 0x0e, 0xe1, 0x78, 0xbf, 0xd1, 0x23, 0x36, 0xf1, 0x30, 0x27, 0x5d, 0xdd, 0xf5,
    +	0x1c, 0xee, 0xc0, 0x4a, 0x60, 0xab, 0x63, 0x97, 0xea, 0xca, 0x56, 0x57, 0xb6, 0x95, 0xef, 0xf5,
    +	0x28, 0xbf, 0xf2, 0x3b, 0xba, 0xe9, 0x58, 0x8d, 0x9e, 0xd3, 0x73, 0x1a, 0xd2, 0xa5, 0xe3, 0x5f,
    +	0xca, 0x95, 0x5c, 0xc8, 0x5f, 0x01, 0x54, 0xa5, 0x1e, 0xdb, 0xd6, 0x74, 0x3c, 0xb1, 0x67, 0x7a,
    +	0xbb, 0xca, 0x7b, 0x91, 0x8d, 0x85, 0xcd, 0x2b, 0x6a, 0x13, 0x6f, 0xd0, 0x70, 0xaf, 0x7b, 0xd2,
    +	0xc9, 0x23, 0xcc, 0xf1, 0x3d, 0x93, 0xcc, 0xe4, 0xc5, 0x1a, 0x16, 0xe1, 0x38, 0x6f, 0xaf, 0xc6,
    +	0x24, 0x2f, 0xcf, 0xb7, 0x39, 0xb5, 0xb2, 0xdb, 0xbc, 0x7f, 0x9f, 0x03, 0x33, 0xaf, 0x88, 0x85,
    +	0xd3, 0x7e, 0xf5, 0x3f, 0x69, 0x60, 0xed, 0xa8, 0xdd, 0x3c, 0xf6, 0xe8, 0x0d, 0xf1, 0xe0, 0x4f,
    +	0xc1, 0xaa, 0x88, 0xa8, 0x8b, 0x39, 0x2e, 0x6b, 0xbb, 0xda, 0x5e, 0xe9, 0xe0, 0xa9, 0x1e, 0x5d,
    +	0x72, 0x08, 0xac, 0xbb, 0xd7, 0x3d, 0x21, 0x60, 0xba, 0xb0, 0xd6, 0x6f, 0xf6, 0xf5, 0xe7, 0x9d,
    +	0x2f, 0x88, 0xc9, 0x5b, 0x84, 0x63, 0x03, 0xbe, 0x1c, 0xd6, 0x16, 0x46, 0xc3, 0x1a, 0x88, 0x64,
    +	0x28, 0x44, 0x85, 0x9f, 0x80, 0x22, 0x73, 0x89, 0x59, 0x5e, 0x94, 0xe8, 0x8f, 0xf5, 0xc9, 0x29,
    +	0xd4, 0xc3, 0xb0, 0xda, 0x2e, 0x31, 0x8d, 0x75, 0x05, 0x5b, 0x14, 0x2b, 0x24, 0x41, 0xea, 0x7f,
    +	0xd4, 0xc0, 0x46, 0x68, 0x75, 0x4a, 0x19, 0x87, 0x3f, 0xc9, 0x1c, 0x40, 0x9f, 0xee, 0x00, 0xc2,
    +	0x5b, 0x86, 0xbf, 0xa5, 0xf6, 0x59, 0x1d, 0x4b, 0x62, 0xc1, 0x7f, 0x0c, 0x96, 0x28, 0x27, 0x16,
    +	0x2b, 0x2f, 0xee, 0x16, 0xf6, 0x4a, 0x07, 0xdf, 0x99, 0x2a, 0x7a, 0x63, 0x43, 0x21, 0x2e, 0x35,
    +	0x85, 0x2f, 0x0a, 0x20, 0xea, 0xff, 0x2c, 0xc6, 0x62, 0x17, 0x67, 0x82, 0x1f, 0x80, 0x07, 0x98,
    +	0x73, 0x6c, 0x5e, 0x21, 0xf2, 0xa5, 0x4f, 0x3d, 0xd2, 0x95, 0x27, 0x58, 0x35, 0xe0, 0x68, 0x58,
    +	0x7b, 0x70, 0x98, 0xd0, 0xa0, 0x94, 0xa5, 0xf0, 0x75, 0x9d, 0x6e, 0xd3, 0xbe, 0x74, 0x9e, 0xdb,
    +	0x2d, 0xc7, 0xb7, 0xb9, 0xbc, 0x60, 0xe5, 0x7b, 0x9e, 0xd0, 0xa0, 0x94, 0x25, 0x34, 0xc1, 0xce,
    +	0x8d, 0xd3, 0xf7, 0x2d, 0x72, 0x4a, 0x2f, 0x89, 0x39, 0x30, 0xfb, 0xa4, 0xe5, 0x74, 0x09, 0x2b,
    +	0x17, 0x76, 0x0b, 0x7b, 0x6b, 0x46, 0x63, 0x34, 0xac, 0xed, 0xbc, 0xc8, 0xd1, 0xdf, 0x0d, 0x6b,
    +	0x0f, 0x73, 0xe4, 0x28, 0x17, 0x0c, 0x7e, 0x08, 0x36, 0xd5, 0x0d, 0x1d, 0x61, 0x17, 0x9b, 0x94,
    +	0x0f, 0xca, 0x45, 0x19, 0xe1, 0xc3, 0xd1, 0xb0, 0xb6, 0xd9, 0x4e, 0xaa, 0x50, 0xda, 0x16, 0x7e,
    +	0x04, 0x36, 0x2e, 0xd9, 0x8f, 0x3d, 0xc7, 0x77, 0xcf, 0x9d, 0x3e, 0x35, 0x07, 0xe5, 0xa5, 0x5d,
    +	0x6d, 0x6f, 0xcd, 0xa8, 0x8f, 0x86, 0xb5, 0x8d, 0x1f, 0xb5, 0x63, 0x8a, 0xbb, 0xb4, 0x00, 0x25,
    +	0x1d, 0x21, 0x01, 0x1b, 0xdc, 0xb9, 0x26, 0xb6, 0xb8, 0x3a, 0xc2, 0x38, 0x2b, 0x2f, 0xcb, 0x5c,
    +	0xee, 0xbd, 0x29, 0x97, 0x17, 0x31, 0x07, 0xe3, 0x91, 0x4a, 0xe7, 0x46, 0x5c, 0xca, 0x50, 0x12,
    +	0x15, 0x1e, 0x81, 0x6d, 0x2f, 0x48, 0x0e, 0x43, 0xc4, 0xf5, 0x3b, 0x7d, 0xca, 0xae, 0xca, 0x2b,
    +	0xf2, 0xc4, 0x8f, 0x46, 0xc3, 0xda, 0x36, 0x4a, 0x2b, 0x51, 0xd6, 0x1e, 0xbe, 0x07, 0xd6, 0x19,
    +	0x39, 0xa5, 0xb6, 0x7f, 0x1b, 0xe4, 0x74, 0x55, 0xfa, 0x6f, 0x8d, 0x86, 0xb5, 0xf5, 0xf6, 0x49,
    +	0x24, 0x47, 0x09, 0xab, 0xfa, 0x1f, 0x34, 0xb0, 0x72, 0xd4, 0x6e, 0x9e, 0x39, 0x5d, 0x32, 0x87,
    +	0x82, 0x6e, 0x26, 0x0a, 0xfa, 0x9d, 0x7b, 0x4a, 0x42, 0x04, 0x35, 0xb1, 0x9c, 0xff, 0x1d, 0x94,
    +	0xb3, 0xb0, 0x51, 0xfd, 0x68, 0x17, 0x14, 0x6d, 0x6c, 0x11, 0x19, 0xfa, 0x5a, 0xe4, 0x73, 0x86,
    +	0x2d, 0x82, 0xa4, 0x06, 0x7e, 0x17, 0x2c, 0xdb, 0x4e, 0x97, 0x34, 0x8f, 0x65, 0x00, 0x6b, 0xc6,
    +	0x03, 0x65, 0xb3, 0x7c, 0x26, 0xa5, 0x48, 0x69, 0xc5, 0x55, 0x72, 0xc7, 0x75, 0xfa, 0x4e, 0x6f,
    +	0xf0, 0x09, 0x19, 0x8c, 0xc9, 0x2d, 0xaf, 0xf2, 0x22, 0x26, 0x47, 0x09, 0x2b, 0xd8, 0x01, 0x25,
    +	0xdc, 0xef, 0x3b, 0x26, 0xe6, 0xb8, 0xd3, 0x27, 0x92, 0xb1, 0xa5, 0x83, 0xc6, 0x9b, 0xce, 0x18,
    +	0x54, 0x84, 0xd8, 0x1c, 0xa9, 0x17, 0x81, 0x19, 0x9b, 0xa3, 0x61, 0xad, 0x74, 0x18, 0xe1, 0xa0,
    +	0x38, 0x68, 0xfd, 0xf7, 0x1a, 0x28, 0xa9, 0x53, 0xcf, 0xa1, 0x85, 0x7d, 0x94, 0x6c, 0x61, 0xdf,
    +	0x9e, 0x22, 0x5f, 0x13, 0x1a, 0x98, 0x19, 0x86, 0x2d, 0xbb, 0xd7, 0x05, 0x58, 0xe9, 0xca, 0xa4,
    +	0xb1, 0xb2, 0x26, 0xa1, 0x1f, 0x4f, 0x01, 0xad, 0x3a, 0xe4, 0xa6, 0xda, 0x60, 0x25, 0x58, 0x33,
    +	0x34, 0x86, 0xaa, 0xff, 0xa7, 0x00, 0xe0, 0x51, 0xbb, 0x99, 0xea, 0x0f, 0x73, 0xa0, 0x35, 0x05,
    +	0xeb, 0x82, 0x39, 0x63, 0x6e, 0x28, 0x7a, 0x7f, 0x7f, 0xca, 0x4c, 0xe0, 0x0e, 0xe9, 0xb7, 0x49,
    +	0x9f, 0x98, 0xdc, 0xf1, 0x02, 0x92, 0x9d, 0xc5, 0xc0, 0x50, 0x02, 0x1a, 0x1e, 0x83, 0xad, 0x71,
    +	0xbb, 0xeb, 0x63, 0xc6, 0x04, 0xb9, 0xcb, 0x05, 0x49, 0xe6, 0xb2, 0x0a, 0x71, 0xab, 0x9d, 0xd2,
    +	0xa3, 0x8c, 0x07, 0xfc, 0x0c, 0xac, 0x9a, 0xf1, 0xce, 0x7a, 0x0f, 0x6d, 0xf4, 0xf1, 0xc0, 0xa2,
    +	0x7f, 0xea, 0x63, 0x9b, 0x53, 0x3e, 0x30, 0xd6, 0x05, 0x65, 0xc2, 0x16, 0x1c, 0xa2, 0x41, 0x06,
    +	0xb6, 0x2d, 0x7c, 0x4b, 0x2d, 0xdf, 0x0a, 0xc8, 0xdd, 0xa6, 0x3f, 0x27, 0xb2, 0xff, 0xce, 0xbe,
    +	0x85, 0x6c, 0x7d, 0xad, 0x34, 0x18, 0xca, 0xe2, 0xd7, 0xff, 0xaa, 0x81, 0xb7, 0xb2, 0x89, 0x9f,
    +	0x43, 0x81, 0xb4, 0x93, 0x05, 0xa2, 0xdf, 0xc3, 0xe2, 0x54, 0x80, 0x13, 0x6a, 0xe5, 0x37, 0xcb,
    +	0x60, 0x3d, 0x9e, 0xc3, 0x39, 0x10, 0xf8, 0x07, 0xa0, 0xe4, 0x7a, 0xce, 0x0d, 0x65, 0xd4, 0xb1,
    +	0x89, 0xa7, 0xba, 0xe3, 0x43, 0xe5, 0x52, 0x3a, 0x8f, 0x54, 0x28, 0x6e, 0x07, 0xfb, 0x00, 0xb8,
    +	0xd8, 0xc3, 0x16, 0xe1, 0xa2, 0x92, 0x0b, 0xf2, 0x0e, 0x9e, 0xbd, 0xe9, 0x0e, 0xe2, 0xc7, 0xd2,
    +	0xcf, 0x43, 0xd7, 0x13, 0x9b, 0x7b, 0x83, 0x28, 0xc4, 0x48, 0x81, 0x62, 0xf8, 0xf0, 0x1a, 0x6c,
    +	0x78, 0xc4, 0xec, 0x63, 0x6a, 0xa9, 0x67, 0xbd, 0x28, 0xc3, 0x3c, 0x11, 0xcf, 0x2b, 0x8a, 0x2b,
    +	0xee, 0x86, 0xb5, 0xa7, 0xd9, 0x11, 0x5d, 0x3f, 0x27, 0x1e, 0xa3, 0x8c, 0x13, 0x9b, 0x07, 0xd4,
    +	0x49, 0xf8, 0xa0, 0x24, 0xb6, 0x78, 0x02, 0x2c, 0xf1, 0x40, 0x3e, 0x77, 0x39, 0x75, 0x6c, 0x56,
    +	0x5e, 0x8a, 0x9e, 0x80, 0x56, 0x4c, 0x8e, 0x12, 0x56, 0xf0, 0x14, 0xec, 0x88, 0x6e, 0xfd, 0xb3,
    +	0x60, 0x83, 0x93, 0x5b, 0x17, 0xdb, 0xe2, 0xaa, 0xca, 0xcb, 0xf2, 0x2d, 0x2e, 0x8b, 0xe9, 0xe8,
    +	0x30, 0x47, 0x8f, 0x72, 0xbd, 0xe0, 0x67, 0x60, 0x3b, 0x18, 0x8f, 0x0c, 0x6a, 0x77, 0xa9, 0xdd,
    +	0x13, 0xc3, 0x91, 0x1c, 0x0b, 0xd6, 0x8c, 0x27, 0xa2, 0x36, 0x5e, 0xa4, 0x95, 0x77, 0x79, 0x42,
    +	0x94, 0x05, 0x81, 0x5f, 0x82, 0x6d, 0xb9, 0x23, 0xe9, 0xaa, 0xc6, 0x42, 0x09, 0x2b, 0xaf, 0x66,
    +	0x67, 0x1b, 0x71, 0x75, 0x82, 0x48, 0xe3, 0xf6, 0x33, 0x6e, 0x53, 0x17, 0xc4, 0xb3, 0x8c, 0xb7,
    +	0x55, 0xbe, 0xb6, 0x0f, 0xd3, 0x50, 0x28, 0x8b, 0x5e, 0xf9, 0x10, 0x6c, 0xa6, 0x12, 0x0e, 0xb7,
    +	0x40, 0xe1, 0x9a, 0x0c, 0x82, 0xf7, 0x1a, 0x89, 0x9f, 0x70, 0x07, 0x2c, 0xdd, 0xe0, 0xbe, 0x4f,
    +	0x02, 0x06, 0xa2, 0x60, 0xf1, 0xc1, 0xe2, 0x33, 0xad, 0xfe, 0x67, 0x0d, 0x24, 0x1a, 0xdb, 0x1c,
    +	0x8a, 0xbb, 0x95, 0x2c, 0xee, 0xbd, 0x69, 0x89, 0x3d, 0xa1, 0xac, 0x7f, 0xa9, 0x81, 0xf5, 0xf8,
    +	0x14, 0x08, 0xdf, 0x05, 0xab, 0xd8, 0xef, 0x52, 0x62, 0x9b, 0xe3, 0x99, 0x25, 0x8c, 0xe6, 0x50,
    +	0xc9, 0x51, 0x68, 0x21, 0x66, 0x44, 0x72, 0xeb, 0x52, 0x0f, 0x0b, 0xa6, 0xb5, 0x89, 0xe9, 0xd8,
    +	0x5d, 0x26, 0xaf, 0xa9, 0x10, 0x34, 0xca, 0x93, 0xb4, 0x12, 0x65, 0xed, 0xeb, 0xbf, 0x5b, 0x04,
    +	0x5b, 0x01, 0x41, 0x82, 0x4f, 0x04, 0x8b, 0xd8, 0x7c, 0x0e, 0xed, 0x05, 0x25, 0xc6, 0xbe, 0xa7,
    +	0xf7, 0x8f, 0x44, 0x51, 0x74, 0x93, 0xe6, 0x3f, 0xf8, 0x39, 0x58, 0x66, 0x1c, 0x73, 0x9f, 0xc9,
    +	0xe7, 0xaf, 0x74, 0x70, 0x30, 0x13, 0xaa, 0xf4, 0x8c, 0xe6, 0xbf, 0x60, 0x8d, 0x14, 0x62, 0xfd,
    +	0x2f, 0x1a, 0xd8, 0x49, 0xbb, 0xcc, 0x81, 0x70, 0x9f, 0x26, 0x09, 0xf7, 0xee, 0x2c, 0x27, 0x9a,
    +	0x40, 0xba, 0x7f, 0x68, 0xe0, 0xad, 0xcc, 0xe1, 0xe5, 0x3b, 0x2b, 0x7a, 0x95, 0x9b, 0xea, 0x88,
    +	0x67, 0xd1, 0xf8, 0x2c, 0x7b, 0xd5, 0x79, 0x8e, 0x1e, 0xe5, 0x7a, 0xc1, 0x2f, 0xc0, 0x16, 0xb5,
    +	0xfb, 0xd4, 0x26, 0xea, 0x59, 0x8e, 0xd2, 0x9d, 0xdb, 0x50, 0xd2, 0xc8, 0x32, 0xcd, 0x3b, 0x62,
    +	0x7a, 0x69, 0xa6, 0x50, 0x50, 0x06, 0xb7, 0xfe, 0xb7, 0x9c, 0xf4, 0xc8, 0xb1, 0x52, 0x54, 0x94,
    +	0x94, 0x10, 0x2f, 0x53, 0x51, 0x4a, 0x8e, 0x42, 0x0b, 0xc9, 0x20, 0x79, 0x15, 0x2a, 0xd0, 0xd9,
    +	0x18, 0x24, 0x3d, 0x63, 0x0c, 0x92, 0x6b, 0xa4, 0x10, 0x45, 0x24, 0x62, 0x6c, 0x8b, 0x8d, 0x67,
    +	0x61, 0x24, 0x67, 0x4a, 0x8e, 0x42, 0x8b, 0xfa, 0x7f, 0x0b, 0x39, 0x59, 0x92, 0x54, 0x8c, 0x1d,
    +	0x69, 0xfc, 0x85, 0x9f, 0x3e, 0x52, 0x37, 0x3c, 0x52, 0x17, 0xfe, 0x56, 0x03, 0x10, 0x87, 0x10,
    +	0xad, 0x31, 0x55, 0x03, 0x3e, 0x7d, 0x3c, 0x7b, 0x85, 0xe8, 0x87, 0x19, 0xb0, 0xe0, 0xad, 0xae,
    +	0xa8, 0x20, 0x60, 0xd6, 0x00, 0xe5, 0x44, 0x00, 0x29, 0x28, 0x05, 0xd2, 0x13, 0xcf, 0x73, 0x3c,
    +	0x55, 0xb2, 0xef, 0xdc, 0x1f, 0x90, 0x34, 0x37, 0xaa, 0xf2, 0x9b, 0x28, 0xf2, 0xbf, 0x1b, 0xd6,
    +	0x4a, 0x31, 0x3d, 0x8a, 0x63, 0x8b, 0xad, 0xba, 0x24, 0xda, 0xaa, 0xf8, 0x7f, 0x6c, 0x75, 0x4c,
    +	0x26, 0x6f, 0x15, 0xc3, 0xae, 0x9c, 0x80, 0x6f, 0x4d, 0xb8, 0xa0, 0x99, 0xde, 0xb6, 0xd7, 0x8b,
    +	0xe0, 0x51, 0x78, 0xff, 0x1e, 0xed, 0xf8, 0x9c, 0xb0, 0x79, 0x4d, 0x7e, 0x07, 0x00, 0x04, 0x9f,
    +	0x4f, 0x92, 0xaa, 0xc1, 0xe0, 0x17, 0x7a, 0x1c, 0x87, 0x1a, 0x14, 0xb3, 0x82, 0x7e, 0xce, 0xd8,
    +	0x77, 0x38, 0x15, 0xb9, 0xe2, 0x87, 0x9b, 0x75, 0xfe, 0xfb, 0xa6, 0x13, 0xc4, 0xdf, 0x35, 0xf0,
    +	0x76, 0x6e, 0x20, 0x73, 0xe8, 0xec, 0x2f, 0x92, 0x9d, 0x7d, 0x7f, 0xe6, 0xcb, 0x9a, 0xd0, 0xde,
    +	0x7f, 0xa5, 0x81, 0x38, 0x3b, 0xe1, 0x29, 0x28, 0x72, 0xaa, 0x7a, 0x78, 0xe9, 0xe0, 0xc9, 0x74,
    +	0x27, 0xb8, 0xa0, 0x16, 0x89, 0x9e, 0x58, 0xb1, 0x42, 0x12, 0x05, 0x3e, 0x06, 0x2b, 0x16, 0x61,
    +	0x0c, 0xf7, 0xc6, 0xc4, 0x08, 0x3f, 0xbd, 0x5b, 0x81, 0x18, 0x8d, 0xf5, 0xf5, 0xf7, 0xc1, 0xc3,
    +	0x9c, 0x3f, 0x33, 0x60, 0x0d, 0x2c, 0x99, 0xf2, 0xcf, 0x28, 0x11, 0xd0, 0x92, 0xb1, 0x26, 0x0e,
    +	0x70, 0x24, 0xff, 0x85, 0x0a, 0xe4, 0xc6, 0x0f, 0x5f, 0xbe, 0xaa, 0x2e, 0x7c, 0xf5, 0xaa, 0xba,
    +	0xf0, 0xf5, 0xab, 0xea, 0xc2, 0x2f, 0x46, 0x55, 0xed, 0xe5, 0xa8, 0xaa, 0x7d, 0x35, 0xaa, 0x6a,
    +	0x5f, 0x8f, 0xaa, 0xda, 0xbf, 0x46, 0x55, 0xed, 0xd7, 0xaf, 0xab, 0x0b, 0x9f, 0x57, 0x26, 0xff,
    +	0xcf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x30, 0xdb, 0x24, 0x04, 0x18, 0x00, 0x00,
     }
     
     func (m *CSIDriver) Marshal() (dAtA []byte, err error) {
    @@ -1664,6 +1727,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error)
     	return len(dAtA) - i, nil
     }
     
    +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Parameters) > 0 {
    +		keysForParameters := make([]string, 0, len(m.Parameters))
    +		for k := range m.Parameters {
    +			keysForParameters = append(keysForParameters, string(k))
    +		}
    +		github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
    +		for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- {
    +			v := m.Parameters[string(keysForParameters[iNdEx])]
    +			baseI := i
    +			i -= len(v)
    +			copy(dAtA[i:], v)
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
    +			i--
    +			dAtA[i] = 0x12
    +			i -= len(keysForParameters[iNdEx])
    +			copy(dAtA[i:], keysForParameters[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx])))
    +			i--
    +			dAtA[i] = 0xa
    +			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	i -= len(m.DriverName)
    +	copy(dAtA[i:], m.DriverName)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
    +	i--
    +	dAtA[i] = 0x12
    +	{
    +		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
    +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Items) > 0 {
    +		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
    +			{
    +				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
    +				if err != nil {
    +					return 0, err
    +				}
    +				i -= size
    +				i = encodeVarintGenerated(dAtA, i, uint64(size))
    +			}
    +			i--
    +			dAtA[i] = 0x12
    +		}
    +	}
    +	{
    +		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		i -= size
    +		i = encodeVarintGenerated(dAtA, i, uint64(size))
    +	}
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *VolumeError) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -2086,6 +2258,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) {
     	return n
     }
     
    +func (m *VolumeAttributesClass) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ObjectMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.DriverName)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Parameters) > 0 {
    +		for k, v := range m.Parameters {
    +			_ = k
    +			_ = v
    +			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
    +			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
    +		}
    +	}
    +	return n
    +}
    +
    +func (m *VolumeAttributesClassList) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = m.ListMeta.Size()
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Items) > 0 {
    +		for _, e := range m.Items {
    +			l = e.Size()
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
     func (m *VolumeError) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -2384,6 +2594,44 @@ func (this *VolumeAttachmentStatus) String() string {
     	}, "")
     	return s
     }
    +func (this *VolumeAttributesClass) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	keysForParameters := make([]string, 0, len(this.Parameters))
    +	for k := range this.Parameters {
    +		keysForParameters = append(keysForParameters, k)
    +	}
    +	github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
    +	mapStringForParameters := "map[string]string{"
    +	for _, k := range keysForParameters {
    +		mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k])
    +	}
    +	mapStringForParameters += "}"
    +	s := strings.Join([]string{`&VolumeAttributesClass{`,
    +		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
    +		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
    +		`Parameters:` + mapStringForParameters + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
    +func (this *VolumeAttributesClassList) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	repeatedStringForItems := "[]VolumeAttributesClass{"
    +	for _, f := range this.Items {
    +		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + ","
    +	}
    +	repeatedStringForItems += "}"
    +	s := strings.Join([]string{`&VolumeAttributesClassList{`,
    +		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
    +		`Items:` + repeatedStringForItems + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *VolumeError) String() string {
     	if this == nil {
     		return "nil"
    @@ -5154,6 +5402,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.DriverName = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if m.Parameters == nil {
    +				m.Parameters = make(map[string]string)
    +			}
    +			var mapkey string
    +			var mapvalue string
    +			for iNdEx < postIndex {
    +				entryPreIndex := iNdEx
    +				var wire uint64
    +				for shift := uint(0); ; shift += 7 {
    +					if shift >= 64 {
    +						return ErrIntOverflowGenerated
    +					}
    +					if iNdEx >= l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					b := dAtA[iNdEx]
    +					iNdEx++
    +					wire |= uint64(b&0x7F) << shift
    +					if b < 0x80 {
    +						break
    +					}
    +				}
    +				fieldNum := int32(wire >> 3)
    +				if fieldNum == 1 {
    +					var stringLenmapkey uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapkey |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapkey := int(stringLenmapkey)
    +					if intStringLenmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapkey := iNdEx + intStringLenmapkey
    +					if postStringIndexmapkey < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapkey > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
    +					iNdEx = postStringIndexmapkey
    +				} else if fieldNum == 2 {
    +					var stringLenmapvalue uint64
    +					for shift := uint(0); ; shift += 7 {
    +						if shift >= 64 {
    +							return ErrIntOverflowGenerated
    +						}
    +						if iNdEx >= l {
    +							return io.ErrUnexpectedEOF
    +						}
    +						b := dAtA[iNdEx]
    +						iNdEx++
    +						stringLenmapvalue |= uint64(b&0x7F) << shift
    +						if b < 0x80 {
    +							break
    +						}
    +					}
    +					intStringLenmapvalue := int(stringLenmapvalue)
    +					if intStringLenmapvalue < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
    +					if postStringIndexmapvalue < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if postStringIndexmapvalue > l {
    +						return io.ErrUnexpectedEOF
    +					}
    +					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
    +					iNdEx = postStringIndexmapvalue
    +				} else {
    +					iNdEx = entryPreIndex
    +					skippy, err := skipGenerated(dAtA[iNdEx:])
    +					if err != nil {
    +						return err
    +					}
    +					if (skippy < 0) || (iNdEx+skippy) < 0 {
    +						return ErrInvalidLengthGenerated
    +					}
    +					if (iNdEx + skippy) > postIndex {
    +						return io.ErrUnexpectedEOF
    +					}
    +					iNdEx += skippy
    +				}
    +			}
    +			m.Parameters[mapkey] = mapvalue
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
    +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
    +			}
    +			var msglen int
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				msglen |= int(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			if msglen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + msglen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Items = append(m.Items, VolumeAttributesClass{})
    +			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
    +				return err
    +			}
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *VolumeError) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto
    index f6e619d05d..dfef3f6cc5 100644
    --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto
    +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto
    @@ -47,7 +47,7 @@ message CSIDriver {
       // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
       // alphanumerics between.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec represents the specification of the CSI Driver.
       optional CSIDriverSpec spec = 2;
    @@ -58,7 +58,7 @@ message CSIDriverList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CSIDriver
       repeated CSIDriver items = 2;
    @@ -229,7 +229,7 @@ message CSIDriverSpec {
     // CSINode has an OwnerReference that points to the corresponding node object.
     message CSINode {
       // metadata.name must be the Kubernetes node name.
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec is the specification of CSINode
       optional CSINodeSpec spec = 2;
    @@ -277,7 +277,7 @@ message CSINodeList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CSINode
       repeated CSINode items = 2;
    @@ -329,7 +329,7 @@ message CSIStorageCapacity {
       //
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // nodeTopology defines which nodes have access to the storage
       // for which capacity was reported. If not set, the storage is
    @@ -338,7 +338,7 @@ message CSIStorageCapacity {
       // immutable.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2;
     
       // storageClassName represents the name of the StorageClass that the reported capacity applies to.
       // It must meet the same requirements as the name of a StorageClass
    @@ -358,7 +358,7 @@ message CSIStorageCapacity {
       // unavailable.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4;
     
       // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
       // for a GetCapacityRequest with topology and parameters that match the
    @@ -372,7 +372,7 @@ message CSIStorageCapacity {
       // API is ResourceRequirements.Requests in a volume claim.
       //
       // +optional
    -  optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
    +  optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5;
     }
     
     // CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
    @@ -380,7 +380,7 @@ message CSIStorageCapacityList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of CSIStorageCapacity objects.
       repeated CSIStorageCapacity items = 2;
    @@ -395,7 +395,7 @@ message StorageClass {
       // Standard object's metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // provisioner indicates the type of the provisioner.
       optional string provisioner = 2;
    @@ -433,7 +433,7 @@ message StorageClass {
       // This field is only honored by servers that enable the VolumeScheduling feature.
       // +optional
       // +listType=atomic
    -  repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
    +  repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
     }
     
     // StorageClassList is a collection of storage classes.
    @@ -441,7 +441,7 @@ message StorageClassList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of StorageClasses
       repeated StorageClass items = 2;
    @@ -468,7 +468,7 @@ message VolumeAttachment {
       // Standard object metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // spec represents specification of the desired attach/detach volume behavior.
       // Populated by the Kubernetes system.
    @@ -486,7 +486,7 @@ message VolumeAttachmentList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // items is the list of VolumeAttachments
       repeated VolumeAttachment items = 2;
    @@ -508,7 +508,7 @@ message VolumeAttachmentSource {
       // PersistentVolumeSpec. This field is beta-level and is only
       // honored by servers that enabled the CSIMigration feature.
       // +optional
    -  optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
    +  optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2;
     }
     
     // VolumeAttachmentSpec is the specification of a VolumeAttachment request.
    @@ -552,11 +552,51 @@ message VolumeAttachmentStatus {
       optional VolumeError detachError = 4;
     }
     
    +// VolumeAttributesClass represents a specification of mutable volume attributes
    +// defined by the CSI driver. The class can be specified during dynamic provisioning
    +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.
    +message VolumeAttributesClass {
    +  // Standard object's metadata.
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +
    +  // Name of the CSI driver
    +  // This field is immutable.
    +  optional string driverName = 2;
    +
    +  // parameters hold volume attributes defined by the CSI driver. These values
    +  // are opaque to the Kubernetes and are passed directly to the CSI driver.
    +  // The underlying storage provider supports changing these attributes on an
    +  // existing volume, however the parameters field itself is immutable. To
    +  // invoke a volume update, a new VolumeAttributesClass should be created with
    +  // new parameters, and the PersistentVolumeClaim should be updated to reference
    +  // the new VolumeAttributesClass.
    +  //
    +  // This field is required and must contain at least one key/value pair.
    +  // The keys cannot be empty, and the maximum number of parameters is 512, with
    +  // a cumulative max size of 256K. If the CSI driver rejects invalid parameters,
    +  // the target PersistentVolumeClaim will be set to an "Infeasible" state in the
    +  // modifyVolumeStatus field.
+  map<string, string> parameters = 3;
    +}
    +
    +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects.
    +message VolumeAttributesClassList {
    +  // Standard list metadata
    +  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +  // +optional
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +
    +  // items is the list of VolumeAttributesClass objects.
    +  repeated VolumeAttributesClass items = 2;
    +}
    +
     // VolumeError captures an error encountered during a volume operation.
     message VolumeError {
       // time represents the time the error was encountered.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
     
       // message represents the error encountered during Attach or Detach operation.
       // This string may be logged, so it should not contain sensitive
    diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go
    index a281d0f26e..e2214ef2f5 100644
    --- a/vendor/k8s.io/api/storage/v1beta1/register.go
    +++ b/vendor/k8s.io/api/storage/v1beta1/register.go
    @@ -58,6 +58,9 @@ func addKnownTypes(scheme *runtime.Scheme) error {
     
     		&CSIStorageCapacity{},
     		&CSIStorageCapacityList{},
    +
    +		&VolumeAttributesClass{},
    +		&VolumeAttributesClassList{},
     	)
     
     	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go
    index 9333a28b8d..ce294e3dba 100644
    --- a/vendor/k8s.io/api/storage/v1beta1/types.go
    +++ b/vendor/k8s.io/api/storage/v1beta1/types.go
    @@ -714,3 +714,55 @@ type CSIStorageCapacityList struct {
     	// items is the list of CSIStorageCapacity objects.
     	Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"`
     }
    +
    +// +genclient
    +// +genclient:nonNamespaced
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +
    +// VolumeAttributesClass represents a specification of mutable volume attributes
    +// defined by the CSI driver. The class can be specified during dynamic provisioning
    +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.
    +type VolumeAttributesClass struct {
    +	metav1.TypeMeta `json:",inline"`
    +
    +	// Standard object's metadata.
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// Name of the CSI driver
    +	// This field is immutable.
    +	DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"`
    +
    +	// parameters hold volume attributes defined by the CSI driver. These values
    +	// are opaque to the Kubernetes and are passed directly to the CSI driver.
    +	// The underlying storage provider supports changing these attributes on an
    +	// existing volume, however the parameters field itself is immutable. To
    +	// invoke a volume update, a new VolumeAttributesClass should be created with
    +	// new parameters, and the PersistentVolumeClaim should be updated to reference
    +	// the new VolumeAttributesClass.
    +	//
    +	// This field is required and must contain at least one key/value pair.
    +	// The keys cannot be empty, and the maximum number of parameters is 512, with
    +	// a cumulative max size of 256K. If the CSI driver rejects invalid parameters,
    +	// the target PersistentVolumeClaim will be set to an "Infeasible" state in the
    +	// modifyVolumeStatus field.
    +	Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
    +}
    +
    +// +k8s:prerelease-lifecycle-gen:introduced=1.31
    +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
    +
    +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects.
    +type VolumeAttributesClassList struct {
    +	metav1.TypeMeta `json:",inline"`
    +
    +	// Standard list metadata
    +	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    +	// +optional
    +	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    +
    +	// items is the list of VolumeAttributesClass objects.
    +	Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"`
    +}
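A minimal usage sketch (editor's illustration, not part of the vendored diff) of how these new types fit together: define a class, then have a PersistentVolumeClaim opt into it. The VolumeAttributesClassName field on the PVC spec is assumed from the corresponding core/v1 API and is not shown in this diff; the driver name and parameter key below are illustrative only.

package vacexample

import (
	corev1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// newGoldClassAndClaim builds a VolumeAttributesClass and a PVC that references it.
// Parameters are opaque to Kubernetes and handed to the CSI driver as-is; to change
// attributes later, create a new class and repoint the claim at it.
func newGoldClassAndClaim() (storagev1beta1.VolumeAttributesClass, corev1.PersistentVolumeClaim) {
	vac := storagev1beta1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gold"},
		DriverName: "example.csi.vendor.io",              // hypothetical CSI driver name
		Parameters: map[string]string{"iops": "4000"},    // illustrative key/value pair
	}
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:               []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			VolumeAttributesClassName: ptr.To("gold"), // assumed core/v1 field referencing the class
		},
	}
	return vac, pvc
}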
    diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
    index 6d9d233066..8c1a663507 100644
    --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
    @@ -216,6 +216,27 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string {
     	return map_VolumeAttachmentStatus
     }
     
    +var map_VolumeAttributesClass = map[string]string{
    +	"":           "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.",
    +	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"driverName": "Name of the CSI driver This field is immutable.",
    +	"parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.",
    +}
    +
    +func (VolumeAttributesClass) SwaggerDoc() map[string]string {
    +	return map_VolumeAttributesClass
    +}
    +
    +var map_VolumeAttributesClassList = map[string]string{
    +	"":         "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.",
    +	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    +	"items":    "items is the list of VolumeAttributesClass objects.",
    +}
    +
    +func (VolumeAttributesClassList) SwaggerDoc() map[string]string {
    +	return map_VolumeAttributesClassList
    +}
    +
     var map_VolumeError = map[string]string{
     	"":        "VolumeError captures an error encountered during a volume operation.",
     	"time":    "time represents the time the error was encountered.",
    diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
    index f0450182b2..d87aa6b90b 100644
    --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
    @@ -579,6 +579,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    +	if in.Parameters != nil {
    +		in, out := &in.Parameters, &out.Parameters
    +		*out = make(map[string]string, len(*in))
    +		for key, val := range *in {
    +			(*out)[key] = val
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass.
    +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(VolumeAttributesClass)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) {
    +	*out = *in
    +	out.TypeMeta = in.TypeMeta
    +	in.ListMeta.DeepCopyInto(&out.ListMeta)
    +	if in.Items != nil {
    +		in, out := &in.Items, &out.Items
    +		*out = make([]VolumeAttributesClass, len(*in))
    +		for i := range *in {
    +			(*in)[i].DeepCopyInto(&(*out)[i])
    +		}
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList.
    +func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(VolumeAttributesClassList)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
    +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
    +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object {
    +	if c := in.DeepCopy(); c != nil {
    +		return c
    +	}
    +	return nil
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *VolumeError) DeepCopyInto(out *VolumeError) {
     	*out = *in
    diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go
    index c5d23e7d45..4be57dc0d4 100644
    --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go
    +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go
    @@ -264,3 +264,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin
     func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) {
     	return 1, 22
     }
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
    +
    +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
    +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) {
    +	return 1, 31
    +}
    +
    +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
    +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) {
    +	return 1, 34
    +}
    +
    +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
    +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
    +func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) {
    +	return 1, 37
    +}
    diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto
    index fc8a3346e2..341e0bc5cf 100644
    --- a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto
    +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto
    @@ -51,7 +51,7 @@ message MigrationCondition {
     
       // The last time this condition was updated.
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3;
     
       // The reason for the condition's last transition.
       // +optional
    @@ -68,7 +68,7 @@ message StorageVersionMigration {
       // Standard object metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
     
       // Specification of the migration.
       // +optional
    @@ -84,7 +84,7 @@ message StorageVersionMigrationList {
       // Standard list metadata
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
     
       // Items is the list of StorageVersionMigration
       // +patchMergeKey=type
    diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go
    new file mode 100644
    index 0000000000..72c6438cb6
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go
    @@ -0,0 +1,165 @@
    +/*
    +Copyright 2018 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package testrestmapper
    +
    +import (
    +	"k8s.io/apimachinery/pkg/api/meta"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	"k8s.io/apimachinery/pkg/runtime/schema"
    +	"k8s.io/apimachinery/pkg/util/sets"
    +)
    +
    +// TestOnlyStaticRESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order:
    +//  1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy
    +//     kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version,
    +//     all other groups alphabetical.
    +//
    +// TODO callers of this method should be updated to build their own specific restmapper based on their scheme for their tests
    +// TODO the things being tested are related to whether various cases are handled, not tied to the particular types being checked.
    +func TestOnlyStaticRESTMapper(scheme *runtime.Scheme, versionPatterns ...schema.GroupVersion) meta.RESTMapper {
    +	unionMapper := meta.MultiRESTMapper{}
    +	unionedGroups := sets.NewString()
    +	for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() {
    +		if !unionedGroups.Has(enabledVersion.Group) {
    +			unionedGroups.Insert(enabledVersion.Group)
    +			unionMapper = append(unionMapper, newRESTMapper(enabledVersion.Group, scheme))
    +		}
    +	}
    +
    +	if len(versionPatterns) != 0 {
    +		resourcePriority := []schema.GroupVersionResource{}
    +		kindPriority := []schema.GroupVersionKind{}
    +		for _, versionPriority := range versionPatterns {
    +			resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
    +			kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
    +		}
    +
    +		return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
    +	}
    +
    +	prioritizedGroups := []string{"", "extensions", "metrics"}
    +	resourcePriority, kindPriority := prioritiesForGroups(scheme, prioritizedGroups...)
    +
    +	prioritizedGroupsSet := sets.NewString(prioritizedGroups...)
    +	remainingGroups := sets.String{}
    +	for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() {
    +		if !prioritizedGroupsSet.Has(enabledVersion.Group) {
    +			remainingGroups.Insert(enabledVersion.Group)
    +		}
    +	}
    +
    +	remainingResourcePriority, remainingKindPriority := prioritiesForGroups(scheme, remainingGroups.List()...)
    +	resourcePriority = append(resourcePriority, remainingResourcePriority...)
    +	kindPriority = append(kindPriority, remainingKindPriority...)
    +
    +	return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
    +}
    +
    +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first,
    +// then any non-preferred version of the group second.
    +func prioritiesForGroups(scheme *runtime.Scheme, groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) {
    +	resourcePriority := []schema.GroupVersionResource{}
    +	kindPriority := []schema.GroupVersionKind{}
    +
    +	for _, group := range groups {
    +		availableVersions := scheme.PrioritizedVersionsForGroup(group)
    +		if len(availableVersions) > 0 {
    +			resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource))
    +			kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind))
    +		}
    +	}
    +	for _, group := range groups {
    +		resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource})
    +		kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind})
    +	}
    +
    +	return resourcePriority, kindPriority
    +}
    +
    +func newRESTMapper(group string, scheme *runtime.Scheme) meta.RESTMapper {
    +	mapper := meta.NewDefaultRESTMapper(scheme.PrioritizedVersionsForGroup(group))
    +	for _, gv := range scheme.PrioritizedVersionsForGroup(group) {
    +		for kind := range scheme.KnownTypes(gv) {
    +			if ignoredKinds.Has(kind) {
    +				continue
    +			}
    +			scope := meta.RESTScopeNamespace
    +			if rootScopedKinds[gv.WithKind(kind).GroupKind()] {
    +				scope = meta.RESTScopeRoot
    +			}
    +			mapper.Add(gv.WithKind(kind), scope)
    +		}
    +	}
    +
    +	return mapper
    +}
    +
    +// hardcoded is good enough for the test we're running
    +var rootScopedKinds = map[schema.GroupKind]bool{
    +	{Group: "admission.k8s.io", Kind: "AdmissionReview"}: true,
    +
    +	{Group: "admissionregistration.k8s.io", Kind: "ValidatingWebhookConfiguration"}: true,
    +	{Group: "admissionregistration.k8s.io", Kind: "MutatingWebhookConfiguration"}:   true,
    +
    +	{Group: "authentication.k8s.io", Kind: "TokenReview"}: true,
    +
    +	{Group: "authorization.k8s.io", Kind: "SubjectAccessReview"}:     true,
    +	{Group: "authorization.k8s.io", Kind: "SelfSubjectAccessReview"}: true,
    +	{Group: "authorization.k8s.io", Kind: "SelfSubjectRulesReview"}:  true,
    +
    +	{Group: "certificates.k8s.io", Kind: "CertificateSigningRequest"}: true,
    +
    +	{Group: "", Kind: "Node"}:             true,
    +	{Group: "", Kind: "Namespace"}:        true,
    +	{Group: "", Kind: "PersistentVolume"}: true,
    +	{Group: "", Kind: "ComponentStatus"}:  true,
    +
    +	{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}:        true,
    +	{Group: "rbac.authorization.k8s.io", Kind: "ClusterRoleBinding"}: true,
    +
    +	{Group: "scheduling.k8s.io", Kind: "PriorityClass"}: true,
    +
    +	{Group: "storage.k8s.io", Kind: "StorageClass"}:     true,
    +	{Group: "storage.k8s.io", Kind: "VolumeAttachment"}: true,
    +
    +	{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: true,
    +
    +	{Group: "apiserver.k8s.io", Kind: "AdmissionConfiguration"}: true,
    +
    +	{Group: "audit.k8s.io", Kind: "Event"}:  true,
    +	{Group: "audit.k8s.io", Kind: "Policy"}: true,
    +
    +	{Group: "apiregistration.k8s.io", Kind: "APIService"}: true,
    +
    +	{Group: "metrics.k8s.io", Kind: "NodeMetrics"}: true,
    +
    +	{Group: "wardle.example.com", Kind: "Fischer"}: true,
    +}
    +
    +// hardcoded is good enough for the test we're running
    +var ignoredKinds = sets.NewString(
    +	"ListOptions",
    +	"DeleteOptions",
    +	"Status",
    +	"PodLogOptions",
    +	"PodExecOptions",
    +	"PodAttachOptions",
    +	"PodPortForwardOptions",
    +	"PodProxyOptions",
    +	"NodeProxyOptions",
    +	"ServiceProxyOptions",
    +)
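The helper vendored above is intended for tests only. A hedged usage sketch follows; the clientgoscheme package is an assumption chosen for brevity, and any scheme with registered types would do.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

// mapStorageClass resolves the StorageClass kind to its resource and scope using
// the static test mapper. StorageClass appears in rootScopedKinds above, so the
// resulting mapping is cluster-scoped.
func mapStorageClass() error {
	mapper := testrestmapper.TestOnlyStaticRESTMapper(clientgoscheme.Scheme)
	m, err := mapper.RESTMapping(schema.GroupKind{Group: "storage.k8s.io", Kind: "StorageClass"}, "v1")
	if err != nil {
		return err
	}
	fmt.Println(m.Resource.Resource, m.Scope.Name()) // prints: storageclasses root
	return nil
}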
    diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
    index 69f1bc336d..50af8334f0 100644
    --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
    +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
    @@ -25,6 +25,8 @@ import (
     	"strconv"
     	"strings"
     
    +	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
    +
     	inf "gopkg.in/inf.v0"
     )
     
    @@ -683,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
     	return result, nil
     }
     
    +func (q Quantity) MarshalCBOR() ([]byte, error) {
    +	// The call to String() should never return the string "" because the receiver's
    +	// address will never be nil.
    +	return cbor.Marshal(q.String())
    +}
    +
     // ToUnstructured implements the value.UnstructuredConverter interface.
     func (q Quantity) ToUnstructured() interface{} {
     	return q.String()
    @@ -711,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error {
     	return nil
     }
     
    +func (q *Quantity) UnmarshalCBOR(value []byte) error {
    +	var s *string
    +	if err := cbor.Unmarshal(value, &s); err != nil {
    +		return err
    +	}
    +
    +	if s == nil {
    +		q.d.Dec = nil
    +		q.i = int64Amount{}
    +		return nil
    +	}
    +
    +	parsed, err := ParseQuantity(strings.TrimSpace(*s))
    +	if err != nil {
    +		return err
    +	}
    +
    +	*q = parsed
    +	return nil
    +}
    +
     // NewDecimalQuantity returns a new Quantity representing the given
     // value in the given format.
     func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
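The new CBOR hooks round-trip a Quantity through its canonical string form, mirroring the existing JSON behavior. A small sketch of that string round-trip using only the public resource API (nothing CBOR-specific is assumed):

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// roundTrip shows the invariant the CBOR (un)marshalers rely on: String() emits a
// canonical form and ParseQuantity reconstructs an equal value from it.
func roundTrip() {
	q := resource.MustParse("1.5")
	s := q.String() // canonical form, e.g. "1500m"
	back, err := resource.ParseQuantity(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Cmp(q) == 0) // true
}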
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go
    new file mode 100644
    index 0000000000..2734a8f3ba
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go
    @@ -0,0 +1,76 @@
    +/*
    +Copyright 2020 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package validation
    +
    +import (
    +	"fmt"
    +
    +	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/util/validation/field"
    +)
    +
    +// ValidateListOptions returns all validation errors found while validating the ListOptions.
    +func ValidateListOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList {
    +	if options.Watch {
    +		return validateWatchOptions(options, isWatchListFeatureEnabled)
    +	}
    +	allErrs := field.ErrorList{}
    +	if match := options.ResourceVersionMatch; len(match) > 0 {
    +		if len(options.ResourceVersion) == 0 {
    +			allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided"))
    +		}
    +		if len(options.Continue) > 0 {
    +			allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided"))
    +		}
    +		if match != metav1.ResourceVersionMatchExact && match != metav1.ResourceVersionMatchNotOlderThan {
    +			allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchExact), string(metav1.ResourceVersionMatchNotOlderThan), ""}))
    +		}
    +		if match == metav1.ResourceVersionMatchExact && options.ResourceVersion == "0" {
    +			allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\""))
    +		}
    +	}
    +	if options.SendInitialEvents != nil {
    +		allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for list"))
    +	}
    +	return allErrs
    +}
    +
    +func validateWatchOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList {
    +	allErrs := field.ErrorList{}
    +	match := options.ResourceVersionMatch
    +	if options.SendInitialEvents != nil {
    +		if match != metav1.ResourceVersionMatchNotOlderThan {
    +			allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), fmt.Sprintf("sendInitialEvents requires setting resourceVersionMatch to %s", metav1.ResourceVersionMatchNotOlderThan)))
    +		}
    +		if !isWatchListFeatureEnabled {
    +			allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for watch unless the WatchList feature gate is enabled"))
    +		}
    +	}
    +	if len(match) > 0 {
    +		if options.SendInitialEvents == nil {
    +			allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch unless sendInitialEvents is provided"))
    +		}
    +		if match != metav1.ResourceVersionMatchNotOlderThan {
    +			allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchNotOlderThan)}))
    +		}
    +		if len(options.Continue) > 0 {
    +			allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided"))
    +		}
    +	}
    +	return allErrs
    +}
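A hedged sketch of calling the vendored validator; the option combination below is the streaming watch-list case the rules above are written for.

package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
	"k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/utils/ptr"
)

// checkWatchList validates a watch request that asks for initial events. With the
// WatchList feature gate enabled this combination passes; with it disabled,
// validateWatchOptions returns a field.Forbidden error for sendInitialEvents.
func checkWatchList(watchListEnabled bool) field.ErrorList {
	opts := internalversion.ListOptions{
		Watch:                true,
		SendInitialEvents:    ptr.To(true),
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
	}
	return validation.ValidateListOptions(&opts, watchListEnabled)
}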
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
    index 15b45ffa84..5005beb12d 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
    @@ -18,6 +18,7 @@ package v1
     
     import (
     	"k8s.io/apimachinery/pkg/runtime/schema"
    +	"k8s.io/utils/ptr"
     )
     
     // IsControlledBy checks if the  object has a controllerRef set to the given owner
    @@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference {
     		return nil
     	}
     	cp := *ref
    +	cp.Controller = ptr.To(*ref.Controller)
    +	if ref.BlockOwnerDeletion != nil {
    +		cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion)
    +	}
     	return &cp
     }
     
    -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller
    +// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller
     func GetControllerOfNoCopy(controllee Object) *OwnerReference {
     	refs := controllee.GetOwnerReferences()
     	for i := range refs {
    @@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference {
     
     // NewControllerRef creates an OwnerReference pointing to the given owner.
     func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
    -	blockOwnerDeletion := true
    -	isController := true
     	return &OwnerReference{
     		APIVersion:         gvk.GroupVersion().String(),
     		Kind:               gvk.Kind,
     		Name:               owner.GetName(),
     		UID:                owner.GetUID(),
    -		BlockOwnerDeletion: &blockOwnerDeletion,
    -		Controller:         &isController,
    +		BlockOwnerDeletion: ptr.To(true),
    +		Controller:         ptr.To(true),
     	}
     }
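A short sketch (editor's illustration, not part of the diff) of the behavioral point of this change: GetControllerOf now deep-copies the Controller and BlockOwnerDeletion pointers, so mutating the returned reference no longer aliases the owner reference stored on the object.

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// controllerRefIsCopied returns true: flipping the pointer on the copy returned by
// GetControllerOf leaves the reference stored on the pod untouched.
func controllerRefIsCopied() bool {
	owner := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "owner", UID: "1234"}}
	pod := &corev1.Pod{}
	pod.OwnerReferences = []metav1.OwnerReference{
		*metav1.NewControllerRef(owner, corev1.SchemeGroupVersion.WithKind("ConfigMap")),
	}

	ref := metav1.GetControllerOf(pod)
	*ref.Controller = false // mutate the copy only

	return *pod.OwnerReferences[0].Controller // still true
}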
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
    index 75b88890f6..229ea2c2c2 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
    @@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() {
     
     var xxx_messageInfo_Duration proto.InternalMessageInfo
     
    +func (m *FieldSelectorRequirement) Reset()      { *m = FieldSelectorRequirement{} }
    +func (*FieldSelectorRequirement) ProtoMessage() {}
    +func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) {
    +	return fileDescriptor_a8431b6e0aeeb761, []int{10}
    +}
    +func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error {
    +	return m.Unmarshal(b)
    +}
    +func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    +	b = b[:cap(b)]
    +	n, err := m.MarshalToSizedBuffer(b)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return b[:n], nil
    +}
    +func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) {
    +	xxx_messageInfo_FieldSelectorRequirement.Merge(m, src)
    +}
    +func (m *FieldSelectorRequirement) XXX_Size() int {
    +	return m.Size()
    +}
    +func (m *FieldSelectorRequirement) XXX_DiscardUnknown() {
    +	xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m)
    +}
    +
    +var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo
    +
     func (m *FieldsV1) Reset()      { *m = FieldsV1{} }
     func (*FieldsV1) ProtoMessage() {}
     func (*FieldsV1) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{10}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{11}
     }
     func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
     func (m *GetOptions) Reset()      { *m = GetOptions{} }
     func (*GetOptions) ProtoMessage() {}
     func (*GetOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{11}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{12}
     }
     func (m *GetOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo
     func (m *GroupKind) Reset()      { *m = GroupKind{} }
     func (*GroupKind) ProtoMessage() {}
     func (*GroupKind) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{12}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{13}
     }
     func (m *GroupKind) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo
     func (m *GroupResource) Reset()      { *m = GroupResource{} }
     func (*GroupResource) ProtoMessage() {}
     func (*GroupResource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{13}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{14}
     }
     func (m *GroupResource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo
     func (m *GroupVersion) Reset()      { *m = GroupVersion{} }
     func (*GroupVersion) ProtoMessage() {}
     func (*GroupVersion) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{14}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{15}
     }
     func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
     func (m *GroupVersionForDiscovery) Reset()      { *m = GroupVersionForDiscovery{} }
     func (*GroupVersionForDiscovery) ProtoMessage() {}
     func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{15}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{16}
     }
     func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
     func (m *GroupVersionKind) Reset()      { *m = GroupVersionKind{} }
     func (*GroupVersionKind) ProtoMessage() {}
     func (*GroupVersionKind) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{16}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{17}
     }
     func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
     func (m *GroupVersionResource) Reset()      { *m = GroupVersionResource{} }
     func (*GroupVersionResource) ProtoMessage() {}
     func (*GroupVersionResource) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{17}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{18}
     }
     func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
     func (m *LabelSelector) Reset()      { *m = LabelSelector{} }
     func (*LabelSelector) ProtoMessage() {}
     func (*LabelSelector) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{18}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{19}
     }
     func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
     func (m *LabelSelectorRequirement) Reset()      { *m = LabelSelectorRequirement{} }
     func (*LabelSelectorRequirement) ProtoMessage() {}
     func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{19}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{20}
     }
     func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
     func (m *List) Reset()      { *m = List{} }
     func (*List) ProtoMessage() {}
     func (*List) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{20}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{21}
     }
     func (m *List) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
     func (m *ListMeta) Reset()      { *m = ListMeta{} }
     func (*ListMeta) ProtoMessage() {}
     func (*ListMeta) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{21}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{22}
     }
     func (m *ListMeta) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo
     func (m *ListOptions) Reset()      { *m = ListOptions{} }
     func (*ListOptions) ProtoMessage() {}
     func (*ListOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{22}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{23}
     }
     func (m *ListOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo
     func (m *ManagedFieldsEntry) Reset()      { *m = ManagedFieldsEntry{} }
     func (*ManagedFieldsEntry) ProtoMessage() {}
     func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{23}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{24}
     }
     func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
     func (m *MicroTime) Reset()      { *m = MicroTime{} }
     func (*MicroTime) ProtoMessage() {}
     func (*MicroTime) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{24}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{25}
     }
     func (m *MicroTime) XXX_Unmarshal(b []byte) error {
     	return xxx_messageInfo_MicroTime.Unmarshal(m, b)
    @@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo
     func (m *ObjectMeta) Reset()      { *m = ObjectMeta{} }
     func (*ObjectMeta) ProtoMessage() {}
     func (*ObjectMeta) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{25}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{26}
     }
     func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
     func (m *OwnerReference) Reset()      { *m = OwnerReference{} }
     func (*OwnerReference) ProtoMessage() {}
     func (*OwnerReference) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{26}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{27}
     }
     func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
     func (m *PartialObjectMetadata) Reset()      { *m = PartialObjectMetadata{} }
     func (*PartialObjectMetadata) ProtoMessage() {}
     func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{27}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{28}
     }
     func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
     func (m *PartialObjectMetadataList) Reset()      { *m = PartialObjectMetadataList{} }
     func (*PartialObjectMetadataList) ProtoMessage() {}
     func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{28}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{29}
     }
     func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
     func (m *Patch) Reset()      { *m = Patch{} }
     func (*Patch) ProtoMessage() {}
     func (*Patch) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{29}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{30}
     }
     func (m *Patch) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo
     func (m *PatchOptions) Reset()      { *m = PatchOptions{} }
     func (*PatchOptions) ProtoMessage() {}
     func (*PatchOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{30}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{31}
     }
     func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
     func (m *Preconditions) Reset()      { *m = Preconditions{} }
     func (*Preconditions) ProtoMessage() {}
     func (*Preconditions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{31}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{32}
     }
     func (m *Preconditions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
     func (m *RootPaths) Reset()      { *m = RootPaths{} }
     func (*RootPaths) ProtoMessage() {}
     func (*RootPaths) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{32}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{33}
     }
     func (m *RootPaths) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo
     func (m *ServerAddressByClientCIDR) Reset()      { *m = ServerAddressByClientCIDR{} }
     func (*ServerAddressByClientCIDR) ProtoMessage() {}
     func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{33}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{34}
     }
     func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
     func (m *Status) Reset()      { *m = Status{} }
     func (*Status) ProtoMessage() {}
     func (*Status) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{34}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{35}
     }
     func (m *Status) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo
     func (m *StatusCause) Reset()      { *m = StatusCause{} }
     func (*StatusCause) ProtoMessage() {}
     func (*StatusCause) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{35}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{36}
     }
     func (m *StatusCause) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo
     func (m *StatusDetails) Reset()      { *m = StatusDetails{} }
     func (*StatusDetails) ProtoMessage() {}
     func (*StatusDetails) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{36}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{37}
     }
     func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
     func (m *TableOptions) Reset()      { *m = TableOptions{} }
     func (*TableOptions) ProtoMessage() {}
     func (*TableOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{37}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{38}
     }
     func (m *TableOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo
     func (m *Time) Reset()      { *m = Time{} }
     func (*Time) ProtoMessage() {}
     func (*Time) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{38}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{39}
     }
     func (m *Time) XXX_Unmarshal(b []byte) error {
     	return xxx_messageInfo_Time.Unmarshal(m, b)
    @@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo
     func (m *Timestamp) Reset()      { *m = Timestamp{} }
     func (*Timestamp) ProtoMessage() {}
     func (*Timestamp) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{39}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{40}
     }
     func (m *Timestamp) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp proto.InternalMessageInfo
     func (m *TypeMeta) Reset()      { *m = TypeMeta{} }
     func (*TypeMeta) ProtoMessage() {}
     func (*TypeMeta) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{40}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{41}
     }
     func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
     func (m *UpdateOptions) Reset()      { *m = UpdateOptions{} }
     func (*UpdateOptions) ProtoMessage() {}
     func (*UpdateOptions) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{41}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{42}
     }
     func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
     func (m *Verbs) Reset()      { *m = Verbs{} }
     func (*Verbs) ProtoMessage() {}
     func (*Verbs) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{42}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{43}
     }
     func (m *Verbs) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo
     func (m *WatchEvent) Reset()      { *m = WatchEvent{} }
     func (*WatchEvent) ProtoMessage() {}
     func (*WatchEvent) Descriptor() ([]byte, []int) {
    -	return fileDescriptor_a8431b6e0aeeb761, []int{43}
    +	return fileDescriptor_a8431b6e0aeeb761, []int{44}
     }
     func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
     	return m.Unmarshal(b)
    @@ -1282,6 +1310,7 @@ func init() {
     	proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
     	proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
     	proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
    +	proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement")
     	proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
     	proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
     	proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
    @@ -1326,186 +1355,187 @@ func init() {
     }
     
     var fileDescriptor_a8431b6e0aeeb761 = []byte{
    -	// 2853 bytes of a gzipped FileDescriptorProto
    -	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47,
    -	0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44,
    -	0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d,
    -	0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a,
    -	0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88,
    -	0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85,
    -	0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa,
    -	0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0x3c, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0x7f, 0xdd, 0xea, 0x3a, 0x1d,
    -	0xab, 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58,
    -	0xeb, 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1,
    -	0x63, 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9,
    -	0xb2, 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f,
    -	0x5d, 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0xf5, 0x49, 0xa6,
    -	0xd0, 0x9e, 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9,
    -	0x58, 0xc3, 0x7c, 0xe6, 0x9f, 0xb2, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad,
    -	0x41, 0xce, 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc,
    -	0xe9, 0xa0, 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1,
    -	0x3d, 0x56, 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70,
    -	0x57, 0xb2, 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14,
    -	0x14, 0x92, 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56,
    -	0xf8, 0x6a, 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2,
    -	0x11, 0xfd, 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe,
    -	0xa6, 0xeb, 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83,
    -	0xf6, 0x27, 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10,
    -	0xca, 0xe1, 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c,
    -	0xac, 0xa7, 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3,
    -	0x67, 0x39, 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d,
    -	0xca, 0xcc, 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc,
    -	0xaf, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59,
    -	0x33, 0x2e, 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b,
    -	0x4b, 0xab, 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7,
    -	0xbc, 0xac, 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec,
    -	0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09,
    -	0xa1, 0x07, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37,
    -	0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a,
    -	0xf6, 0x72, 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40,
    -	0xda, 0x3e, 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40,
    -	0x2c, 0xf0, 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a,
    -	0x75, 0x68, 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef,
    -	0xae, 0x41, 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11,
    -	0x81, 0x22, 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20,
    -	0xc3, 0xb1, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4,
    -	0xf6, 0x95, 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2,
    -	0xbd, 0xda, 0xcc, 0xdb, 0x7f, 0x5b, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb,
    -	0xbb, 0xdd, 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf,
    -	0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9,
    -	0xe2, 0x40, 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55,
    -	0x97, 0x23, 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d,
    -	0xdb, 0x09, 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0,
    -	0x73, 0x30, 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05,
    -	0xf4, 0x6c, 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51,
    -	0xf6, 0x4d, 0xf9, 0xec, 0x85, 0xef, 0x47, 0x36, 0xf6, 0xf4, 0xdd, 0x11, 0x0a, 0x3c, 0x86, 0x0b,
    -	0x9d, 0x00, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x87, 0xa8, 0x0b,
    -	0xff, 0xa5, 0x74, 0x27, 0xce, 0x39, 0x62, 0xbd, 0xb7, 0x46, 0xa4, 0xe1, 0x31, 0x1a, 0xd0, 0xe3,
    -	0x30, 0x4b, 0x89, 0xc5, 0x7c, 0xaf, 0x9a, 0x17, 0xcb, 0x8f, 0xa2, 0x32, 0x16, 0x50, 0xac, 0xb0,
    -	0x3c, 0xa0, 0x75, 0x08, 0x63, 0x56, 0x3b, 0x0c, 0xaf, 0x51, 0x40, 0xdb, 0x91, 0x60, 0x1c, 0xe2,
    -	0xcd, 0xdf, 0x1a, 0x50, 0xd9, 0xa4, 0xc4, 0x0a, 0xc8, 0x34, 0x6e, 0xf1, 0xa9, 0x4f, 0x1c, 0x6d,
    -	0xc0, 0x82, 0xf8, 0xbe, 0x6b, 0xb9, 0x8e, 0x2d, 0xcf, 0x20, 0x27, 0x98, 0x3f, 0xaf, 0x98, 0x17,
    -	0xb6, 0x92, 0x68, 0x3c, 0x4c, 0x6f, 0xfe, 0x24, 0x0b, 0x95, 0x26, 0x71, 0x49, 0x6c, 0xf2, 0x16,
    -	0xa0, 0x36, 0xb5, 0x5a, 0x64, 0x8f, 0x50, 0xc7, 0xb7, 0xf7, 0x49, 0xcb, 0xf7, 0x6c, 0x26, 0xdc,
    -	0x28, 0xdb, 0xf8, 0x1c, 0xdf, 0xdf, 0x9b, 0x23, 0x58, 0x3c, 0x86, 0x03, 0xb9, 0x50, 0xe9, 0x52,
    -	0xf1, 0x5b, 0xec, 0xb9, 0xf4, 0xb2, 0xd2, 0xd5, 0xaf, 0xa4, 0x3b, 0xd2, 0x3d, 0x9d, 0xb5, 0xb1,
    -	0x74, 0x3a, 0xa8, 0x55, 0x12, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x8b, 0x3e, 0xed, 0x1e, 0x59,
    -	0x5e, 0x93, 0x74, 0x89, 0x67, 0x13, 0x2f, 0x60, 0x62, 0x23, 0x0b, 0x8d, 0x65, 0x9e, 0x8b, 0xec,
    -	0x0e, 0xe1, 0xf0, 0x08, 0x35, 0x7a, 0x0d, 0x96, 0xba, 0xd4, 0xef, 0x5a, 0x6d, 0xb1, 0x31, 0x7b,
    -	0xbe, 0xeb, 0xb4, 0xfa, 0x6a, 0x3b, 0x9f, 0x3c, 0x1d, 0xd4, 0x96, 0xf6, 0x86, 0x91, 0x67, 0x83,
    -	0xda, 0x05, 0xb1, 0x75, 0x1c, 0x12, 0x23, 0xf1, 0xa8, 0x18, 0xcd, 0x0d, 0xf2, 0x93, 0xdc, 0xc0,
    -	0xdc, 0x86, 0x42, 0xb3, 0xa7, 0xee, 0xc4, 0x0b, 0x50, 0xb0, 0xd5, 0x6f, 0xb5, 0xf3, 0xe1, 0xe5,
    -	0x8c, 0x68, 0xce, 0x06, 0xb5, 0x0a, 0x4f, 0x3f, 0xeb, 0x21, 0x00, 0x47, 0x2c, 0xe6, 0xe3, 0x50,
    -	0x10, 0x07, 0xcf, 0xee, 0x5e, 0x41, 0x8b, 0x90, 0xc5, 0xd6, 0x3d, 0x21, 0xa5, 0x8c, 0xf9, 0x4f,
    -	0x2d, 0x8a, 0xed, 0x02, 0xdc, 0x24, 0x41, 0x78, 0xf0, 0x1b, 0xb0, 0x10, 0x86, 0xf2, 0xe4, 0x0b,
    -	0x13, 0x79, 0x13, 0x4e, 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x3a, 0x14, 0xc5, 0x2b, 0xc4, 0x9f, 0xf0,
    -	0x38, 0x5d, 0x30, 0xee, 0x93, 0x2e, 0x84, 0x39, 0x40, 0x66, 0x52, 0x0e, 0xa0, 0x99, 0xeb, 0x42,
    -	0x45, 0xf2, 0x86, 0x09, 0x52, 0x2a, 0x0d, 0x4f, 0x42, 0x21, 0x34, 0x53, 0x69, 0x89, 0x12, 0xe3,
    -	0x50, 0x10, 0x8e, 0x28, 0x34, 0x6d, 0x47, 0x90, 0x78, 0x51, 0xd3, 0x29, 0xd3, 0xb2, 0x9f, 0xcc,
    -	0xfd, 0xb3, 0x1f, 0x4d, 0xd3, 0x0f, 0xa1, 0x3a, 0x29, 0x9b, 0x7e, 0x80, 0x37, 0x3f, 0xbd, 0x29,
    -	0xe6, 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xd2, 0x1f, 0x5f, 0x7a, 0x25, 0xe7, 0x67, 0x7b, 0xda, 0x8e,
    -	0xfc, 0xca, 0x80, 0xe5, 0xc4, 0xd2, 0xa6, 0x3a, 0xf1, 0x29, 0x8c, 0xd2, 0x9d, 0x23, 0x3b, 0x85,
    -	0x73, 0xfc, 0x25, 0x03, 0x95, 0x5b, 0xd6, 0x01, 0x71, 0xf7, 0x89, 0x4b, 0x5a, 0x81, 0x4f, 0xd1,
    -	0x0f, 0xa0, 0xd4, 0xb1, 0x82, 0xd6, 0x91, 0x80, 0x86, 0x95, 0x41, 0x33, 0x5d, 0xb0, 0x4b, 0x48,
    -	0xaa, 0xef, 0xc4, 0x62, 0x6e, 0x78, 0x01, 0xed, 0x37, 0x2e, 0x28, 0x93, 0x4a, 0x1a, 0x06, 0xeb,
    -	0xda, 0x44, 0x39, 0x27, 0xbe, 0x6f, 0xbc, 0xd5, 0xe5, 0x69, 0xcb, 0xf4, 0x55, 0x64, 0xc2, 0x04,
    -	0x4c, 0xde, 0xec, 0x39, 0x94, 0x74, 0x88, 0x17, 0xc4, 0xe5, 0xdc, 0xce, 0x90, 0x7c, 0x3c, 0xa2,
    -	0x71, 0xe5, 0x45, 0x58, 0x1c, 0x36, 0x9e, 0xc7, 0x9f, 0x63, 0xd2, 0x97, 0xe7, 0x85, 0xf9, 0x4f,
    -	0xb4, 0x0c, 0xf9, 0x13, 0xcb, 0xed, 0xa9, 0xdb, 0x88, 0xe5, 0xc7, 0xf5, 0xcc, 0x35, 0xc3, 0xfc,
    -	0x8d, 0x01, 0xd5, 0x49, 0x86, 0xa0, 0x2f, 0x6a, 0x82, 0x1a, 0x25, 0x65, 0x55, 0xf6, 0x15, 0xd2,
    -	0x97, 0x52, 0x6f, 0x40, 0xc1, 0xef, 0xf2, 0x9c, 0xc2, 0xa7, 0xea, 0xd4, 0x9f, 0x08, 0x4f, 0x72,
    -	0x57, 0xc1, 0xcf, 0x06, 0xb5, 0x8b, 0x09, 0xf1, 0x21, 0x02, 0x47, 0xac, 0x3c, 0x52, 0x0b, 0x7b,
    -	0xf8, 0xeb, 0x11, 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0xbd, 0x01, 0x39, 0x91, 0x90,
    -	0xbf, 0x0e, 0x05, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xa5, 0x20, 0xe7, 0xde, 0x21,
    -	0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4,
    -	0x53, 0x13, 0x45, 0xab, 0x46, 0x44, 0x1d, 0x5b, 0xf7, 0x6e, 0xbc, 0x15, 0x10, 0x8f, 0x1f, 0x46,
    -	0x7c, 0x35, 0xb6, 0xb9, 0x0c, 0x2c, 0x45, 0x99, 0xff, 0x32, 0x20, 0x52, 0xc5, 0x9d, 0x9f, 0x11,
    -	0xf7, 0xf0, 0x96, 0xe3, 0x1d, 0xab, 0x6d, 0x8d, 0xcc, 0xd9, 0x57, 0x70, 0x1c, 0x51, 0x8c, 0x7b,
    -	0x1e, 0x32, 0xd3, 0x3d, 0x0f, 0x5c, 0x61, 0xcb, 0xf7, 0x02, 0xc7, 0xeb, 0x8d, 0xdc, 0xb6, 0x4d,
    -	0x05, 0xc7, 0x11, 0x05, 0x4f, 0x44, 0x28, 0xe9, 0x58, 0x8e, 0xe7, 0x78, 0x6d, 0xbe, 0x88, 0x4d,
    -	0xbf, 0xe7, 0x05, 0xe2, 0x45, 0x56, 0x89, 0x08, 0x1e, 0xc1, 0xe2, 0x31, 0x1c, 0xe6, 0xbf, 0x73,
    -	0x50, 0xe2, 0x6b, 0x0e, 0xdf, 0xb9, 0xe7, 0xa1, 0xe2, 0xea, 0x5e, 0xa0, 0xd6, 0x7e, 0x51, 0x99,
    -	0x92, 0xbc, 0xd7, 0x38, 0x49, 0xcb, 0x99, 0x45, 0x0a, 0x15, 0x31, 0x67, 0x92, 0xcc, 0x5b, 0x3a,
    -	0x12, 0x27, 0x69, 0x79, 0xf4, 0xba, 0xc7, 0xef, 0x87, 0xca, 0x4c, 0xa2, 0x23, 0xfa, 0x26, 0x07,
    -	0x62, 0x89, 0x43, 0x3b, 0x70, 0xc1, 0x72, 0x5d, 0xff, 0x9e, 0x00, 0x36, 0x7c, 0xff, 0xb8, 0x63,
    -	0xd1, 0x63, 0x26, 0x8a, 0xe9, 0x42, 0xe3, 0x0b, 0x8a, 0xe5, 0xc2, 0xc6, 0x28, 0x09, 0x1e, 0xc7,
    -	0x37, 0xee, 0xd8, 0x72, 0x53, 0x1e, 0xdb, 0x11, 0x2c, 0x0f, 0x81, 0xc4, 0x2d, 0x57, 0x95, 0xed,
    -	0x33, 0x4a, 0xce, 0x32, 0x1e, 0x43, 0x73, 0x36, 0x01, 0x8e, 0xc7, 0x4a, 0x44, 0xd7, 0x61, 0x9e,
    -	0x7b, 0xb2, 0xdf, 0x0b, 0xc2, 0xbc, 0x33, 0x2f, 0x8e, 0x1b, 0x9d, 0x0e, 0x6a, 0xf3, 0xb7, 0x13,
    -	0x18, 0x3c, 0x44, 0xc9, 0x37, 0xd7, 0x75, 0x3a, 0x4e, 0x50, 0x9d, 0x13, 0x2c, 0xd1, 0xe6, 0xde,
    -	0xe2, 0x40, 0x2c, 0x71, 0x09, 0x0f, 0x2c, 0x9c, 0xeb, 0x81, 0x9b, 0xb0, 0xc4, 0x88, 0x67, 0x6f,
    -	0x7b, 0x4e, 0xe0, 0x58, 0xee, 0x8d, 0x13, 0x91, 0x55, 0x96, 0xc4, 0x41, 0x5c, 0xe4, 0x29, 0xe1,
    -	0xfe, 0x30, 0x12, 0x8f, 0xd2, 0x9b, 0x7f, 0xce, 0x02, 0x92, 0x09, 0xbb, 0x2d, 0x93, 0x32, 0x19,
    -	0x17, 0x79, 0x59, 0xa1, 0x12, 0x7e, 0x63, 0xa8, 0xac, 0x50, 0xb9, 0x7e, 0x88, 0x47, 0x3b, 0x50,
    -	0x94, 0xf1, 0x29, 0xbe, 0x73, 0xeb, 0x8a, 0xb8, 0xb8, 0x1b, 0x22, 0xce, 0x06, 0xb5, 0x95, 0x84,
    -	0x9a, 0x08, 0x23, 0x4a, 0xbe, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1d, 0xbd, 0xe9, 0x57, 0x8c,
    -	0x5b, 0x3f, 0x71, 0xf9, 0x8e, 0x35, 0x2a, 0xf4, 0x12, 0xe4, 0x82, 0x4f, 0x57, 0x96, 0x15, 0x44,
    -	0xd5, 0xc9, 0x8b, 0x30, 0x21, 0x81, 0x6b, 0x17, 0x97, 0x82, 0x71, 0xb3, 0x54, 0x45, 0x15, 0x69,
    -	0xdf, 0x8a, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xc2, 0xa1, 0xca, 0x67, 0xc5, 0xe9, 0xa6, 0x8e,
    -	0xb3, 0x61, 0x16, 0x2c, 0xfb, 0x0e, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x89, 0xf5, 0x0e,
    -	0xa2, 0x14, 0x40, 0xba, 0x44, 0xf4, 0xde, 0xee, 0xc7, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x42, 0x71,
    -	0xc7, 0x69, 0x51, 0x5f, 0x14, 0x92, 0x4f, 0xc0, 0x1c, 0x4b, 0x54, 0x49, 0xd1, 0x49, 0x86, 0xae,
    -	0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x5a, 0x28, 0x1f, 0xfb, 0xe8, 0xab, 0x1c, 0x88,
    -	0x25, 0xee, 0xfa, 0x32, 0xcf, 0x32, 0x7e, 0xfa, 0x7e, 0x6d, 0xe6, 0xdd, 0xf7, 0x6b, 0x33, 0xef,
    -	0xbd, 0xaf, 0x32, 0x8e, 0x3f, 0x00, 0xc0, 0xee, 0xc1, 0xf7, 0x48, 0x4b, 0xc6, 0xee, 0x54, 0xbd,
    -	0xc1, 0xb0, 0x25, 0x2d, 0x7a, 0x83, 0x99, 0xa1, 0xcc, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, 0x87,
    -	0x62, 0xd4, 0xf5, 0x53, 0xfe, 0xb1, 0x14, 0xfa, 0x5b, 0xd4, 0x1a, 0xc4, 0x31, 0x4d, 0xe2, 0x21,
    -	0xc9, 0x9d, 0xfb, 0x90, 0x34, 0x20, 0xdb, 0x73, 0x6c, 0x55, 0x75, 0x3f, 0x1d, 0x3e, 0xe4, 0x77,
    -	0xb6, 0x9b, 0x67, 0x83, 0xda, 0x23, 0x93, 0x9a, 0xed, 0x41, 0xbf, 0x4b, 0x58, 0xfd, 0xce, 0x76,
    -	0x13, 0x73, 0xe6, 0x71, 0x51, 0x6d, 0x76, 0xca, 0xa8, 0x76, 0x15, 0xa0, 0x1d, 0xf7, 0x2e, 0x64,
    -	0xd0, 0x88, 0x1c, 0x51, 0xeb, 0x59, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xd7, 0xf7, 0xaa, 0x87,
    -	0xc0, 0x02, 0xab, 0x23, 0xbb, 0xa1, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, 0xb0, 0x30,
    -	0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x32, 0x33, 0x56, 0x5a, 0x9c, 0x5a, 0xa9, 0x88, 0x58,
    -	0xcd, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x2e, 0xac, 0x84, 0xc0, 0xd1, 0x5a, 0x5f, 0x44, 0xfd,
    -	0x6c, 0x63, 0xf5, 0x74, 0x50, 0x5b, 0x69, 0x4e, 0xa4, 0xc2, 0xf7, 0x91, 0x80, 0x6c, 0x98, 0x75,
    -	0x65, 0x96, 0x5c, 0x12, 0x99, 0xcd, 0xd7, 0xd2, 0xad, 0x22, 0xf6, 0xfe, 0xba, 0x9e, 0x1d, 0x47,
    -	0x7d, 0x1b, 0x95, 0x18, 0x2b, 0xd9, 0xe8, 0x2d, 0x28, 0x59, 0x9e, 0xe7, 0x07, 0x96, 0xec, 0x3e,
    -	0x94, 0x85, 0xaa, 0x8d, 0xa9, 0x55, 0x6d, 0xc4, 0x32, 0x86, 0xb2, 0x71, 0x0d, 0x83, 0x75, 0x55,
    -	0xe8, 0x1e, 0x2c, 0xf8, 0xf7, 0x3c, 0x42, 0x31, 0x39, 0x24, 0x94, 0x78, 0x2d, 0xc2, 0xaa, 0x15,
    -	0xa1, 0xfd, 0x99, 0x94, 0xda, 0x13, 0xcc, 0xb1, 0x4b, 0x27, 0xe1, 0x0c, 0x0f, 0x6b, 0x41, 0x75,
    -	0x1e, 0x5b, 0x3d, 0xcb, 0x75, 0xbe, 0x4f, 0x28, 0xab, 0xce, 0xc7, 0x0d, 0xeb, 0xad, 0x08, 0x8a,
    -	0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x7f, 0x32, 0xaa, 0x4b, 0xc2, 0xcc, 0x6b, 0xe9, 0xcc, 0x1c,
    -	0x7d, 0xd4, 0xe2, 0x34, 0x28, 0x81, 0xc3, 0x49, 0x2d, 0x2b, 0xcf, 0x41, 0xe9, 0x53, 0x56, 0x08,
    -	0xbc, 0xc2, 0x18, 0x3e, 0x90, 0xa9, 0x2a, 0x8c, 0x3f, 0x66, 0x60, 0x3e, 0xb9, 0x8d, 0x43, 0xcf,
    -	0x61, 0x3e, 0xd5, 0x73, 0x18, 0xd6, 0xb2, 0xc6, 0xc4, 0xc9, 0x45, 0x18, 0x9f, 0xb3, 0x13, 0xe3,
    -	0xb3, 0x0a, 0x83, 0xb9, 0x07, 0x09, 0x83, 0x75, 0x00, 0x9e, 0xac, 0x50, 0xdf, 0x75, 0x09, 0x15,
    -	0x11, 0xb0, 0xa0, 0x26, 0x14, 0x11, 0x14, 0x6b, 0x14, 0x3c, 0xa5, 0x3e, 0x70, 0xfd, 0xd6, 0xb1,
    -	0xd8, 0x82, 0xf0, 0xf6, 0x8a, 0xd8, 0x57, 0x90, 0x29, 0x75, 0x63, 0x04, 0x8b, 0xc7, 0x70, 0x98,
    -	0x7d, 0xb8, 0xb8, 0x67, 0x51, 0x9e, 0xe4, 0xc4, 0x37, 0x45, 0xd4, 0x2c, 0x6f, 0x8c, 0x54, 0x44,
    -	0x4f, 0x4f, 0x7b, 0xe3, 0xe2, 0xcd, 0x8f, 0x61, 0x71, 0x55, 0x64, 0xfe, 0xd5, 0x80, 0x4b, 0x63,
    -	0x75, 0x7f, 0x06, 0x15, 0xd9, 0x1b, 0xc9, 0x8a, 0xec, 0xf9, 0x94, 0xad, 0xcc, 0x71, 0xd6, 0x4e,
    -	0xa8, 0xcf, 0xe6, 0x20, 0xbf, 0xc7, 0x33, 0x61, 0xf3, 0x43, 0x03, 0xca, 0xe2, 0xd7, 0x34, 0x9d,
    -	0xe4, 0x5a, 0x72, 0xc0, 0x50, 0x7c, 0x78, 0xc3, 0x85, 0x87, 0xd1, 0x6a, 0x7e, 0xc7, 0x80, 0x64,
    -	0x0f, 0x17, 0xbd, 0x28, 0xaf, 0x80, 0x11, 0x35, 0x59, 0xa7, 0x74, 0xff, 0x17, 0x26, 0x95, 0xa4,
    -	0x17, 0x52, 0x75, 0x2b, 0x9f, 0x84, 0x22, 0xf6, 0xfd, 0x60, 0xcf, 0x0a, 0x8e, 0x18, 0xdf, 0xbb,
    -	0x2e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, 0x7f, 0x6e, 0xc0, 0xa5, 0x89,
    -	0x73, 0x23, 0x1e, 0x45, 0x5a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, 0x78,
    -	0x2d, 0x99, 0x18, 0x36, 0x0d, 0xd7, 0x92, 0x09, 0x6d, 0x38, 0x49, 0x6b, 0xfe, 0x33, 0x03, 0x6a,
    -	0x50, 0xf3, 0x3f, 0x76, 0xfa, 0xc7, 0x87, 0xc6, 0x44, 0xf3, 0xc9, 0x31, 0x51, 0x34, 0x13, 0xd2,
    -	0xe6, 0x24, 0xd9, 0xfb, 0xcf, 0x49, 0xd0, 0xb3, 0xd1, 0xe8, 0x45, 0xfa, 0xd0, 0x6a, 0x72, 0xf4,
    -	0x72, 0x36, 0xa8, 0x95, 0x95, 0xf0, 0xe4, 0x28, 0xe6, 0x35, 0x98, 0xb3, 0x49, 0x60, 0x39, 0xae,
    -	0xac, 0x0b, 0x53, 0x0f, 0x13, 0xa4, 0xb0, 0xa6, 0x64, 0x6d, 0x94, 0xb8, 0x4d, 0xea, 0x03, 0x87,
    -	0x02, 0x79, 0xc0, 0x6e, 0xf9, 0xb6, 0xac, 0x48, 0xf2, 0x71, 0xc0, 0xde, 0xf4, 0x6d, 0x82, 0x05,
    -	0xc6, 0x7c, 0xd7, 0x80, 0x92, 0x94, 0xb4, 0x69, 0xf5, 0x18, 0x41, 0x57, 0xa2, 0x55, 0xc8, 0xe3,
    -	0xbe, 0xa4, 0xcf, 0xd8, 0xce, 0x06, 0xb5, 0xa2, 0x20, 0x13, 0xc5, 0xcc, 0x98, 0x59, 0x52, 0xe6,
    -	0x9c, 0x3d, 0x7a, 0x14, 0xf2, 0xe2, 0x02, 0xa9, 0xcd, 0x8c, 0x87, 0x85, 0x1c, 0x88, 0x25, 0xce,
    -	0xfc, 0x38, 0x03, 0x95, 0xc4, 0xe2, 0x52, 0xd4, 0x05, 0x51, 0x0b, 0x35, 0x93, 0xa2, 0x2d, 0x3f,
    -	0x79, 0x34, 0xaf, 0x9e, 0xaf, 0xd9, 0x07, 0x79, 0xbe, 0xbe, 0x0d, 0xb3, 0x2d, 0xbe, 0x47, 0xe1,
    -	0x3f, 0x3d, 0xae, 0x4c, 0x73, 0x9c, 0x62, 0x77, 0x63, 0x6f, 0x14, 0x9f, 0x0c, 0x2b, 0x81, 0xe8,
    -	0x26, 0x2c, 0x51, 0x12, 0xd0, 0xfe, 0xc6, 0x61, 0x40, 0xa8, 0xde, 0x4c, 0xc8, 0xc7, 0xd9, 0x37,
    -	0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, 0x34, 0x1e, 0xc3, 0x50,
    -	0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, 0xed, 0xb6, 0x8e, 0x3c,
    -	0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x1e, 0x84, 0x93, 0x22, 0x4c, 0x17, 0x72, 0x9f, 0x61, 0x25,
    -	0xf9, 0x1d, 0x28, 0xc6, 0xb9, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, 0xee, 0xf1, 0x61, 0x8d,
    -	0x7a, 0x4e, 0x96, 0x94, 0xcc, 0xbd, 0x32, 0x69, 0x72, 0x2f, 0x31, 0x64, 0xbd, 0xd3, 0xb5, 0x1f,
    -	0x70, 0xc8, 0x9a, 0x79, 0x90, 0x97, 0x2f, 0x3b, 0xe5, 0xcb, 0x77, 0x15, 0xe4, 0x1f, 0x51, 0xf8,
    -	0x23, 0x23, 0x13, 0x08, 0xed, 0x91, 0xd1, 0xdf, 0x7f, 0x6d, 0xc2, 0xf0, 0x63, 0x03, 0x40, 0xb4,
    -	0xf2, 0x44, 0x1b, 0x29, 0xc5, 0x38, 0xff, 0x0e, 0xcc, 0xfa, 0xd2, 0x23, 0xe5, 0xa0, 0x75, 0xca,
    -	0x7e, 0x71, 0x74, 0x91, 0xa4, 0x4f, 0x62, 0x25, 0xac, 0xf1, 0xf2, 0x07, 0x9f, 0xac, 0xce, 0x7c,
    -	0xf8, 0xc9, 0xea, 0xcc, 0x47, 0x9f, 0xac, 0xce, 0xbc, 0x7d, 0xba, 0x6a, 0x7c, 0x70, 0xba, 0x6a,
    -	0x7c, 0x78, 0xba, 0x6a, 0x7c, 0x74, 0xba, 0x6a, 0x7c, 0x7c, 0xba, 0x6a, 0xbc, 0xfb, 0xf7, 0xd5,
    -	0x99, 0xd7, 0x1e, 0x4b, 0xf3, 0x07, 0xbf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x82, 0xff,
    -	0xd4, 0x07, 0x28, 0x00, 0x00,
    +	// 2873 bytes of a gzipped FileDescriptorProto
    +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57,
    +	0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa,
    +	0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44,
    +	0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49,
    +	0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f,
    +	0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84,
    +	0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7,
    +	0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e,
    +	0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26,
    +	0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03,
    +	0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5,
    +	0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf,
    +	0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b,
    +	0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e,
    +	0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76,
    +	0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f,
    +	0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28,
    +	0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26,
    +	0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28,
    +	0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29,
    +	0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f,
    +	0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58,
    +	0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d,
    +	0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86,
    +	0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5,
    +	0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a,
    +	0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e,
    +	0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a,
    +	0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb,
    +	0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71,
    +	0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53,
    +	0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7,
    +	0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82,
    +	0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5,
    +	0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95,
    +	0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b,
    +	0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8,
    +	0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75,
    +	0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b,
    +	0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09,
    +	0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26,
    +	0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3,
    +	0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3,
    +	0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74,
    +	0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81,
    +	0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2,
    +	0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22,
    +	0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1,
    +	0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60,
    +	0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6,
    +	0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1,
    +	0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82,
    +	0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8,
    +	0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8,
    +	0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56,
    +	0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e,
    +	0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47,
    +	0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc,
    +	0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79,
    +	0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13,
    +	0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77,
    +	0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a,
    +	0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e,
    +	0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45,
    +	0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98,
    +	0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3,
    +	0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e,
    +	0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18,
    +	0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d,
    +	0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69,
    +	0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91,
    +	0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54,
    +	0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63,
    +	0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b,
    +	0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92,
    +	0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23,
    +	0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57,
    +	0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b,
    +	0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a,
    +	0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f,
    +	0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c,
    +	0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f,
    +	0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e,
    +	0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41,
    +	0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37,
    +	0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf,
    +	0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73,
    +	0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15,
    +	0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88,
    +	0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69,
    +	0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7,
    +	0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d,
    +	0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7,
    +	0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7,
    +	0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14,
    +	0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1,
    +	0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17,
    +	0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc,
    +	0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f,
    +	0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98,
    +	0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b,
    +	0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51,
    +	0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c,
    +	0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a,
    +	0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3,
    +	0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e,
    +	0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd,
    +	0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf,
    +	0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b,
    +	0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65,
    +	0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6,
    +	0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5,
    +	0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59,
    +	0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc,
    +	0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17,
    +	0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6,
    +	0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75,
    +	0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43,
    +	0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86,
    +	0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c,
    +	0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53,
    +	0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54,
    +	0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a,
    +	0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2,
    +	0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91,
    +	0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77,
    +	0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4,
    +	0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1,
    +	0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0,
    +	0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8,
    +	0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8,
    +	0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30,
    +	0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a,
    +	0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39,
    +	0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb,
    +	0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95,
    +	0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0,
    +	0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04,
    +	0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66,
    +	0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d,
    +	0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0,
    +	0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1,
    +	0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09,
    +	0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd,
    +	0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70,
    +	0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a,
    +	0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2,
    +	0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5,
    +	0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9,
    +	0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80,
    +	0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a,
    +	0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef,
    +	0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2,
    +	0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8,
    +	0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06,
    +	0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f,
    +	0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a,
    +	0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81,
    +	0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52,
    +	0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f,
    +	0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad,
    +	0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78,
    +	0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03,
    +	0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52,
    +	0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a,
    +	0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19,
    +	0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90,
    +	0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8,
    +	0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda,
    +	0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98,
    +	0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73,
    +	0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0,
    +	0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82,
    +	0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc,
    +	0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f,
    +	0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f,
    +	0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31,
    +	0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31,
    +	0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff,
    +	0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00,
     }
     
     func (m *APIGroup) Marshal() (dAtA []byte, err error) {
    @@ -2025,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
     	return len(dAtA) - i, nil
     }
     
    +func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) {
    +	size := m.Size()
    +	dAtA = make([]byte, size)
    +	n, err := m.MarshalToSizedBuffer(dAtA[:size])
    +	if err != nil {
    +		return nil, err
    +	}
    +	return dAtA[:n], nil
    +}
    +
    +func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
    +	size := m.Size()
    +	return m.MarshalToSizedBuffer(dAtA[:size])
    +}
    +
    +func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    +	i := len(dAtA)
    +	_ = i
    +	var l int
    +	_ = l
    +	if len(m.Values) > 0 {
    +		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
    +			i -= len(m.Values[iNdEx])
    +			copy(dAtA[i:], m.Values[iNdEx])
    +			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
    +			i--
    +			dAtA[i] = 0x1a
    +		}
    +	}
    +	i -= len(m.Operator)
    +	copy(dAtA[i:], m.Operator)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
    +	i--
    +	dAtA[i] = 0x12
    +	i -= len(m.Key)
    +	copy(dAtA[i:], m.Key)
    +	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
    +	i--
    +	dAtA[i] = 0xa
    +	return len(dAtA) - i, nil
    +}
    +
     func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
     	size := m.Size()
     	dAtA = make([]byte, size)
    @@ -3714,6 +3786,25 @@ func (m *Duration) Size() (n int) {
     	return n
     }
     
    +func (m *FieldSelectorRequirement) Size() (n int) {
    +	if m == nil {
    +		return 0
    +	}
    +	var l int
    +	_ = l
    +	l = len(m.Key)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	l = len(m.Operator)
    +	n += 1 + l + sovGenerated(uint64(l))
    +	if len(m.Values) > 0 {
    +		for _, s := range m.Values {
    +			l = len(s)
    +			n += 1 + l + sovGenerated(uint64(l))
    +		}
    +	}
    +	return n
    +}
    +
     func (m *FieldsV1) Size() (n int) {
     	if m == nil {
     		return 0
    @@ -4429,6 +4520,18 @@ func (this *Duration) String() string {
     	}, "")
     	return s
     }
    +func (this *FieldSelectorRequirement) String() string {
    +	if this == nil {
    +		return "nil"
    +	}
    +	s := strings.Join([]string{`&FieldSelectorRequirement{`,
    +		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
    +		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
    +		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
    +		`}`,
    +	}, "")
    +	return s
    +}
     func (this *GetOptions) String() string {
     	if this == nil {
     		return "nil"
    @@ -6443,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error {
     	}
     	return nil
     }
    +func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error {
    +	l := len(dAtA)
    +	iNdEx := 0
    +	for iNdEx < l {
    +		preIndex := iNdEx
    +		var wire uint64
    +		for shift := uint(0); ; shift += 7 {
    +			if shift >= 64 {
    +				return ErrIntOverflowGenerated
    +			}
    +			if iNdEx >= l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			b := dAtA[iNdEx]
    +			iNdEx++
    +			wire |= uint64(b&0x7F) << shift
    +			if b < 0x80 {
    +				break
    +			}
    +		}
    +		fieldNum := int32(wire >> 3)
    +		wireType := int(wire & 0x7)
    +		if wireType == 4 {
    +			return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group")
    +		}
    +		if fieldNum <= 0 {
    +			return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
    +		}
    +		switch fieldNum {
    +		case 1:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Key = string(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 2:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex])
    +			iNdEx = postIndex
    +		case 3:
    +			if wireType != 2 {
    +				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
    +			}
    +			var stringLen uint64
    +			for shift := uint(0); ; shift += 7 {
    +				if shift >= 64 {
    +					return ErrIntOverflowGenerated
    +				}
    +				if iNdEx >= l {
    +					return io.ErrUnexpectedEOF
    +				}
    +				b := dAtA[iNdEx]
    +				iNdEx++
    +				stringLen |= uint64(b&0x7F) << shift
    +				if b < 0x80 {
    +					break
    +				}
    +			}
    +			intStringLen := int(stringLen)
    +			if intStringLen < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			postIndex := iNdEx + intStringLen
    +			if postIndex < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if postIndex > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
    +			iNdEx = postIndex
    +		default:
    +			iNdEx = preIndex
    +			skippy, err := skipGenerated(dAtA[iNdEx:])
    +			if err != nil {
    +				return err
    +			}
    +			if (skippy < 0) || (iNdEx+skippy) < 0 {
    +				return ErrInvalidLengthGenerated
    +			}
    +			if (iNdEx + skippy) > l {
    +				return io.ErrUnexpectedEOF
    +			}
    +			iNdEx += skippy
    +		}
    +	}
    +
    +	if iNdEx > l {
    +		return io.ErrUnexpectedEOF
    +	}
    +	return nil
    +}
     func (m *FieldsV1) Unmarshal(dAtA []byte) error {
     	l := len(dAtA)
     	iNdEx := 0
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
    index 2b95700f72..18dd0b067c 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
    @@ -324,6 +324,25 @@ message Duration {
       optional int64 duration = 1;
     }
     
    +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
    +// relates the key and values.
    +message FieldSelectorRequirement {
    +  // key is the field selector key that the requirement applies to.
    +  optional string key = 1;
    +
    +  // operator represents a key's relationship to a set of values.
    +  // Valid operators are In, NotIn, Exists, DoesNotExist.
    +  // The list of operators may grow in the future.
    +  optional string operator = 2;
    +
    +  // values is an array of string values.
    +  // If the operator is In or NotIn, the values array must be non-empty.
    +  // If the operator is Exists or DoesNotExist, the values array must be empty.
    +  // +optional
    +  // +listType=atomic
    +  repeated string values = 3;
    +}
    +
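
The generated.proto hunk above is the readable half of the new metav1.FieldSelectorRequirement type that this apimachinery bump vendors in; its gogo marshalling, sizing, and unmarshalling code appears earlier in generated.pb.go. A minimal sketch of constructing the type and round-tripping it through that generated code follows; the import alias, the "status.phase" key, and the Running/Pending values are illustrative assumptions, and the operator string comes from the comment in the message above.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Key, Operator, and Values map to proto fields 1, 2, and 3 of the
	// FieldSelectorRequirement message defined above.
	req := metav1.FieldSelectorRequirement{
		Key:      "status.phase", // illustrative field selector key
		Operator: metav1.FieldSelectorOperator("In"),
		Values:   []string{"Running", "Pending"},
	}

	// Marshal and Unmarshal are the gogo-generated methods added in
	// generated.pb.go earlier in this diff.
	raw, err := req.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded metav1.FieldSelectorRequirement
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.String())
}
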
     // FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
     //
     // Each key is either a '.' representing the field itself, and will always map to an empty set,
    @@ -460,7 +479,7 @@ message List {
       optional ListMeta metadata = 1;
     
       // List of objects
    -  repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
    +  repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
     }
     
     // ListMeta describes metadata that synthetic resources must have, including lists and
    @@ -1209,6 +1228,6 @@ message WatchEvent {
       //  * If Type is Deleted: the state of the object immediately before deletion.
       //  * If Type is Error: *Status is recommended; other types may make sense
       //    depending on context.
    -  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
    +  optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
     }
     
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
    index 592dcb8a74..c748071ed7 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
    @@ -24,8 +24,10 @@ import (
     
     	"k8s.io/apimachinery/pkg/fields"
     	"k8s.io/apimachinery/pkg/labels"
    +	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
     	"k8s.io/apimachinery/pkg/selection"
     	"k8s.io/apimachinery/pkg/types"
    +	utiljson "k8s.io/apimachinery/pkg/util/json"
     )
     
     // LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
    @@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) {
     	if f.Raw == nil {
     		return []byte("null"), nil
     	}
    +	if f.getContentType() == fieldsV1InvalidOrValidCBORObject {
    +		var u map[string]interface{}
    +		if err := cbor.Unmarshal(f.Raw, &u); err != nil {
    +			return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err)
    +		}
    +		return utiljson.Marshal(u)
    +	}
     	return f.Raw, nil
     }
     
     // UnmarshalJSON implements json.Unmarshaler
     func (f *FieldsV1) UnmarshalJSON(b []byte) error {
     	if f == nil {
    -		return errors.New("metav1.Fields: UnmarshalJSON on nil pointer")
    +		return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer")
     	}
     	if !bytes.Equal(b, []byte("null")) {
     		f.Raw = append(f.Raw[0:0], b...)
    @@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error {
     
     var _ json.Marshaler = FieldsV1{}
     var _ json.Unmarshaler = &FieldsV1{}
    +
    +func (f FieldsV1) MarshalCBOR() ([]byte, error) {
    +	if f.Raw == nil {
    +		return cbor.Marshal(nil)
    +	}
    +	if f.getContentType() == fieldsV1InvalidOrValidJSONObject {
    +		var u map[string]interface{}
    +		if err := utiljson.Unmarshal(f.Raw, &u); err != nil {
    +			return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err)
    +		}
    +		return cbor.Marshal(u)
    +	}
    +	return f.Raw, nil
    +}
    +
    +var cborNull = []byte{0xf6}
    +
    +func (f *FieldsV1) UnmarshalCBOR(b []byte) error {
    +	if f == nil {
    +		return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer")
    +	}
    +	if !bytes.Equal(b, cborNull) {
    +		f.Raw = append(f.Raw[0:0], b...)
    +	}
    +	return nil
    +}
    +
    +const (
    +	// fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw
    +	// bytes don't represent an allowable value in any supported encoding.
    +	fieldsV1InvalidOrEmpty = iota
    +
    +	// fieldsV1InvalidOrValidJSONObject indicates that a FieldsV1 either contains raw bytes that
    +	// are a valid JSON encoding of an allowable value or don't represent an allowable value in
    +	// any supported encoding.
    +	fieldsV1InvalidOrValidJSONObject
    +
    +	// fieldsV1InvalidOrValidCBORObject indicates that a FieldsV1 either contains raw bytes that
    +	// are a valid CBOR encoding of an allowable value or don't represent an allowable value in
    +	// any supported encoding.
    +	fieldsV1InvalidOrValidCBORObject
    +)
    +
    +// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject,
    +// fieldsV1InvalidOrValidCBORObject based on the value of Raw.
    +//
    +// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map)
    +// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty
    +// and represents an allowable value, then the initial byte unambiguously distinguishes a
    +// JSON-encoded value from a CBOR-encoded value.
    +//
    +// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first
    +// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid
    +// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map",
    +// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or
    +// 0xc6...0xdb). The two sets of valid initial bytes don't intersect.
    +func (f FieldsV1) getContentType() int {
    +	if len(f.Raw) > 0 {
    +		p := f.Raw[0]
    +		switch p {
    +		case 'n', '{', '\t', '\r', '\n', ' ':
    +			return fieldsV1InvalidOrValidJSONObject
    +		case 0xf6: // null
    +			return fieldsV1InvalidOrValidCBORObject
    +		default:
    +			if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ {
    +				return fieldsV1InvalidOrValidCBORObject
    +			}
    +		}
    +	}
    +	return fieldsV1InvalidOrEmpty
    +}
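
    The first-byte sniffing in getContentType is the crux of the change: an allowable JSON-encoded FieldsV1 value and an allowable CBOR-encoded one never share an initial byte. A minimal standalone sketch of that disambiguation (illustrative only, not the vendored code):

    package main

    import "fmt"

    // sniff mirrors the byte ranges documented above: JSON objects start with '{',
    // whitespace, or the 'n' of null; CBOR objects start with a map or tag initial
    // byte, or 0xf6 for null. The two sets never overlap.
    func sniff(raw []byte) string {
    	if len(raw) == 0 {
    		return "empty"
    	}
    	switch p := raw[0]; {
    	case p == 'n' || p == '{' || p == '\t' || p == '\r' || p == '\n' || p == ' ':
    		return "JSON"
    	case p == 0xf6: // CBOR null
    		return "CBOR"
    	case p >= 0xa0 && p <= 0xbf: // CBOR major type 5 (map)
    		return "CBOR"
    	case p >= 0xc6 && p <= 0xdb: // CBOR major type 6 (tag)
    		return "CBOR"
    	default:
    		return "invalid"
    	}
    }

    func main() {
    	fmt.Println(sniff([]byte(`{"f:metadata":{}}`))) // JSON
    	fmt.Println(sniff([]byte{0xa0}))                // CBOR (empty map)
    }
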
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
    index 8eb37f4367..9f302b3f36 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
    @@ -19,6 +19,8 @@ package v1
     import (
     	"encoding/json"
     	"time"
    +
    +	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
     )
     
     const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
    @@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error {
     	return nil
     }
     
    +func (t *MicroTime) UnmarshalCBOR(b []byte) error {
    +	var s *string
    +	if err := cbor.Unmarshal(b, &s); err != nil {
    +		return err
    +	}
    +	if s == nil {
    +		t.Time = time.Time{}
    +		return nil
    +	}
    +
    +	parsed, err := time.Parse(RFC3339Micro, *s)
    +	if err != nil {
    +		return err
    +	}
    +
    +	t.Time = parsed.Local()
    +	return nil
    +}
    +
     // UnmarshalQueryParameter converts from a URL query parameter value to an object
     func (t *MicroTime) UnmarshalQueryParameter(str string) error {
     	if len(str) == 0 {
    @@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) {
     	return json.Marshal(t.UTC().Format(RFC3339Micro))
     }
     
    +func (t MicroTime) MarshalCBOR() ([]byte, error) {
    +	if t.IsZero() {
    +		return cbor.Marshal(nil)
    +	}
    +	return cbor.Marshal(t.UTC().Format(RFC3339Micro))
    +}
    +
     // OpenAPISchemaType is used by the kube-openapi generator when constructing
     // the OpenAPI spec of this type.
     //
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
    index 421770d432..0333cfdb33 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
    @@ -19,6 +19,8 @@ package v1
     import (
     	"encoding/json"
     	"time"
    +
    +	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
     )
     
     // Time is a wrapper around time.Time which supports correct
    @@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error {
     	return nil
     }
     
    +func (t *Time) UnmarshalCBOR(b []byte) error {
    +	var s *string
    +	if err := cbor.Unmarshal(b, &s); err != nil {
    +		return err
    +	}
    +	if s == nil {
    +		t.Time = time.Time{}
    +		return nil
    +	}
    +
    +	parsed, err := time.Parse(time.RFC3339, *s)
    +	if err != nil {
    +		return err
    +	}
    +
    +	t.Time = parsed.Local()
    +	return nil
    +}
    +
     // UnmarshalQueryParameter converts from a URL query parameter value to an object
     func (t *Time) UnmarshalQueryParameter(str string) error {
     	if len(str) == 0 {
    @@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) {
     	return buf, nil
     }
     
    +func (t Time) MarshalCBOR() ([]byte, error) {
    +	if t.IsZero() {
    +		return cbor.Marshal(nil)
    +	}
    +
    +	return cbor.Marshal(t.UTC().Format(time.RFC3339))
    +}
    +
     // ToUnstructured implements the value.UnstructuredConverter interface.
     func (t Time) ToUnstructured() interface{} {
     	if t.IsZero() {
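
    With the MarshalCBOR/UnmarshalCBOR methods above, metav1.Time round-trips through CBOR as an RFC 3339 string, mirroring its JSON behavior (MicroTime gets the same treatment with microsecond precision). A short sketch, assuming the vendored apimachinery version that carries these methods:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/fxamacker/cbor/v2"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	in := metav1.NewTime(time.Date(2024, 5, 1, 12, 0, 0, 0, time.UTC))

    	// cbor.Marshal picks up the MarshalCBOR method added above, which writes
    	// the RFC 3339 text rather than a native CBOR time tag.
    	b, err := cbor.Marshal(in)
    	if err != nil {
    		panic(err)
    	}

    	var out metav1.Time
    	if err := cbor.Unmarshal(b, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Equal(&in)) // true: the instant survives the round trip
    }
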
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
    index 9695ba50b4..473adb9ef5 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
    @@ -1278,6 +1278,33 @@ const (
     	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
     )
     
    +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
    +// relates the key and values.
    +type FieldSelectorRequirement struct {
    +	// key is the field selector key that the requirement applies to.
    +	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
    +	// operator represents a key's relationship to a set of values.
    +	// Valid operators are In, NotIn, Exists, DoesNotExist.
    +	// The list of operators may grow in the future.
    +	Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"`
    +	// values is an array of string values.
    +	// If the operator is In or NotIn, the values array must be non-empty.
    +	// If the operator is Exists or DoesNotExist, the values array must be empty.
    +	// +optional
    +	// +listType=atomic
    +	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
    +}
    +
    +// A field selector operator is the set of operators that can be used in a selector requirement.
    +type FieldSelectorOperator string
    +
    +const (
    +	FieldSelectorOpIn           FieldSelectorOperator = "In"
    +	FieldSelectorOpNotIn        FieldSelectorOperator = "NotIn"
    +	FieldSelectorOpExists       FieldSelectorOperator = "Exists"
    +	FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist"
    +)
    +
     // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
     // that the fieldset applies to.
     type ManagedFieldsEntry struct {
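
    FieldSelectorRequirement mirrors the existing LabelSelectorRequirement shape. A short sketch of the wire form its json tags produce, assuming the vendored apimachinery version above:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	reqs := []metav1.FieldSelectorRequirement{
    		{Key: "status.phase", Operator: metav1.FieldSelectorOpIn, Values: []string{"Running"}},
    		{Key: "spec.nodeName", Operator: metav1.FieldSelectorOpExists}, // values omitted via omitempty
    	}
    	out, err := json.Marshal(reqs)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out))
    	// [{"key":"status.phase","operator":"In","values":["Running"]},{"key":"spec.nodeName","operator":"Exists"}]
    }
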
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
    index b736e83712..1fa37215cd 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
    @@ -135,6 +135,17 @@ func (DeleteOptions) SwaggerDoc() map[string]string {
     	return map_DeleteOptions
     }
     
    +var map_FieldSelectorRequirement = map[string]string{
    +	"":         "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.",
    +	"key":      "key is the field selector key that the requirement applies to.",
    +	"operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.",
    +	"values":   "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.",
    +}
    +
    +func (FieldSelectorRequirement) SwaggerDoc() map[string]string {
    +	return map_FieldSelectorRequirement
    +}
    +
     var map_FieldsV1 = map[string]string{
     	"": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where  is the name of a field in a struct, or key in a map 'v:', where  is the exact json formatted value of a list item 'i:', where  is position of a item in a list 'k:', where  is a map of  a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
     }
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
    index a0f709ad86..3eba5ba541 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
    @@ -32,6 +32,10 @@ import (
     type LabelSelectorValidationOptions struct {
     	// Allow invalid label value in selector
     	AllowInvalidLabelValueInSelector bool
    +
    +	// Allows an operator that is not interpretable to pass validation.  This is useful for cases where a broader check
    +	// can be performed, as in a *SubjectAccessReview
    +	AllowUnknownOperatorInRequirement bool
     }
     
     // LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression.
    @@ -79,7 +83,9 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L
     			allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
     		}
     	default:
    -		allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
    +		if !opts.AllowUnknownOperatorInRequirement {
    +			allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
    +		}
     	}
     	allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
     	if !opts.AllowInvalidLabelValueInSelector {
    @@ -113,6 +119,39 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi
     	return allErrs
     }
     
    +// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validation options
    +type FieldSelectorValidationOptions struct {
    +	// Allows an operator that is not interpretable to pass validation.  This is useful for cases where a broader check
    +	// can be performed, as in a *SubjectAccessReview
    +	AllowUnknownOperatorInRequirement bool
    +}
    +
    +// ValidateFieldSelectorRequirement validates the requirement according to the opts and returns any validation errors.
    +func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList {
    +	allErrs := field.ErrorList{}
    +
    +	if len(requirement.Key) == 0 {
    +		allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified"))
    +	}
    +
    +	switch requirement.Operator {
    +	case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn:
    +		if len(requirement.Values) == 0 {
    +			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
    +		}
    +	case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist:
    +		if len(requirement.Values) > 0 {
    +			allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
    +		}
    +	default:
    +		if !opts.AllowUnknownOperatorInRequirement {
    +			allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator"))
    +		}
    +	}
    +
    +	return allErrs
    +}
    +
     func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
     	allErrs := field.ErrorList{}
     	//lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed
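
    ValidateFieldSelectorRequirement follows the same rules as the label-selector validator: set operators (In, NotIn) require values, existence operators (Exists, DoesNotExist) forbid them, and unknown operators are rejected unless AllowUnknownOperatorInRequirement is set. A usage sketch against the vendored package:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
    	"k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
    	req := metav1.FieldSelectorRequirement{
    		Key:      "status.phase",
    		Operator: metav1.FieldSelectorOpExists,
    		Values:   []string{"Running"}, // invalid: Exists must not carry values
    	}
    	errs := metav1validation.ValidateFieldSelectorRequirement(
    		req,
    		metav1validation.FieldSelectorValidationOptions{},
    		field.NewPath("spec", "selector"),
    	)
    	fmt.Println(errs.ToAggregate()) // reports the forbidden "values" field
    }
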
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
    index 7d29c504ab..90cc54a7e7 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
    @@ -327,6 +327,27 @@ func (in *Duration) DeepCopy() *Duration {
     	return out
     }
     
    +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    +func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) {
    +	*out = *in
    +	if in.Values != nil {
    +		in, out := &in.Values, &out.Values
    +		*out = make([]string, len(*in))
    +		copy(*out, *in)
    +	}
    +	return
    +}
    +
    +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement.
    +func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement {
    +	if in == nil {
    +		return nil
    +	}
    +	out := new(FieldSelectorRequirement)
    +	in.DeepCopyInto(out)
    +	return out
    +}
    +
     // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
     func (in *FieldsV1) DeepCopyInto(out *FieldsV1) {
     	*out = *in
    diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
    index d14d42591b..fcec553542 100644
    --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
    +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
    @@ -33,9 +33,9 @@ message PartialObjectMetadataList {
       // Standard list metadata.
       // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
       // +optional
    -  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2;
    +  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2;
     
       // items contains each of the included items.
    -  repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1;
    +  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1;
     }
     
    diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
    index 5e60142405..9e22a00564 100644
    --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go
    +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
    @@ -45,6 +45,19 @@ var (
     // Requirements is AND of all requirements.
     type Requirements []Requirement
     
    +func (r Requirements) String() string {
    +	var sb strings.Builder
    +
    +	for i, requirement := range r {
    +		if i > 0 {
    +			sb.WriteString(", ")
    +		}
    +		sb.WriteString(requirement.String())
    +	}
    +
    +	return sb.String()
    +}
    +
     // Selector represents a label selector.
     type Selector interface {
     	// Matches returns true if this selector matches the given set of labels.
    @@ -285,6 +298,13 @@ func (r *Requirement) Values() sets.String {
     	return ret
     }
     
    +// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting.
    +func (r *Requirement) ValuesUnsorted() []string {
    +	ret := make([]string, 0, len(r.strValues))
    +	ret = append(ret, r.strValues...)
    +	return ret
    +}
    +
     // Equal checks the equality of requirement.
     func (r Requirement) Equal(x Requirement) bool {
     	if r.key != x.key {
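
    Requirements.String and Requirement.ValuesUnsorted are small conveniences on top of the existing selector parser. A sketch of how they read:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/labels"
    )

    func main() {
    	sel, err := labels.Parse("env in (prod,staging),tier!=cache")
    	if err != nil {
    		panic(err)
    	}

    	reqs, _ := sel.Requirements()
    	fmt.Println(reqs.String()) // the requirements joined with ", "

    	for _, r := range reqs {
    		// ValuesUnsorted returns the values as supplied, without the
    		// sorting that Values().List() would apply.
    		fmt.Println(r.Key(), r.Operator(), r.ValuesUnsorted())
    	}
    }
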
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
    index 9056397fa5..60c000bcb7 100644
    --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
    @@ -18,16 +18,77 @@ package runtime
     
     import (
     	"bytes"
    -	"encoding/json"
     	"errors"
    +	"fmt"
    +
    +	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
    +	"k8s.io/apimachinery/pkg/util/json"
     )
     
    +// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the
    +// signature of ToUnstructured does not allow returning an error value in cases where the conversion
    +// is not possible (content type is unrecognized or bytes don't match content type).
    +func rawToUnstructured(raw []byte, contentType string) (interface{}, error) {
    +	switch contentType {
    +	case ContentTypeJSON:
    +		var u interface{}
    +		if err := json.Unmarshal(raw, &u); err != nil {
    +			return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err)
    +		}
    +		return u, nil
    +	case ContentTypeCBOR:
    +		var u interface{}
    +		if err := cbor.Unmarshal(raw, &u); err != nil {
    +			return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err)
    +		}
    +		return u, nil
    +	default:
    +		return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured")
    +	}
    +}
    +
    +func (re RawExtension) guessContentType() string {
    +	switch {
    +	case bytes.HasPrefix(re.Raw, cborSelfDescribed):
    +		return ContentTypeCBOR
    +	case len(re.Raw) > 0:
    +		switch re.Raw[0] {
    +		case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
    +			// Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null.
    +			return ContentTypeJSON
    +		}
    +	}
    +	return ""
    +}
    +
     func (re *RawExtension) UnmarshalJSON(in []byte) error {
     	if re == nil {
     		return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
     	}
    -	if !bytes.Equal(in, []byte("null")) {
    -		re.Raw = append(re.Raw[0:0], in...)
    +	if bytes.Equal(in, []byte("null")) {
    +		return nil
    +	}
    +	re.Raw = append(re.Raw[0:0], in...)
    +	return nil
    +}
    +
    +var (
    +	cborNull          = []byte{0xf6}
    +	cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7}
    +)
    +
    +func (re *RawExtension) UnmarshalCBOR(in []byte) error {
    +	if re == nil {
    +		return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer")
    +	}
    +	if !bytes.Equal(in, cborNull) {
    +		if !bytes.HasPrefix(in, cborSelfDescribed) {
    +			// The self-described CBOR tag doesn't change the interpretation of the data
    +			// item it encloses, but it is useful as a magic number. Its encoding is
    +			// also what is used to implement the CBOR RecognizingDecoder.
    +			re.Raw = append(re.Raw[:0], cborSelfDescribed...)
    +		}
    +		re.Raw = append(re.Raw, in...)
     	}
     	return nil
     }
    @@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) {
     		}
     		return []byte("null"), nil
     	}
    -	// TODO: Check whether ContentType is actually JSON before returning it.
    -	return re.Raw, nil
    +
    +	contentType := re.guessContentType()
    +	if contentType == ContentTypeJSON {
    +		return re.Raw, nil
    +	}
    +
    +	u, err := rawToUnstructured(re.Raw, contentType)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return json.Marshal(u)
    +}
    +
    +func (re RawExtension) MarshalCBOR() ([]byte, error) {
    +	if re.Raw == nil {
    +		if re.Object != nil {
    +			return cbor.Marshal(re.Object)
    +		}
    +		return cbor.Marshal(nil)
    +	}
    +
    +	contentType := re.guessContentType()
    +	if contentType == ContentTypeCBOR {
    +		return re.Raw, nil
    +	}
    +
    +	u, err := rawToUnstructured(re.Raw, contentType)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return cbor.Marshal(u)
     }
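
    RawExtension now applies the same first-byte sniffing: Raw bytes that look like JSON pass through MarshalJSON untouched and are transcoded by MarshalCBOR, and vice versa for CBOR-prefixed bytes. A sketch, assuming the vendored runtime package above:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/fxamacker/cbor/v2"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
    	re := runtime.RawExtension{Raw: []byte(`{"kind":"ConfigMap","apiVersion":"v1"}`)}

    	j, err := json.Marshal(re) // JSON raw bytes are returned as-is
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(j))

    	c, err := cbor.Marshal(re) // the same raw bytes are transcoded to CBOR
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%d bytes of CBOR\n", len(c))
    }
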
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
    new file mode 100644
    index 0000000000..cd78b1df26
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
    @@ -0,0 +1,36 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and
    +// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular,
    +// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions.
    +package direct
    +
    +import (
    +	"k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
    +)
    +
    +func Marshal(src interface{}) ([]byte, error) {
    +	return modes.Encode.Marshal(src)
    +}
    +
    +func Unmarshal(src []byte, dst interface{}) error {
    +	return modes.Decode.Unmarshal(src, dst)
    +}
    +
    +func Diagnose(src []byte) (string, error) {
    +	return modes.Diagnostic.Diagnose(src)
    +}
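
    The direct package is the entry point the metav1 and runtime types above use for their CBOR methods, and it is importable on its own. A small usage sketch:

    package main

    import (
    	"fmt"

    	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
    )

    func main() {
    	in := map[string]interface{}{"replicas": int64(3), "paused": false}

    	b, err := cbor.Marshal(in)
    	if err != nil {
    		panic(err)
    	}

    	// Diagnose renders the bytes in CBOR diagnostic notation, handy when
    	// inspecting serializer output by hand.
    	diag, err := cbor.Diagnose(b)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(diag)

    	var out map[string]interface{}
    	if err := cbor.Unmarshal(b, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out["replicas"]) // 3, decoded as int64
    }
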
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
    new file mode 100644
    index 0000000000..f14cbd6b58
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
    @@ -0,0 +1,65 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package modes
    +
    +import (
    +	"bytes"
    +	"sync"
    +)
    +
    +var buffers = BufferProvider{p: new(sync.Pool)}
    +
    +type buffer struct {
    +	bytes.Buffer
    +}
    +
    +type pool interface {
    +	Get() interface{}
    +	Put(interface{})
    +}
    +
    +type BufferProvider struct {
    +	p pool
    +}
    +
    +func (b *BufferProvider) Get() *buffer {
    +	if buf, ok := b.p.Get().(*buffer); ok {
    +		return buf
    +	}
    +	return &buffer{}
    +}
    +
    +func (b *BufferProvider) Put(buf *buffer) {
    +	if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ {
    +		// Objects in a sync.Pool are assumed to be fungible. This is not a good assumption
    +		// for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as
    +		// needed to accommodate writes. In Kubernetes, apiservers tend to encode "small"
    +		// objects very frequently and much larger objects (especially large lists) only
    +		// occasionally. Under steady load, pooled buffers tend to be borrowed frequently
    +		// enough to prevent them from being released. Over time, each buffer is used to
    +		// encode a large object and its capacity increases accordingly. The result is that
    +		// practically all buffers in the pool retain much more capacity than needed to
    +		// encode most objects.
    +
    +		// As a basic mitigation for the worst case, buffers with more capacity than the
    +		// default max request body size are never returned to the pool.
    +		// TODO: Optimize for higher buffer utilization.
    +		return
    +	}
    +	buf.Reset()
    +	b.p.Put(buf)
    +}
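
    The Put threshold guards against the sync.Pool pitfall the comment describes: one large encode would otherwise pin a large backing array in every pooled buffer. The same mitigation outside apimachinery looks roughly like this (maxPooledCap is an illustrative name mirroring the 3 MiB threshold above):

    package main

    import (
    	"bytes"
    	"sync"
    )

    const maxPooledCap = 3 * 1024 * 1024 // mirrors the default max request body size used above

    var pool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

    func getBuffer() *bytes.Buffer { return pool.Get().(*bytes.Buffer) }

    func putBuffer(buf *bytes.Buffer) {
    	if buf.Cap() > maxPooledCap {
    		return // drop oversized buffers instead of returning them to the pool
    	}
    	buf.Reset()
    	pool.Put(buf)
    }

    func main() {
    	buf := getBuffer()
    	buf.WriteString("hello")
    	putBuffer(buf)
    }
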
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
    new file mode 100644
    index 0000000000..858529e958
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
    @@ -0,0 +1,422 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package modes
    +
    +import (
    +	"encoding"
    +	"encoding/json"
    +	"errors"
    +	"fmt"
    +	"reflect"
    +	"sync"
    +
    +	"github.com/fxamacker/cbor/v2"
    +)
    +
    +// Returns a non-nil error if and only if the argument's type (or one of its component types, for
    +// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing
    +// cbor.Marshaler and likewise for the respective Unmarshaler interfaces.
    +//
    +// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic
    +// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be
    +// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree
    +// types in client programs.
    +func RejectCustomMarshalers(v interface{}) error {
    +	if v == nil {
    +		return nil
    +	}
    +	rv := reflect.ValueOf(v)
    +	if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
    +		return fmt.Errorf("unable to serialize %T: %w", v, err)
    +	}
    +	if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
    +		return fmt.Errorf("unable to serialize %T: %w", v, err)
    +	}
    +	return nil
    +}
    +
    +// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the
    +// decoder shouldn't be able to contain cycles, but practically any object can be passed to the
    +// encoder.
    +var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)")
    +
    +// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing
    +// that it can be exceeded by deeply nested acyclic objects. The limit will be removed along with the rest of
    +// this mechanism.
    +const maxDepth = 2048
    +
    +var marshalerCache = checkers{
    +	cborInterface: reflect.TypeFor[cbor.Marshaler](),
    +	nonCBORInterfaces: []reflect.Type{
    +		reflect.TypeFor[json.Marshaler](),
    +		reflect.TypeFor[encoding.TextMarshaler](),
    +	},
    +}
    +
    +var unmarshalerCache = checkers{
    +	cborInterface: reflect.TypeFor[cbor.Unmarshaler](),
    +	nonCBORInterfaces: []reflect.Type{
    +		reflect.TypeFor[json.Unmarshaler](),
    +		reflect.TypeFor[encoding.TextUnmarshaler](),
    +	},
    +	assumeAddressableValues: true,
    +}
    +
    +// checker wraps a function for dynamically checking a value of a specific type for custom JSON
    +// behaviors not matched by a custom CBOR behavior.
    +type checker struct {
    +	// check returns a non-nil error if the given value might be marshalled to or from CBOR
    +	// using the default behavior for its kind, but marshalled to or from JSON using custom
    +	// behavior.
    +	check func(rv reflect.Value, depth int) error
    +
    +	// safe returns true if all values of this type are safe from mismatched custom marshalers.
    +	safe func() bool
    +}
    +
    +// TODO: stale
    +// Having a single addressable checker for comparisons lets us prune and collapse parts of the
    +// object traversal that are statically known to be safe. Depending on the type, it may be
    +// unnecessary to inspect each value of that type. For example, no value of the built-in type bool
    +// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a
    +// distinct type from bool).
    +var noop = checker{
    +	safe: func() bool {
    +		return true
    +	},
    +	check: func(rv reflect.Value, depth int) error {
    +		return nil
    +	},
    +}
    +
    +type checkers struct {
    +	m sync.Map // reflect.Type => *checker
    +
    +	cborInterface     reflect.Type
    +	nonCBORInterfaces []reflect.Type
    +
    +	assumeAddressableValues bool
    +}
    +
    +func (cache *checkers) getChecker(rt reflect.Type) checker {
    +	if ptr, ok := cache.m.Load(rt); ok {
    +		return *ptr.(*checker)
    +	}
    +
    +	return cache.getCheckerInternal(rt, nil)
    +}
    +
    +// linked list node representing the path from a composite type to an element type
    +type path struct {
    +	Type   reflect.Type
    +	Parent *path
    +}
    +
    +func (p path) cyclic(rt reflect.Type) bool {
    +	for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent {
    +		if ancestor.Type == rt {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) {
    +	// Store a placeholder cache entry first to handle cyclic types.
    +	var wg sync.WaitGroup
    +	wg.Add(1)
    +	defer wg.Done()
    +	c = checker{
    +		safe: func() bool {
    +			wg.Wait()
    +			return c.safe()
    +		},
    +		check: func(rv reflect.Value, depth int) error {
    +			wg.Wait()
    +			return c.check(rv, depth)
    +		},
    +	}
    +	if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded {
    +		// Someone else stored an entry for this type, use it.
    +		return *actual.(*checker)
    +	}
    +
    +	// Take a nonreflective path for the unstructured container types. They're common and
    +	// usually nested inside one another.
    +	switch rt {
    +	case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}]():
    +		return checker{
    +			safe: func() bool {
    +				return false
    +			},
    +			check: func(rv reflect.Value, depth int) error {
    +				return checkUnstructuredValue(cache, rv.Interface(), depth)
    +			},
    +		}
    +	}
    +
    +	// It's possible that one of the relevant interfaces is implemented on a type with a pointer
    +	// receiver, but that a particular value of that type is not addressable. For example:
    +	//
    +	//   func (Foo) MarshalText() ([]byte, error) { ... }
    +	//   func (*Foo) MarshalCBOR() ([]byte, error) { ... }
    +	//
    +	// Both methods are in the method set of *Foo, but the method set of Foo contains only
    +	// MarshalText.
    +	//
    +	// Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text
    +	// interface with a pointer receiver are always accessible. Only the unmarshaler check
    +	// assumes that CBOR methods with pointer receivers are accessible.
    +
    +	if rt.Implements(cache.cborInterface) {
    +		return noop
    +	}
    +	for _, unsafe := range cache.nonCBORInterfaces {
    +		if rt.Implements(unsafe) {
    +			err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe)
    +			return checker{
    +				safe: func() bool {
    +					return false
    +				},
    +				check: func(reflect.Value, int) error {
    +					return err
    +				},
    +			}
    +		}
    +	}
    +
    +	if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) {
    +		return noop
    +	}
    +	for _, unsafe := range cache.nonCBORInterfaces {
    +		if reflect.PointerTo(rt).Implements(unsafe) {
    +			err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe)
    +			return checker{
    +				safe: func() bool {
    +					return false
    +				},
    +				check: func(reflect.Value, int) error {
    +					return err
    +				},
    +			}
    +		}
    +	}
    +
    +	self := &path{Type: rt, Parent: parent}
    +
    +	switch rt.Kind() {
    +	case reflect.Array:
    +		ce := cache.getCheckerInternal(rt.Elem(), self)
    +		rtlen := rt.Len()
    +		if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) {
    +			return noop
    +		}
    +		return checker{
    +			safe: func() bool {
    +				return false
    +			},
    +			check: func(rv reflect.Value, depth int) error {
    +				if depth <= 0 {
    +					return errMaxDepthExceeded
    +				}
    +				for i := 0; i < rtlen; i++ {
    +					if err := ce.check(rv.Index(i), depth-1); err != nil {
    +						return err
    +					}
    +				}
    +				return nil
    +			},
    +		}
    +
    +	case reflect.Interface:
    +		// All interface values have to be checked because their dynamic type might
    +		// implement one of the interesting interfaces or be composed of another type that
    +		// does.
    +		return checker{
    +			safe: func() bool {
    +				return false
    +			},
    +			check: func(rv reflect.Value, depth int) error {
    +				if rv.IsNil() {
    +					return nil
    +				}
    +				// Unpacking interfaces must count against recursion depth,
    +				// consider this cycle:
    +				// >  var i interface{}
    +				// >  var p *interface{} = &i
    +				// >  i = p
    +				// >  rv := reflect.ValueOf(i)
    +				// >  for {
    +				// >    rv = rv.Elem()
    +				// >  }
    +				if depth <= 0 {
    +					return errMaxDepthExceeded
    +				}
    +				rv = rv.Elem()
    +				return cache.getChecker(rv.Type()).check(rv, depth-1)
    +			},
    +		}
    +
    +	case reflect.Map:
    +		rtk := rt.Key()
    +		ck := cache.getCheckerInternal(rtk, self)
    +		rte := rt.Elem()
    +		ce := cache.getCheckerInternal(rte, self)
    +		if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() {
    +			return noop
    +		}
    +		return checker{
    +			safe: func() bool {
    +				return false
    +			},
    +			check: func(rv reflect.Value, depth int) error {
    +				if depth <= 0 {
    +					return errMaxDepthExceeded
    +				}
    +				iter := rv.MapRange()
    +				rvk := reflect.New(rtk).Elem()
    +				rve := reflect.New(rte).Elem()
    +				for iter.Next() {
    +					rvk.SetIterKey(iter)
    +					if err := ck.check(rvk, depth-1); err != nil {
    +						return err
    +					}
    +					rve.SetIterValue(iter)
    +					if err := ce.check(rve, depth-1); err != nil {
    +						return err
    +					}
    +				}
    +				return nil
    +			},
    +		}
    +
    +	case reflect.Pointer:
    +		ce := cache.getCheckerInternal(rt.Elem(), self)
    +		if !self.cyclic(rt.Elem()) && ce.safe() {
    +			return noop
    +		}
    +		return checker{
    +			safe: func() bool {
    +				return false
    +			},
    +			check: func(rv reflect.Value, depth int) error {
    +				if rv.IsNil() {
    +					return nil
    +				}
    +				if depth <= 0 {
    +					return errMaxDepthExceeded
    +				}
    +				return ce.check(rv.Elem(), depth-1)
    +			},
    +		}
    +
    +	case reflect.Slice:
    +		ce := cache.getCheckerInternal(rt.Elem(), self)
    +		if !self.cyclic(rt.Elem()) && ce.safe() {
    +			return noop
    +		}
    +		return checker{
    +			safe: func() bool {
    +				return false
    +			},
    +			check: func(rv reflect.Value, depth int) error {
    +				if depth <= 0 {
    +					return errMaxDepthExceeded
    +				}
    +				for i := 0; i < rv.Len(); i++ {
    +					if err := ce.check(rv.Index(i), depth-1); err != nil {
    +						return err
    +					}
    +				}
    +				return nil
    +			},
    +		}
    +
    +	case reflect.Struct:
    +		type field struct {
    +			Index   int
    +			Checker checker
    +		}
    +		var fields []field
    +		for i := 0; i < rt.NumField(); i++ {
    +			f := rt.Field(i)
    +			cf := cache.getCheckerInternal(f.Type, self)
    +			if !self.cyclic(f.Type) && cf.safe() {
    +				continue
    +			}
    +			fields = append(fields, field{Index: i, Checker: cf})
    +		}
    +		if len(fields) == 0 {
    +			return noop
    +		}
    +		return checker{
    +			safe: func() bool {
    +				return false
    +			},
    +			check: func(rv reflect.Value, depth int) error {
    +				if depth <= 0 {
    +					return errMaxDepthExceeded
    +				}
    +				for _, fi := range fields {
    +					if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil {
    +						return err
    +					}
    +				}
    +				return nil
    +			},
    +		}
    +
    +	default:
    +		// Not a serializable composite type (funcs and channels are composite types but are
    +		// rejected by JSON and CBOR serialization).
    +		return noop
    +
    +	}
    +}
    +
    +func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error {
    +	switch v := v.(type) {
    +	case nil, bool, int64, float64, string:
    +		return nil
    +	case []interface{}:
    +		if depth <= 0 {
    +			return errMaxDepthExceeded
    +		}
    +		for _, element := range v {
    +			if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
    +				return err
    +			}
    +		}
    +		return nil
    +	case map[string]interface{}:
    +		if depth <= 0 {
    +			return errMaxDepthExceeded
    +		}
    +		for _, element := range v {
    +			if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
    +				return err
    +			}
    +		}
    +		return nil
    +	default:
    +		// Unmarshaling an unstructured doesn't use other dynamic types, but nothing
    +		// prevents inserting values with arbitrary dynamic types into unstructured content,
    +		// as long as they can be marshalled.
    +		rv := reflect.ValueOf(v)
    +		return cache.getChecker(rv.Type()).check(rv, depth)
    +	}
    +}
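
    RejectCustomMarshalers lives in an internal package and cannot be called from outside apimachinery, so the standalone sketch below only reproduces the top-level condition it checks: a type with custom JSON marshaling but no CBOR counterpart would serialize differently under the two encodings and is flagged (the real checker also walks composite types recursively). Celsius and hasMismatchedMarshalers are illustrative names, not library API:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"reflect"

    	"github.com/fxamacker/cbor/v2"
    )

    type Celsius float64

    // Custom JSON behavior with no CBOR counterpart.
    func (c Celsius) MarshalJSON() ([]byte, error) {
    	return []byte(fmt.Sprintf("%.1f", float64(c))), nil
    }

    // hasMismatchedMarshalers reports whether a value's type implements
    // json.Marshaler without also implementing cbor.Marshaler.
    func hasMismatchedMarshalers(v interface{}) bool {
    	t := reflect.TypeOf(v)
    	return t.Implements(reflect.TypeFor[json.Marshaler]()) &&
    		!t.Implements(reflect.TypeFor[cbor.Marshaler]())
    }

    func main() {
    	fmt.Println(hasMismatchedMarshalers(Celsius(20))) // true: would be rejected
    }
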
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
    new file mode 100644
    index 0000000000..895b0deff9
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
    @@ -0,0 +1,158 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package modes
    +
    +import (
    +	"reflect"
    +
    +	"github.com/fxamacker/cbor/v2"
    +)
    +
    +var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry {
    +	var opts []func(*cbor.SimpleValueRegistry) error
    +	for sv := 0; sv <= 255; sv++ {
    +		// Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved
    +		// and considered ill-formed by the CBOR specification. We only accept false (20),
    +		// true (21), and null (22).
    +		switch sv {
    +		case 20: // false
    +		case 21: // true
    +		case 22: // null
    +		case 24, 25, 26, 27, 28, 29, 30, 31: // reserved
    +		default:
    +			opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv)))
    +		}
    +	}
    +	simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return simpleValues
    +}()
    +
    +var Decode cbor.DecMode = func() cbor.DecMode {
    +	decode, err := cbor.DecOptions{
    +		// Maps with duplicate keys are well-formed but invalid according to the CBOR spec
    +		// and never acceptable. Unlike the JSON serializer, inputs containing duplicate map
    +		// keys are rejected outright and not surfaced as a strict decoding error.
    +		DupMapKey: cbor.DupMapKeyEnforcedAPF,
    +
    +		// For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted
    +		// with or without tagging. If a tag number is present, it must be valid.
    +		TimeTag: cbor.DecTagOptional,
    +
    +		// Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit
    +		// is 10000.
    +		MaxNestedLevels: 64,
    +
    +		MaxArrayElements: 1024,
    +		MaxMapPairs:      1024,
    +
    +		// Indefinite-length sequences aren't produced by this serializer, but other
    +		// implementations can.
    +		// implementations can produce them.
    +
    +		// Accept inputs that contain CBOR tags.
    +		TagsMd: cbor.TagsAllowed,
    +
    +		// Decode type 0 (unsigned integer) as int64.
    +		// TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64.
    +		IntDec: cbor.IntDecConvertSignedOrFail,
    +
    +		// Disable producing map[cbor.ByteString]interface{}, which is not acceptable for
    +		// decodes into interface{}.
    +		MapKeyByteString: cbor.MapKeyByteStringForbidden,
    +
    +		// Error on map keys that don't map to a field in the destination struct.
    +		ExtraReturnErrors: cbor.ExtraDecErrorUnknownField,
    +
    +		// Decode maps into concrete type map[string]interface{} when the destination is an
    +		// interface{}.
    +		DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)),
    +
    +		// A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but
    +		// invalid according to the CBOR spec. Reject invalid inputs. Encoders are
    +		// responsible for ensuring that all text strings they produce contain valid UTF-8
    +		// sequences and may use the byte string major type to encode strings that have not
    +		// been validated.
    +		UTF8: cbor.UTF8RejectInvalid,
    +
    +		// Never make a case-insensitive match between a map key and a struct field.
    +		FieldNameMatching: cbor.FieldNameMatchingCaseSensitive,
    +
    +		// Produce string concrete values when decoding a CBOR byte string into interface{}.
    +		DefaultByteStringType: reflect.TypeOf(""),
    +
    +		// Allow CBOR byte strings to be decoded into string destination values. If a byte
    +		// string is enclosed in an "expected later encoding" tag
    +		// (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text
    +		// encoding indicated by that tag (e.g. base64) will be applied to the contents of
    +		// the byte string.
    +		ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding,
    +
    +		// Allow CBOR byte strings to match struct fields when appearing as a map key.
    +		FieldNameByteString: cbor.FieldNameByteStringAllowed,
    +
    +		// When decoding an unrecognized tag to interface{}, return the decoded tag content
    +		// instead of the default, a cbor.Tag representing a (number, content) pair.
    +		UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny,
    +
    +		// Decode time tags to interface{} as strings containing RFC 3339 timestamps.
    +		TimeTagToAny: cbor.TimeTagToRFC3339Nano,
    +
    +		// For parity with JSON, strings can be decoded into time.Time if they are RFC 3339
    +		// timestamps.
    +		ByteStringToTime: cbor.ByteStringToTimeAllowed,
    +
    +		// Reject NaN and infinite floating-point values since they don't have a JSON
    +		// representation (RFC 8259 Section 6).
    +		NaN: cbor.NaNDecodeForbidden,
    +		Inf: cbor.InfDecodeForbidden,
    +
    +		// When unmarshaling a byte string into a []byte, assume that the byte string
    +		// contains base64-encoded bytes, unless explicitly counterindicated by an "expected
    +		// later encoding" tag. This is consistent with the because of unmarshaling a JSON
    +		// later encoding" tag. This is consistent with the behavior of unmarshaling a JSON
    +		ByteStringExpectedFormat: cbor.ByteStringExpectedBase64,
    +
    +		// Reject the arbitrary-precision integer tags because they can't be faithfully
    +		// roundtripped through the allowable Unstructured types.
    +		BignumTag: cbor.BignumTagForbidden,
    +
    +		// Reject anything other than the simple values true, false, and null.
    +		SimpleValues: simpleValues,
    +
    +		// Disable default recognition of types implementing encoding.BinaryUnmarshaler,
    +		// which is not recognized for JSON decoding.
    +		BinaryUnmarshaler: cbor.BinaryUnmarshalerNone,
    +	}.DecMode()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return decode
    +}()
    +
    +// DecodeLax is derived from Decode, but does not complain about unknown fields in the input.
    +var DecodeLax cbor.DecMode = func() cbor.DecMode {
    +	opts := Decode.DecOptions()
    +	opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit
    +	dm, err := opts.DecMode()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return dm
    +}()
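
    DecodeLax is derived from Decode by clearing only the unknown-field bit. Since the modes package is internal, the sketch below shows the same strict/lax split with fxamacker/cbor's public options:

    package main

    import (
    	"fmt"

    	"github.com/fxamacker/cbor/v2"
    )

    type spec struct {
    	Replicas int64 `cbor:"replicas"`
    }

    func main() {
    	strictOpts := cbor.DecOptions{ExtraReturnErrors: cbor.ExtraDecErrorUnknownField}
    	strict, err := strictOpts.DecMode()
    	if err != nil {
    		panic(err)
    	}

    	laxOpts := strictOpts
    	laxOpts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit, as DecodeLax does
    	lax, err := laxOpts.DecMode()
    	if err != nil {
    		panic(err)
    	}

    	// A map containing a key with no matching struct field.
    	payload, _ := cbor.Marshal(map[string]interface{}{"replicas": 3, "unknown": true})

    	var s spec
    	fmt.Println(strict.Unmarshal(payload, &s)) // unknown-field error
    	fmt.Println(lax.Unmarshal(payload, &s), s) // <nil> {3}
    }
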
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
    new file mode 100644
    index 0000000000..61f3f145f5
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
    @@ -0,0 +1,36 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package modes
    +
    +import (
    +	"github.com/fxamacker/cbor/v2"
    +)
    +
    +var Diagnostic cbor.DiagMode = func() cbor.DiagMode {
    +	opts := Decode.DecOptions()
    +	diagnostic, err := cbor.DiagOptions{
    +		ByteStringText: true,
    +
    +		MaxNestedLevels:  opts.MaxNestedLevels,
    +		MaxArrayElements: opts.MaxArrayElements,
    +		MaxMapPairs:      opts.MaxMapPairs,
    +	}.DiagMode()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return diagnostic
    +}()
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
    new file mode 100644
    index 0000000000..c669313844
    --- /dev/null
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
    @@ -0,0 +1,155 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package modes
    +
    +import (
    +	"io"
    +
    +	"github.com/fxamacker/cbor/v2"
    +)
    +
    +var Encode = EncMode{
    +	delegate: func() cbor.UserBufferEncMode {
    +		encode, err := cbor.EncOptions{
    +			// Map keys need to be sorted to have deterministic output, and this is the order
    +			// defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements".
    +			Sort: cbor.SortBytewiseLexical,
    +
    +			// CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store
    +			// floats in the smallest width that preserves value so that equivalent float32 and
    +			// float64 values encode to identical bytes, as they do in a JSON
    +			// encoding. Satisfies one of the "Core Deterministic Encoding Requirements".
    +			ShortestFloat: cbor.ShortestFloat16,
    +
    +			// Error on attempt to encode NaN and infinite values. This is what the JSON
    +			// serializer does.
    +			NaNConvert: cbor.NaNConvertReject,
    +			InfConvert: cbor.InfConvertReject,
    +
    +			// Error on attempt to encode math/big.Int values, which can't be faithfully
    +			// roundtripped through Unstructured in general (the dynamic numeric types allowed
    +			// in Unstructured are limited to float64 and int64).
    +			BigIntConvert: cbor.BigIntConvertReject,
    +
    +			// MarshalJSON for time.Time writes RFC3339 with nanos.
    +			Time: cbor.TimeRFC3339Nano,
    +
    +			// The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by
    +			// the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no
    +			// reliable way of knowing that a particular string originated from serializing a
    +			// time.Time), so producing tag 0 has little use.
    +			TimeTag: cbor.EncTagNone,
    +
    +			// Indefinite-length items have multiple encodings and aren't being used anyway, so
    +			// disable to avoid an opportunity for nondeterminism.
    +			IndefLength: cbor.IndefLengthForbidden,
    +
    +			// Preserve distinction between nil and empty for slices and maps.
    +			NilContainers: cbor.NilContainerAsNull,
    +
    +			// OK to produce tags.
    +			TagsMd: cbor.TagsAllowed,
    +
    +			// Use the same definition of "empty" as encoding/json.
    +			OmitEmpty: cbor.OmitEmptyGoValue,
    +
    +			// The CBOR types text string and byte string are structurally equivalent, with the
    +			// semantic difference that a text string whose content is an invalid UTF-8 sequence
    +			// is itself invalid. We reject all invalid text strings at decode time and do not
    +			// validate or sanitize all Go strings at encode time. Encoding Go strings to the
    +			// byte string type is comparable to the existing Protobuf behavior and cheaply
    +			// ensures that the output is valid CBOR.
    +			String: cbor.StringToByteString,
    +
    +			// Encode struct field names to the byte string type rather than the text string
    +			// type.
    +			FieldName: cbor.FieldNameToByteString,
    +
    +			// Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte
    +			// strings.
    +			ByteArray: cbor.ByteArrayToArray,
    +
    +			// Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64
    +			// encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to
    +			// interoperate with the existing JSON behavior. This indicates to the decoder that,
    +			// when decoding into a string (or unstructured), the resulting value should be the
    +			// base64 encoding of the original bytes. No base64 encoding or decoding needs to be
    +			// performed for []byte-to-CBOR-to-[]byte roundtrips.
    +			ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64,
    +
    +			// Disable default recognition of types implementing encoding.BinaryMarshaler, which
    +			// is not recognized for JSON encoding.
    +			BinaryMarshaler: cbor.BinaryMarshalerNone,
    +		}.UserBufferEncMode()
    +		if err != nil {
    +			panic(err)
    +		}
    +		return encode
    +	}(),
    +}
    +
    +var EncodeNondeterministic = EncMode{
    +	delegate: func() cbor.UserBufferEncMode {
    +		opts := Encode.options()
    +		opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0.
    +		em, err := opts.UserBufferEncMode()
    +		if err != nil {
    +			panic(err)
    +		}
    +		return em
    +	}(),
    +}
    +
    +type EncMode struct {
    +	delegate cbor.UserBufferEncMode
    +}
    +
    +func (em EncMode) options() cbor.EncOptions {
    +	return em.delegate.EncOptions()
    +}
    +
    +func (em EncMode) MarshalTo(v interface{}, w io.Writer) error {
    +	if buf, ok := w.(*buffer); ok {
    +		return em.delegate.MarshalToBuffer(v, &buf.Buffer)
    +	}
    +
    +	buf := buffers.Get()
    +	defer buffers.Put(buf)
    +	if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil {
    +		return err
    +	}
    +
    +	if _, err := io.Copy(w, buf); err != nil {
    +		return err
    +	}
    +
    +	return nil
    +}
    +
    +func (em EncMode) Marshal(v interface{}) ([]byte, error) {
    +	buf := buffers.Get()
    +	defer buffers.Put(buf)
    +
    +	if err := em.MarshalTo(v, &buf.Buffer); err != nil {
    +		return nil, err
    +	}
    +
    +	clone := make([]byte, buf.Len())
    +	copy(clone, buf.Bytes())
    +
    +	return clone, nil
    +}
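For context on the wrapper above: Marshal and MarshalTo reuse pooled buffers and only copy bytes out when the caller needs an owned slice. Below is a minimal, standard-library-only sketch of that pooling pattern; the vendored `buffers` helper and `buffer` type are defined elsewhere in the package, so `bufPool` and the JSON encoder here are stand-in assumptions, not the vendored API.

package main

import (
	"bytes"
	"encoding/json" // stand-in encoder; the vendored code uses a CBOR encoding mode
	"fmt"
	"io"
	"sync"
)

// bufPool plays the role of the package's buffer pool (assumption).
var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

// marshalTo encodes v and streams the result into w via a pooled buffer.
func marshalTo(v interface{}, w io.Writer) error {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)

	if err := json.NewEncoder(buf).Encode(v); err != nil {
		return err
	}
	_, err := io.Copy(w, buf)
	return err
}

// marshal returns a fresh slice; the bytes must be copied out because the
// pooled buffer is returned (and may be reused) when the function exits.
func marshal(v interface{}) ([]byte, error) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)

	if err := json.NewEncoder(buf).Encode(v); err != nil {
		return nil, err
	}
	clone := make([]byte, buf.Len())
	copy(clone, buf.Bytes())
	return clone, nil
}

func main() {
	out, _ := marshal(map[string]int{"a": 1})
	fmt.Printf("%s", out)
}

The point mirrored from the vendored code is that Marshal must copy out of the pooled buffer before returning it, while MarshalTo can stream directly into the caller's writer.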
    diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
    index ce77c7910a..1680c149f9 100644
    --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go
    +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
    @@ -46,6 +46,7 @@ const (
     	ContentTypeJSON     string = "application/json"
     	ContentTypeYAML     string = "application/yaml"
     	ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
    +	ContentTypeCBOR     string = "application/cbor"
     )
     
     // RawExtension is used to hold extensions in external versions.
    diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
    index 9b3c9c8d5a..1ab8fd396e 100644
    --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
    +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
    @@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
     
     	// RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
     	// data written to data, or be larger than data and a different array.
    -	n := len(data)
     	m := json.RawMessage(data[:0])
     	if err := r.decoder.Decode(&m); err != nil {
     		return 0, err
    @@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
     	// If capacity of data is less than length of the message, decoder will allocate a new slice
     	// and set m to it, which means we need to copy the partial result back into data and preserve
     	// the remaining result for subsequent reads.
    -	if len(m) > n {
    -		//nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
    -		data = append(data[0:0], m[:n]...)
    -		r.remaining = m[n:]
    -		return n, io.ErrShortBuffer
    +	if len(m) > cap(data) {
    +		copy(data, m)
    +		r.remaining = m[len(data):]
    +		return len(data), io.ErrShortBuffer
     	}
    +
    +	if len(m) > len(data) {
    +		// The bytes beyond len(data) were stored in data's underlying array, which we do
    +		// not own after this function returns.
    +		r.remaining = append([]byte(nil), m[len(data):]...)
    +		return len(data), io.ErrShortBuffer
    +	}
    +
     	return len(m), nil
     }
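The rewritten Read above returns io.ErrShortBuffer along with a partial frame and stashes the overflow in r.remaining (copying it when it still aliases the caller's array). Callers therefore loop until a read completes without io.ErrShortBuffer. A hedged, self-contained sketch of that consumer loop follows; shortBufferReader is a toy stand-in, not the vendored jsonFrameReader.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// shortBufferReader hands out at most len(p) bytes per call and reports
// io.ErrShortBuffer while data remains, imitating the framer's contract.
type shortBufferReader struct{ remaining []byte }

func (r *shortBufferReader) Read(p []byte) (int, error) {
	n := copy(p, r.remaining)
	r.remaining = r.remaining[n:]
	if len(r.remaining) > 0 {
		return n, io.ErrShortBuffer
	}
	return n, nil
}

// readFrame accumulates partial reads until the reader stops reporting
// io.ErrShortBuffer, which is how consumers of the framer recover a full frame.
func readFrame(r io.Reader, buf []byte) ([]byte, error) {
	var frame bytes.Buffer
	for {
		n, err := r.Read(buf)
		frame.Write(buf[:n])
		if err == io.ErrShortBuffer {
			continue
		}
		return frame.Bytes(), err
	}
}

func main() {
	r := &shortBufferReader{remaining: []byte(`{"kind":"Status","code":200}`)}
	frame, err := readFrame(r, make([]byte, 8))
	fmt.Println(string(frame), err)
}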
     
    diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
    index a32fce5a0c..8054b98676 100644
    --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
    +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
    @@ -116,6 +116,15 @@ func IsUpgradeFailure(err error) bool {
     	return errors.As(err, &upgradeErr)
     }
     
+// IsHTTPSProxyError returns true if the error is the Gorilla/Websockets HTTPS proxy dial error;
    +// false otherwise (see https://github.com/kubernetes/kubernetes/issues/126134).
    +func IsHTTPSProxyError(err error) bool {
    +	if err == nil {
    +		return false
    +	}
    +	return strings.Contains(err.Error(), "proxy: unknown scheme: https")
    +}
    +
     // IsUpgradeRequest returns true if the given request is a connection upgrade request
     func IsUpgradeRequest(req *http.Request) bool {
     	for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
    diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
    index f358c794d1..5fd2e16c84 100644
    --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
    +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
    @@ -25,6 +25,7 @@ import (
     	"strconv"
     	"strings"
     
    +	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
     	"k8s.io/klog/v2"
     )
     
    @@ -92,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
     	return json.Unmarshal(value, &intstr.IntVal)
     }
     
    +func (intstr *IntOrString) UnmarshalCBOR(value []byte) error {
    +	if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil {
    +		intstr.Type = String
    +		return nil
    +	}
    +
    +	if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil {
    +		return err
    +	}
    +
    +	intstr.Type = Int
    +	return nil
    +}
    +
     // String returns the string value, or the Itoa of the int value.
     func (intstr *IntOrString) String() string {
     	if intstr == nil {
    @@ -126,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) {
     	}
     }
     
    +func (intstr IntOrString) MarshalCBOR() ([]byte, error) {
    +	switch intstr.Type {
    +	case Int:
    +		return cbor.Marshal(intstr.IntVal)
    +	case String:
    +		return cbor.Marshal(intstr.StrVal)
    +	default:
    +		return nil, fmt.Errorf("impossible IntOrString.Type")
    +	}
    +}
    +
     // OpenAPISchemaType is used by the kube-openapi generator when constructing
     // the OpenAPI spec of this type.
     //
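UnmarshalCBOR above probes the string decoding first and falls back to the integer, which is safe because a CBOR integer does not decode into a Go string. A small sketch of that probe-then-fallback order using fxamacker/cbor/v2 (the library this serializer wraps) with default modes; the values are illustrative and IntOrString itself is not used.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	for _, v := range []interface{}{8080, "https"} {
		data, err := cbor.Marshal(v)
		if err != nil {
			panic(err)
		}

		// Mirror IntOrString.UnmarshalCBOR: try string first, then int32.
		var s string
		if err := cbor.Unmarshal(data, &s); err == nil {
			fmt.Println("string:", s)
			continue
		}
		var i int32
		if err := cbor.Unmarshal(data, &i); err != nil {
			panic(err)
		}
		fmt.Println("int:", i)
	}
}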
    diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
    index 3674914f70..4fe0c5eb25 100644
    --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
    +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
    @@ -17,6 +17,7 @@ limitations under the License.
     package runtime
     
     import (
    +	"context"
     	"fmt"
     	"net/http"
     	"runtime"
    @@ -35,7 +36,7 @@ var (
     )
     
     // PanicHandlers is a list of functions which will be invoked when a panic happens.
    -var PanicHandlers = []func(interface{}){logPanic}
    +var PanicHandlers = []func(context.Context, interface{}){logPanic}
     
     // HandleCrash simply catches a crash and logs an error. Meant to be called via
     // defer.  Additional context-specific handlers can be provided, and will be
    @@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic}
     // handlers and logging the panic message.
     //
     // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
    +//
    +// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
     func HandleCrash(additionalHandlers ...func(interface{})) {
     	if r := recover(); r != nil {
    -		for _, fn := range PanicHandlers {
    -			fn(r)
    -		}
    -		for _, fn := range additionalHandlers {
    -			fn(r)
    -		}
    -		if ReallyCrash {
    -			// Actually proceed to panic.
    -			panic(r)
    +		additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
    +		for i, handler := range additionalHandlers {
    +			handler := handler // capture loop variable
    +			additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) {
    +				handler(r)
    +			}
     		}
    +
    +		handleCrash(context.Background(), r, additionalHandlersWithContext...)
    +	}
    +}
    +
    +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via
    +// defer.  Additional context-specific handlers can be provided, and will be
+// called in case of panic.  HandleCrashWithContext actually crashes, after calling the
    +// handlers and logging the panic message.
    +//
    +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
    +//
    +// The context is used to determine how to log.
    +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) {
    +	if r := recover(); r != nil {
    +		handleCrash(ctx, r, additionalHandlers...)
    +	}
    +}
    +
+// handleCrash is the common implementation of HandleCrash and HandleCrashWithContext.
    +// Having those call a common implementation ensures that the stack depth
    +// is the same regardless through which path the handlers get invoked.
    +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) {
    +	for _, fn := range PanicHandlers {
    +		fn(ctx, r)
    +	}
    +	for _, fn := range additionalHandlers {
    +		fn(ctx, r)
    +	}
    +	if ReallyCrash {
    +		// Actually proceed to panic.
    +		panic(r)
     	}
     }
     
     // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
    -func logPanic(r interface{}) {
    +func logPanic(ctx context.Context, r interface{}) {
     	if r == http.ErrAbortHandler {
     		// honor the http.ErrAbortHandler sentinel panic value:
     		//   ErrAbortHandler is a sentinel panic value to abort a handler.
    @@ -73,10 +105,20 @@ func logPanic(r interface{}) {
     	const size = 64 << 10
     	stacktrace := make([]byte, size)
     	stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
    +
    +	// We don't really know how many call frames to skip because the Go
    +	// panic handler is between us and the code where the panic occurred.
    +	// If it's one function (as in Go 1.21), then skipping four levels
+	// gets us to the function which called the `defer HandleCrashWithContext(...)`.
    +	logger := klog.FromContext(ctx).WithCallDepth(4)
    +
    +	// For backwards compatibility, conversion to string
+	// is handled here instead of deferring to the logging
    +	// backend.
     	if _, ok := r.(string); ok {
    -		klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
    +		logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace))
     	} else {
    -		klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
    +		logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace))
     	}
     }
     
    @@ -84,35 +126,76 @@ func logPanic(r interface{}) {
     // error occurs.
     // TODO(lavalamp): for testability, this and the below HandleError function
     // should be packaged up into a testable and reusable object.
    -var ErrorHandlers = []func(error){
    +var ErrorHandlers = []ErrorHandler{
     	logError,
    -	(&rudimentaryErrorBackoff{
    -		lastErrorTime: time.Now(),
    -		// 1ms was the number folks were able to stomach as a global rate limit.
    -		// If you need to log errors more than 1000 times a second you
    -		// should probably consider fixing your code instead. :)
    -		minPeriod: time.Millisecond,
    -	}).OnError,
    +	func(_ context.Context, _ error, _ string, _ ...interface{}) {
    +		(&rudimentaryErrorBackoff{
    +			lastErrorTime: time.Now(),
    +			// 1ms was the number folks were able to stomach as a global rate limit.
    +			// If you need to log errors more than 1000 times a second you
    +			// should probably consider fixing your code instead. :)
    +			minPeriod: time.Millisecond,
    +		}).OnError()
    +	},
     }
     
    +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{})
    +
 // HandleError is a method to invoke when a non-user facing piece of code cannot
     // return an error and needs to indicate it has been ignored. Invoking this method
     // is preferable to logging the error - the default behavior is to log but the
     // errors may be sent to a remote server for analysis.
    +//
    +// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
     func HandleError(err error) {
     	// this is sometimes called with a nil error.  We probably shouldn't fail and should do nothing instead
     	if err == nil {
     		return
     	}
     
    +	handleError(context.Background(), err, "Unhandled Error")
    +}
    +
+// HandleErrorWithContext is a method to invoke when a non-user facing piece of code cannot
    +// return an error and needs to indicate it has been ignored. Invoking this method
    +// is preferable to logging the error - the default behavior is to log but the
    +// errors may be sent to a remote server for analysis. The context is used to
    +// determine how to log the error.
    +//
    +// If contextual logging is enabled, the default log output is equivalent to
    +//
    +//	logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...)
    +//
    +// Without contextual logging, it is equivalent to:
    +//
    +//	klog.ErrorS(err, msg, keysAndValues...)
    +//
    +// In contrast to HandleError, passing nil for the error is still going to
    +// trigger a log entry. Don't construct a new error or wrap an error
+// with fmt.Errorf. Instead, add additional information via the message
    +// and key/value pairs.
    +//
    +// This variant should be used instead of HandleError because it supports
    +// structured, contextual logging.
    +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
    +	handleError(ctx, err, msg, keysAndValues...)
    +}
    +
    +// handleError is the common implementation of HandleError and HandleErrorWithContext.
    +// Using this common implementation ensures that the stack depth
    +// is the same regardless through which path the handlers get invoked.
    +func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
     	for _, fn := range ErrorHandlers {
    -		fn(err)
    +		fn(ctx, err, msg, keysAndValues...)
     	}
     }
     
    -// logError prints an error with the call stack of the location it was reported
    -func logError(err error) {
    -	klog.ErrorDepth(2, err)
    +// logError prints an error with the call stack of the location it was reported.
+// It expects to be called as: caller -> HandleError[WithContext] -> handleError -> logError.
    +func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
    +	logger := klog.FromContext(ctx).WithCallDepth(3)
    +	logger = klog.LoggerWithName(logger, "UnhandledError")
    +	logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs.
     }
     
     type rudimentaryErrorBackoff struct {
    @@ -125,7 +208,7 @@ type rudimentaryErrorBackoff struct {
     
     // OnError will block if it is called more often than the embedded period time.
     // This will prevent overly tight hot error loops.
    -func (r *rudimentaryErrorBackoff) OnError(error) {
    +func (r *rudimentaryErrorBackoff) OnError() {
     	now := time.Now() // start the timer before acquiring the lock
     	r.lastErrorTimeLock.Lock()
     	d := now.Sub(r.lastErrorTime)
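The new context-aware variants route panics and swallowed errors through the logger carried in ctx (falling back to klog's global logger when none is set). A brief usage sketch under the conventional utilruntime alias; worker and doStep are illustrative names, not part of the package.

package main

import (
	"context"
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func worker(ctx context.Context) {
	// Logs any panic (with stack trace) via the logger in ctx, then re-panics
	// if ReallyCrash is set, exactly as the non-context variant does.
	defer utilruntime.HandleCrashWithContext(ctx)

	if err := doStep(); err != nil {
		// Per the doc comment above: don't wrap the error; add context as
		// key/value pairs instead.
		utilruntime.HandleErrorWithContext(ctx, err, "step failed", "step", "doStep")
		return
	}
}

func doStep() error { return errors.New("not implemented") }

func main() {
	worker(context.Background())
}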
    diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
    index b76129a1ca..cd961c8c59 100644
    --- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
    +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
    @@ -68,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] {
     // Clear empties the set.
     // It is preferable to replace the set with a newly constructed set,
     // but not all callers can do that (when there are other references to the map).
    -// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN
    -// can't be cleared because NaN can't be removed.
    -// For sets containing items of a type that is reflexive for ==,
    -// this is optimized to a single call to runtime.mapclear().
     func (s Set[T]) Clear() Set[T] {
    -	for key := range s {
    -		delete(s, key)
    -	}
    +	clear(s)
     	return s
     }
     
    diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
    index 920c113bbd..6825a808e6 100644
    --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
    +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
    @@ -1361,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me
     		// original. Otherwise, check if we want to preserve it or skip it.
     		// Preserving the null value is useful when we want to send an explicit
     		// delete to the API server.
    +		// In some cases, this may lead to inconsistent behavior with create.
    +		// ref: https://github.com/kubernetes/kubernetes/issues/123304
+		// To avoid breaking compatibility, we made corresponding changes on the
+		// client side to ensure that the create and patch behaviors are idempotent.
     		if patchV == nil {
     			delete(original, k)
     			if mergeOptions.IgnoreUnmatchedNulls {
    diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go
    index 2292ba1376..b7812ff2d1 100644
    --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go
    +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go
    @@ -23,6 +23,8 @@ import (
     	"regexp"
     	"strconv"
     	"strings"
    +
    +	apimachineryversion "k8s.io/apimachinery/pkg/version"
     )
     
     // Version is an opaque representation of a version number
    @@ -31,6 +33,7 @@ type Version struct {
     	semver        bool
     	preRelease    string
     	buildMetadata string
    +	info          apimachineryversion.Info
     }
     
     var (
    @@ -145,6 +148,43 @@ func MustParseGeneric(str string) *Version {
     	return v
     }
     
+// Parse tries ParseSemantic first to keep more information.
+// If ParseSemantic fails, it falls back to ParseGeneric.
    +func Parse(str string) (*Version, error) {
    +	v, err := parse(str, true)
    +	if err != nil {
    +		return parse(str, false)
    +	}
    +	return v, err
    +}
    +
    +// MustParse is like Parse except that it panics on error
    +func MustParse(str string) *Version {
    +	v, err := Parse(str)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return v
    +}
    +
    +// ParseMajorMinor parses a "generic" version string and returns a version with the major and minor version.
    +func ParseMajorMinor(str string) (*Version, error) {
    +	v, err := ParseGeneric(str)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return MajorMinor(v.Major(), v.Minor()), nil
    +}
    +
    +// MustParseMajorMinor is like ParseMajorMinor except that it panics on error
    +func MustParseMajorMinor(str string) *Version {
    +	v, err := ParseMajorMinor(str)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return v
    +}
    +
     // ParseSemantic parses a version string that exactly obeys the syntax and semantics of
     // the "Semantic Versioning" specification (http://semver.org/) (although it ignores
     // leading and trailing whitespace, and allows the version to be preceded by "v"). For
    @@ -215,6 +255,32 @@ func (v *Version) WithMinor(minor uint) *Version {
     	return &result
     }
     
+// OffsetMinor returns a copy of the version with the minor offset by the given amount, with the same major and no patch.
+// If -offset >= current minor, the minor will be 0.
    +func (v *Version) OffsetMinor(offset int) *Version {
    +	var minor uint
    +	if offset >= 0 {
    +		minor = v.Minor() + uint(offset)
    +	} else {
    +		diff := uint(-offset)
    +		if diff < v.Minor() {
    +			minor = v.Minor() - diff
    +		}
    +	}
    +	return MajorMinor(v.Major(), minor)
    +}
    +
+// SubtractMinor returns the version diff minor versions back, with the same major and no patch.
+// If diff >= current minor, the minor will be 0.
    +func (v *Version) SubtractMinor(diff uint) *Version {
    +	return v.OffsetMinor(-int(diff))
    +}
    +
    +// AddMinor returns the version diff minor versions forward, with the same major and no patch.
    +func (v *Version) AddMinor(diff uint) *Version {
    +	return v.OffsetMinor(int(diff))
    +}
    +
     // WithPatch returns copy of the version object with requested patch number
     func (v *Version) WithPatch(patch uint) *Version {
     	result := *v
    @@ -224,6 +290,9 @@ func (v *Version) WithPatch(patch uint) *Version {
     
     // WithPreRelease returns copy of the version object with requested prerelease
     func (v *Version) WithPreRelease(preRelease string) *Version {
    +	if len(preRelease) == 0 {
    +		return v
    +	}
     	result := *v
     	result.components = []uint{v.Major(), v.Minor(), v.Patch()}
     	result.preRelease = preRelease
    @@ -345,6 +414,17 @@ func onlyZeros(array []uint) bool {
     	return true
     }
     
    +// EqualTo tests if a version is equal to a given version.
    +func (v *Version) EqualTo(other *Version) bool {
    +	if v == nil {
    +		return other == nil
    +	}
    +	if other == nil {
    +		return false
    +	}
    +	return v.compareInternal(other) == 0
    +}
    +
     // AtLeast tests if a version is at least equal to a given minimum version. If both
     // Versions are Semantic Versions, this will use the Semantic Version comparison
     // algorithm. Otherwise, it will compare only the numeric components, with non-present
    @@ -360,6 +440,11 @@ func (v *Version) LessThan(other *Version) bool {
     	return v.compareInternal(other) == -1
     }
     
    +// GreaterThan tests if a version is greater than a given version.
    +func (v *Version) GreaterThan(other *Version) bool {
    +	return v.compareInternal(other) == 1
    +}
    +
     // Compare compares v against a version string (which will be parsed as either Semantic
     // or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if
     // it is greater than other, or 0 if they are equal.
    @@ -370,3 +455,30 @@ func (v *Version) Compare(other string) (int, error) {
     	}
     	return v.compareInternal(ov), nil
     }
    +
    +// WithInfo returns copy of the version object with requested info
    +func (v *Version) WithInfo(info apimachineryversion.Info) *Version {
    +	result := *v
    +	result.info = info
    +	return &result
    +}
    +
    +func (v *Version) Info() *apimachineryversion.Info {
    +	if v == nil {
    +		return nil
    +	}
+	// In case info is empty, or the major and minor in info differ from the actual major and minor, overwrite them with the parsed values.
    +	v.info.Major = itoa(v.Major())
    +	v.info.Minor = itoa(v.Minor())
    +	if v.info.GitVersion == "" {
    +		v.info.GitVersion = v.String()
    +	}
    +	return &v.info
    +}
    +
    +func itoa(i uint) string {
    +	if i == 0 {
    +		return ""
    +	}
    +	return strconv.Itoa(int(i))
    +}
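A short sketch of the new version helpers added above; the version strings are illustrative. SubtractMinor clamps at minor 0 rather than underflowing, and Parse keeps semantic-version details when the input qualifies.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	cur := version.MustParse("1.31.3") // ParseSemantic succeeds, so semver details are kept

	oldest := cur.SubtractMinor(3) // 1.28, same major, no patch
	newest := cur.AddMinor(1)      // 1.32

	fmt.Println(oldest, newest)
	fmt.Println(cur.GreaterThan(oldest))                         // true
	fmt.Println(version.MustParse("1.0").SubtractMinor(5))       // clamps to 1.0
	fmt.Println(cur.EqualTo(version.MustParseGeneric("1.31.3"))) // numeric components match
}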
    diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
    index b6c7bbfa8f..ce37fd8c18 100644
    --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go
    +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
    @@ -27,13 +27,25 @@ import (
     
     // Interface can be implemented by anything that knows how to watch and report changes.
     type Interface interface {
    -	// Stop stops watching. Will close the channel returned by ResultChan(). Releases
    -	// any resources used by the watch.
    +	// Stop tells the producer that the consumer is done watching, so the
    +	// producer should stop sending events and close the result channel. The
    +	// consumer should keep watching for events until the result channel is
    +	// closed.
    +	//
    +	// Because some implementations may create channels when constructed, Stop
    +	// must always be called, even if the consumer has not yet called
    +	// ResultChan().
    +	//
    +	// Only the consumer should call Stop(), not the producer. If the producer
    +	// errors and needs to stop the watch prematurely, it should instead send
    +	// an error event and close the result channel.
     	Stop()
     
    -	// ResultChan returns a chan which will receive all the events. If an error occurs
    -	// or Stop() is called, the implementation will close this channel and
    -	// release any resources used by the watch.
    +	// ResultChan returns a channel which will receive events from the event
    +	// producer. If an error occurs or Stop() is called, the producer must
    +	// close this channel and release any resources used by the watch.
    +	// Closing the result channel tells the consumer that no more events will be
    +	// sent.
     	ResultChan() <-chan Event
     }
     
    @@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event {
     func (pw *ProxyWatcher) StopChan() <-chan struct{} {
     	return pw.stopCh
     }
    +
    +// MockWatcher implements watch.Interface with mockable functions.
    +type MockWatcher struct {
    +	StopFunc       func()
    +	ResultChanFunc func() <-chan Event
    +}
    +
    +var _ Interface = &MockWatcher{}
    +
    +// Stop calls StopFunc
    +func (mw MockWatcher) Stop() {
    +	mw.StopFunc()
    +}
    +
    +// ResultChan calls ResultChanFunc
    +func (mw MockWatcher) ResultChan() <-chan Event {
    +	return mw.ResultChanFunc()
    +}
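MockWatcher makes it easy to hand a canned event stream to code that consumes watch.Interface. A hedged example that replays one Added event; the buffered channel and the metav1.Status payload are illustrative choices.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	events := make(chan watch.Event, 1)
	events <- watch.Event{Type: watch.Added, Object: &metav1.Status{Message: "hello"}}
	close(events)

	w := watch.MockWatcher{
		StopFunc:       func() { /* nothing to tear down in this sketch */ },
		ResultChanFunc: func() <-chan watch.Event { return events },
	}
	// Per the Interface contract above, the consumer must always call Stop.
	defer w.Stop()

	for ev := range w.ResultChan() {
		fmt.Println(ev.Type)
	}
}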
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/OWNERS b/vendor/k8s.io/client-go/applyconfigurations/OWNERS
    new file mode 100644
    index 0000000000..ea0928429d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/OWNERS
    @@ -0,0 +1,5 @@
    +# See the OWNERS docs at https://go.k8s.io/owners
    +
    +approvers:
    +  - apelisse
    +  - jpbetz
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go
    index 64422c1df4..0d50d44ac2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use
    +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use
     // with apply.
     type AuditAnnotationApplyConfiguration struct {
     	Key             *string `json:"key,omitempty"`
     	ValueExpression *string `json:"valueExpression,omitempty"`
     }
     
    -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with
    +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with
     // apply.
     func AuditAnnotation() *AuditAnnotationApplyConfiguration {
     	return &AuditAnnotationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go
    index 38b7475cc4..1f890bcfcb 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use
    +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use
     // with apply.
     type ExpressionWarningApplyConfiguration struct {
     	FieldRef *string `json:"fieldRef,omitempty"`
     	Warning  *string `json:"warning,omitempty"`
     }
     
    -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with
    +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with
     // apply.
     func ExpressionWarning() *ExpressionWarningApplyConfiguration {
     	return &ExpressionWarningApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go
    index ea1dc377b9..d8a816f1e2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use
    +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use
     // with apply.
     type MatchConditionApplyConfiguration struct {
     	Name       *string `json:"name,omitempty"`
     	Expression *string `json:"expression,omitempty"`
     }
     
    -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with
    +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with
     // apply.
     func MatchCondition() *MatchConditionApplyConfiguration {
     	return &MatchConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
    index d8e9828947..e8e371d7dd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use
    +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use
     // with apply.
     type MatchResourcesApplyConfiguration struct {
     	NamespaceSelector    *v1.LabelSelectorApplyConfiguration         `json:"namespaceSelector,omitempty"`
    @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct {
     	MatchPolicy          *apiadmissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
     }
     
    -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with
    +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with
     // apply.
     func MatchResources() *MatchResourcesApplyConfiguration {
     	return &MatchResourcesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go
    index faff51a041..cd8096f902 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use
    +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use
     // with apply.
     type MutatingWebhookApplyConfiguration struct {
     	Name                    *string                                         `json:"name,omitempty"`
    @@ -40,7 +40,7 @@ type MutatingWebhookApplyConfiguration struct {
     	MatchConditions         []MatchConditionApplyConfiguration              `json:"matchConditions,omitempty"`
     }
     
    -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with
    +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with
     // apply.
     func MutatingWebhook() *MutatingWebhookApplyConfiguration {
     	return &MutatingWebhookApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
    index 61c8f667d2..58b71d6d58 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use
    +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use
     // with apply.
     type MutatingWebhookConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct {
     	Webhooks                         []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
     }
     
    -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with
    +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with
     // apply.
     func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration {
     	b := &MutatingWebhookConfigurationApplyConfiguration{}
    @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ...
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
    index be8d5206cb..eda3bf635a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
    @@ -22,14 +22,14 @@ import (
     	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
     )
     
    -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use
    +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use
     // with apply.
     type NamedRuleWithOperationsApplyConfiguration struct {
     	ResourceNames                        []string `json:"resourceNames,omitempty"`
     	RuleWithOperationsApplyConfiguration `json:",inline"`
     }
     
    -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with
    +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with
     // apply.
     func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration {
     	return &NamedRuleWithOperationsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go
    index b77a30cf91..07577929ab 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use
    +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use
     // with apply.
     type ParamKindApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     	Kind       *string `json:"kind,omitempty"`
     }
     
    -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with
    +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with
     // apply.
     func ParamKind() *ParamKindApplyConfiguration {
     	return &ParamKindApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
    index b52becda5e..73cda9b04d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use
    +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
     // with apply.
     type ParamRefApplyConfiguration struct {
     	Name                    *string                                              `json:"name,omitempty"`
    @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct {
     	ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
     }
     
    -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with
    +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with
     // apply.
     func ParamRef() *ParamRefApplyConfiguration {
     	return &ParamRefApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
    index 41d4179df4..36a93643c1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/admissionregistration/v1"
     )
     
    -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use
    +// RuleApplyConfiguration represents a declarative configuration of the Rule type for use
     // with apply.
     type RuleApplyConfiguration struct {
     	APIGroups   []string      `json:"apiGroups,omitempty"`
    @@ -31,7 +31,7 @@ type RuleApplyConfiguration struct {
     	Scope       *v1.ScopeType `json:"scope,omitempty"`
     }
     
    -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with
    +// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with
     // apply.
     func Rule() *RuleApplyConfiguration {
     	return &RuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
    index 59bbb8fe3d..92bddd502a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/admissionregistration/v1"
     )
     
    -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use
    +// RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use
     // with apply.
     type RuleWithOperationsApplyConfiguration struct {
     	Operations             []v1.OperationType `json:"operations,omitempty"`
     	RuleApplyConfiguration `json:",inline"`
     }
     
    -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with
    +// RuleWithOperationsApplyConfiguration constructs a declarative configuration of the RuleWithOperations type for use with
     // apply.
     func RuleWithOperations() *RuleWithOperationsApplyConfiguration {
     	return &RuleWithOperationsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go
    index 2cd55d9ea2..239780664d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use
    +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use
     // with apply.
     type ServiceReferenceApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
    @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct {
     	Port      *int32  `json:"port,omitempty"`
     }
     
    -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with
    +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with
     // apply.
     func ServiceReference() *ServiceReferenceApplyConfiguration {
     	return &ServiceReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go
    index 8621ce71ec..723d10ecf5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use
    +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use
     // with apply.
     type TypeCheckingApplyConfiguration struct {
     	ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"`
     }
     
    -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with
    +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with
     // apply.
     func TypeChecking() *TypeCheckingApplyConfiguration {
     	return &TypeCheckingApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
    index fc96a8bdc6..841209cae1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use
    +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use
     // with apply.
     type ValidatingAdmissionPolicyApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct {
     	Status                           *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with
    +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with
     // apply.
     func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration {
     	b := &ValidatingAdmissionPolicyApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
    index 5bc41a0f52..1acad056f3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use
    +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use
     // with apply.
     type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
     	Spec                             *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
    +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
     // apply.
     func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
     	b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go
    index da6ecbe371..eb426af42a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go
    @@ -22,7 +22,7 @@ import (
     	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
     )
     
    -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
    +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
     // with apply.
     type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
     	PolicyName        *string                                    `json:"policyName,omitempty"`
    @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
     	ValidationActions []admissionregistrationv1.ValidationAction `json:"validationActions,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with
    +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with
     // apply.
     func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
     	return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go
    index eb930b9b1c..1635b30a61 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go
    @@ -22,7 +22,7 @@ import (
     	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
     )
     
    -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use
    +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use
     // with apply.
     type ValidatingAdmissionPolicySpecApplyConfiguration struct {
     	ParamKind        *ParamKindApplyConfiguration               `json:"paramKind,omitempty"`
    @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct {
     	Variables        []VariableApplyConfiguration               `json:"variables,omitempty"`
     }
     
    -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with
    +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with
     // apply.
     func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration {
     	return &ValidatingAdmissionPolicySpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go
    index 25cd67f08d..e6f4e84591 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go
    @@ -22,7 +22,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use
    +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use
     // with apply.
     type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
     	ObservedGeneration *int64                               `json:"observedGeneration,omitempty"`
    @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
     	Conditions         []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
    +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
     // apply.
     func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration {
     	return &ValidatingAdmissionPolicyStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go
    index 613856bac7..a2c705eb5c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use
    +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use
     // with apply.
     type ValidatingWebhookApplyConfiguration struct {
     	Name                    *string                                    `json:"name,omitempty"`
    @@ -39,7 +39,7 @@ type ValidatingWebhookApplyConfiguration struct {
     	MatchConditions         []MatchConditionApplyConfiguration         `json:"matchConditions,omitempty"`
     }
     
    -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with
    +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with
     // apply.
     func ValidatingWebhook() *ValidatingWebhookApplyConfiguration {
     	return &ValidatingWebhookApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
    index 811bfdf0b6..0d1a6c81ae 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use
    +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use
     // with apply.
     type ValidatingWebhookConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct {
     	Webhooks                         []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
     }
     
    -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with
    +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with
     // apply.
     func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration {
     	b := &ValidatingWebhookConfigurationApplyConfiguration{}
    @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values .
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
    index ac29d14362..2a828b6b4f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use
    +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use
     // with apply.
     type ValidationApplyConfiguration struct {
     	Expression        *string          `json:"expression,omitempty"`
    @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct {
     	MessageExpression *string          `json:"messageExpression,omitempty"`
     }
     
    -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with
    +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with
     // apply.
     func Validation() *ValidationApplyConfiguration {
     	return &ValidationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go
    index d55f29a38b..9dd20afa72 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use
    +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use
     // with apply.
     type VariableApplyConfiguration struct {
     	Name       *string `json:"name,omitempty"`
     	Expression *string `json:"expression,omitempty"`
     }
     
    -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with
    +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with
     // apply.
     func Variable() *VariableApplyConfiguration {
     	return &VariableApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go
    index aa358ae205..77f2227b95 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use
    +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use
     // with apply.
     type WebhookClientConfigApplyConfiguration struct {
     	URL      *string                             `json:"url,omitempty"`
    @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct {
     	CABundle []byte                              `json:"caBundle,omitempty"`
     }
     
    -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with
    +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with
     // apply.
     func WebhookClientConfig() *WebhookClientConfigApplyConfiguration {
     	return &WebhookClientConfigApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go
    index 023695139d..958a537406 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use
    +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use
     // with apply.
     type AuditAnnotationApplyConfiguration struct {
     	Key             *string `json:"key,omitempty"`
     	ValueExpression *string `json:"valueExpression,omitempty"`
     }
     
    -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with
    +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with
     // apply.
     func AuditAnnotation() *AuditAnnotationApplyConfiguration {
     	return &AuditAnnotationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go
    index f8b511f512..f36c2f0f5c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use
    +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use
     // with apply.
     type ExpressionWarningApplyConfiguration struct {
     	FieldRef *string `json:"fieldRef,omitempty"`
     	Warning  *string `json:"warning,omitempty"`
     }
     
    -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with
    +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with
     // apply.
     func ExpressionWarning() *ExpressionWarningApplyConfiguration {
     	return &ExpressionWarningApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go
    index 186c750f96..7f983dcb22 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use
    +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use
     // with apply.
     type MatchConditionApplyConfiguration struct {
     	Name       *string `json:"name,omitempty"`
     	Expression *string `json:"expression,omitempty"`
     }
     
    -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with
    +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with
     // apply.
     func MatchCondition() *MatchConditionApplyConfiguration {
     	return &MatchConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go
    index a6710ac7ed..e443535b6a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use
    +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use
     // with apply.
     type MatchResourcesApplyConfiguration struct {
     	NamespaceSelector    *v1.LabelSelectorApplyConfiguration            `json:"namespaceSelector,omitempty"`
    @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct {
     	MatchPolicy          *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"`
     }
     
    -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with
    +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with
     // apply.
     func MatchResources() *MatchResourcesApplyConfiguration {
     	return &MatchResourcesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
    index bb2a7ba890..5e6744fd74 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
    @@ -23,14 +23,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
     )
     
    -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use
    +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use
     // with apply.
     type NamedRuleWithOperationsApplyConfiguration struct {
     	ResourceNames                           []string `json:"resourceNames,omitempty"`
     	v1.RuleWithOperationsApplyConfiguration `json:",inline"`
     }
     
    -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with
    +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with
     // apply.
     func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration {
     	return &NamedRuleWithOperationsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go
    index 350993cea0..daf17fb249 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use
    +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use
     // with apply.
     type ParamKindApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     	Kind       *string `json:"kind,omitempty"`
     }
     
    -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with
    +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with
     // apply.
     func ParamKind() *ParamKindApplyConfiguration {
     	return &ParamKindApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
    index 0951cae8a9..c4fff1d475 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use
    +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
     // with apply.
     type ParamRefApplyConfiguration struct {
     	Name                    *string                               `json:"name,omitempty"`
    @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct {
     	ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
     }
     
    -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with
    +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with
     // apply.
     func ParamRef() *ParamRefApplyConfiguration {
     	return &ParamRefApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go
    index 42a9170710..d1a7fff50e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1alpha1
     
    -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use
    +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use
     // with apply.
     type TypeCheckingApplyConfiguration struct {
     	ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"`
     }
     
    -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with
    +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with
     // apply.
     func TypeChecking() *TypeCheckingApplyConfiguration {
     	return &TypeCheckingApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    index c860b85cf7..fe60eb5f25 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use
    +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use
     // with apply.
     type ValidatingAdmissionPolicyApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct {
     	Status                           *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with
    +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with
     // apply.
     func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration {
     	b := &ValidatingAdmissionPolicyApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    index dc08226404..0c11ee5945 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use
    +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use
     // with apply.
     type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
     	Spec                             *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
    +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
     // apply.
     func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
     	b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go
    index c9a4ff7ab4..0f8e4e4357 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go
    @@ -22,7 +22,7 @@ import (
     	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
     )
     
    -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
    +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
     // with apply.
     type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
     	PolicyName        *string                                          `json:"policyName,omitempty"`
    @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
     	ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with
    +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with
     // apply.
     func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
     	return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
    index 7ee320e428..d5d3529949 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
    @@ -22,7 +22,7 @@ import (
     	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
     )
     
    -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use
    +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use
     // with apply.
     type ValidatingAdmissionPolicySpecApplyConfiguration struct {
     	ParamKind        *ParamKindApplyConfiguration                     `json:"paramKind,omitempty"`
    @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct {
     	Variables        []VariableApplyConfiguration                     `json:"variables,omitempty"`
     }
     
    -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with
    +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with
     // apply.
     func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration {
     	return &ValidatingAdmissionPolicySpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go
    index 821184c8a8..2fec5ba477 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use
    +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use
     // with apply.
     type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
     	ObservedGeneration *int64                           `json:"observedGeneration,omitempty"`
    @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
     	Conditions         []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
    +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
     // apply.
     func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration {
     	return &ValidatingAdmissionPolicyStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go
    index 9a5fc8475a..5f73043734 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use
    +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use
     // with apply.
     type ValidationApplyConfiguration struct {
     	Expression        *string          `json:"expression,omitempty"`
    @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct {
     	MessageExpression *string          `json:"messageExpression,omitempty"`
     }
     
    -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with
    +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with
     // apply.
     func Validation() *ValidationApplyConfiguration {
     	return &ValidationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
    index 2c70a8cfb5..0459dae655 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use
    +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use
     // with apply.
     type VariableApplyConfiguration struct {
     	Name       *string `json:"name,omitempty"`
     	Expression *string `json:"expression,omitempty"`
     }
     
    -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with
    +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with
     // apply.
     func Variable() *VariableApplyConfiguration {
     	return &VariableApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
    index e92fba0ddb..8718db9447 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use
    +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use
     // with apply.
     type AuditAnnotationApplyConfiguration struct {
     	Key             *string `json:"key,omitempty"`
     	ValueExpression *string `json:"valueExpression,omitempty"`
     }
     
    -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with
    +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with
     // apply.
     func AuditAnnotation() *AuditAnnotationApplyConfiguration {
     	return &AuditAnnotationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
    index 059c1b94ba..66cfc8cdc7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use
    +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use
     // with apply.
     type ExpressionWarningApplyConfiguration struct {
     	FieldRef *string `json:"fieldRef,omitempty"`
     	Warning  *string `json:"warning,omitempty"`
     }
     
    -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with
    +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with
     // apply.
     func ExpressionWarning() *ExpressionWarningApplyConfiguration {
     	return &ExpressionWarningApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go
    index d099b6b6ea..63db7fc801 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use
    +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use
     // with apply.
     type MatchConditionApplyConfiguration struct {
     	Name       *string `json:"name,omitempty"`
     	Expression *string `json:"expression,omitempty"`
     }
     
    -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with
    +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with
     // apply.
     func MatchCondition() *MatchConditionApplyConfiguration {
     	return &MatchConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
    index 25d4139db6..4005e55a33 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use
    +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use
     // with apply.
     type MatchResourcesApplyConfiguration struct {
     	NamespaceSelector    *v1.LabelSelectorApplyConfiguration           `json:"namespaceSelector,omitempty"`
    @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct {
     	MatchPolicy          *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
     }
     
    -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with
    +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with
     // apply.
     func MatchResources() *MatchResourcesApplyConfiguration {
     	return &MatchResourcesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go
    index 54845341f4..b2ab76aefd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use
    +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use
     // with apply.
     type MutatingWebhookApplyConfiguration struct {
     	Name                    *string                                              `json:"name,omitempty"`
    @@ -41,7 +41,7 @@ type MutatingWebhookApplyConfiguration struct {
     	MatchConditions         []MatchConditionApplyConfiguration                   `json:"matchConditions,omitempty"`
     }
     
    -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with
    +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with
     // apply.
     func MutatingWebhook() *MutatingWebhookApplyConfiguration {
     	return &MutatingWebhookApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    index 10dd034e25..51bb823896 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use
    +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use
     // with apply.
     type MutatingWebhookConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct {
     	Webhooks                         []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
     }
     
    -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with
    +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with
     // apply.
     func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration {
     	b := &MutatingWebhookConfigurationApplyConfiguration{}
    @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ...
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
    index fa346c4a57..5de70c7ad3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
    @@ -23,14 +23,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
     )
     
    -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use
    +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use
     // with apply.
     type NamedRuleWithOperationsApplyConfiguration struct {
     	ResourceNames                           []string `json:"resourceNames,omitempty"`
     	v1.RuleWithOperationsApplyConfiguration `json:",inline"`
     }
     
    -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with
    +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with
     // apply.
     func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration {
     	return &NamedRuleWithOperationsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
    index 6050e60251..3983125281 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use
    +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use
     // with apply.
     type ParamKindApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     	Kind       *string `json:"kind,omitempty"`
     }
     
    -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with
    +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with
     // apply.
     func ParamKind() *ParamKindApplyConfiguration {
     	return &ParamKindApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
    index 2be98dbc52..0a94ae0673 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use
    +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
     // with apply.
     type ParamRefApplyConfiguration struct {
     	Name                    *string                              `json:"name,omitempty"`
    @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct {
     	ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
     }
     
    -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with
    +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with
     // apply.
     func ParamRef() *ParamRefApplyConfiguration {
     	return &ParamRefApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go
    index c21b574908..70cc6b5b27 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use
    +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use
     // with apply.
     type ServiceReferenceApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
    @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct {
     	Port      *int32  `json:"port,omitempty"`
     }
     
    -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with
    +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with
     // apply.
     func ServiceReference() *ServiceReferenceApplyConfiguration {
     	return &ServiceReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
    index 07baf334cd..cea6e11dee 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use
    +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use
     // with apply.
     type TypeCheckingApplyConfiguration struct {
     	ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"`
     }
     
    -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with
    +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with
     // apply.
     func TypeChecking() *TypeCheckingApplyConfiguration {
     	return &TypeCheckingApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
    index e144bc9f70..c29ee56cbe 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use
    +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use
     // with apply.
     type ValidatingAdmissionPolicyApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct {
     	Status                           *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with
    +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with
     // apply.
     func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration {
     	b := &ValidatingAdmissionPolicyApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    index 0dc06aedec..4347c4810c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use
    +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use
     // with apply.
     type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
     	Spec                             *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
    +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
     // apply.
     func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
     	b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
    index d20a78efff..bddc3a40c7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
    @@ -22,7 +22,7 @@ import (
     	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
     )
     
    -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
    +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
     // with apply.
     type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
     	PolicyName        *string                                         `json:"policyName,omitempty"`
    @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
     	ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with
    +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with
     // apply.
     func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
     	return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
    index c6e9389103..8b235337d7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
    @@ -22,7 +22,7 @@ import (
     	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
     )
     
    -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use
    +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use
     // with apply.
     type ValidatingAdmissionPolicySpecApplyConfiguration struct {
     	ParamKind        *ParamKindApplyConfiguration                    `json:"paramKind,omitempty"`
    @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct {
     	Variables        []VariableApplyConfiguration                    `json:"variables,omitempty"`
     }
     
    -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with
    +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with
     // apply.
     func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration {
     	return &ValidatingAdmissionPolicySpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
    index e3e6d417ed..4612af0cff 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use
    +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use
     // with apply.
     type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
     	ObservedGeneration *int64                           `json:"observedGeneration,omitempty"`
    @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
     	Conditions         []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
    +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
     // apply.
     func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration {
     	return &ValidatingAdmissionPolicyStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go
    index 8c5c341bad..1e107d68f7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use
    +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use
     // with apply.
     type ValidatingWebhookApplyConfiguration struct {
     	Name                    *string                                         `json:"name,omitempty"`
    @@ -40,7 +40,7 @@ type ValidatingWebhookApplyConfiguration struct {
     	MatchConditions         []MatchConditionApplyConfiguration              `json:"matchConditions,omitempty"`
     }
     
    -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with
    +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with
     // apply.
     func ValidatingWebhook() *ValidatingWebhookApplyConfiguration {
     	return &ValidatingWebhookApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    index 75f1b9d716..c3535c180c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use
    +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use
     // with apply.
     type ValidatingWebhookConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct {
     	Webhooks                         []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
     }
     
    -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with
    +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with
     // apply.
     func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration {
     	b := &ValidatingWebhookConfigurationApplyConfiguration{}
    @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values .
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
    index ed9ff1ac0c..019e8e7aa9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use
    +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use
     // with apply.
     type ValidationApplyConfiguration struct {
     	Expression        *string          `json:"expression,omitempty"`
    @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct {
     	MessageExpression *string          `json:"messageExpression,omitempty"`
     }
     
    -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with
    +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with
     // apply.
     func Validation() *ValidationApplyConfiguration {
     	return &ValidationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
    index 0fc294c65d..0ece197db2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use
    +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use
     // with apply.
     type VariableApplyConfiguration struct {
     	Name       *string `json:"name,omitempty"`
     	Expression *string `json:"expression,omitempty"`
     }
     
    -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with
    +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with
     // apply.
     func Variable() *VariableApplyConfiguration {
     	return &VariableApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go
    index 490f9d5f3f..76ff71b4ae 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use
    +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use
     // with apply.
     type WebhookClientConfigApplyConfiguration struct {
     	URL      *string                             `json:"url,omitempty"`
    @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct {
     	CABundle []byte                              `json:"caBundle,omitempty"`
     }
     
    -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with
    +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with
     // apply.
     func WebhookClientConfig() *WebhookClientConfigApplyConfiguration {
     	return &WebhookClientConfigApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
    index 81c56330bb..8394298b93 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// ServerStorageVersionApplyConfiguration represents an declarative configuration of the ServerStorageVersion type for use
    +// ServerStorageVersionApplyConfiguration represents a declarative configuration of the ServerStorageVersion type for use
     // with apply.
     type ServerStorageVersionApplyConfiguration struct {
     	APIServerID       *string  `json:"apiServerID,omitempty"`
    @@ -27,7 +27,7 @@ type ServerStorageVersionApplyConfiguration struct {
     	ServedVersions    []string `json:"servedVersions,omitempty"`
     }
     
    -// ServerStorageVersionApplyConfiguration constructs an declarative configuration of the ServerStorageVersion type for use with
    +// ServerStorageVersionApplyConfiguration constructs a declarative configuration of the ServerStorageVersion type for use with
     // apply.
     func ServerStorageVersion() *ServerStorageVersionApplyConfiguration {
     	return &ServerStorageVersionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
    index 6b9f178390..d734328b06 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StorageVersionApplyConfiguration represents an declarative configuration of the StorageVersion type for use
    +// StorageVersionApplyConfiguration represents a declarative configuration of the StorageVersion type for use
     // with apply.
     type StorageVersionApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type StorageVersionApplyConfiguration struct {
     	Status                           *StorageVersionStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// StorageVersion constructs an declarative configuration of the StorageVersion type for use with
    +// StorageVersion constructs a declarative configuration of the StorageVersion type for use with
     // apply.
     func StorageVersion(name string) *StorageVersionApplyConfiguration {
     	b := &StorageVersionApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *StorageVersionApplyConfiguration) WithStatus(value *StorageVersionStatu
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *StorageVersionApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
    index 75b6256478..68d894d0c7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// StorageVersionConditionApplyConfiguration represents an declarative configuration of the StorageVersionCondition type for use
    +// StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use
     // with apply.
     type StorageVersionConditionApplyConfiguration struct {
     	Type               *v1alpha1.StorageVersionConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type StorageVersionConditionApplyConfiguration struct {
     	Message            *string                               `json:"message,omitempty"`
     }
     
    -// StorageVersionConditionApplyConfiguration constructs an declarative configuration of the StorageVersionCondition type for use with
    +// StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with
     // apply.
     func StorageVersionCondition() *StorageVersionConditionApplyConfiguration {
     	return &StorageVersionConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go
    index 43b0bf71b1..2e25d67524 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// StorageVersionStatusApplyConfiguration represents an declarative configuration of the StorageVersionStatus type for use
    +// StorageVersionStatusApplyConfiguration represents a declarative configuration of the StorageVersionStatus type for use
     // with apply.
     type StorageVersionStatusApplyConfiguration struct {
     	StorageVersions       []ServerStorageVersionApplyConfiguration    `json:"storageVersions,omitempty"`
    @@ -26,7 +26,7 @@ type StorageVersionStatusApplyConfiguration struct {
     	Conditions            []StorageVersionConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// StorageVersionStatusApplyConfiguration constructs an declarative configuration of the StorageVersionStatus type for use with
    +// StorageVersionStatusApplyConfiguration constructs a declarative configuration of the StorageVersionStatus type for use with
     // apply.
     func StorageVersionStatus() *StorageVersionStatusApplyConfiguration {
     	return &StorageVersionStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
    index c4e2085078..25b6450591 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use
    +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use
     // with apply.
     type ControllerRevisionApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct {
     	Revision                         *int64                `json:"revision,omitempty"`
     }
     
    -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with
    +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with
     // apply.
     func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration {
     	b := &ControllerRevisionApplyConfiguration{}
    @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro
     	b.Revision = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ControllerRevisionApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
    index cc9fdcd5dd..a157856514 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use
    +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use
     // with apply.
     type DaemonSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct {
     	Status                           *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with
    +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with
     // apply.
     func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
     	b := &DaemonSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DaemonSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
    index 283ae10a29..de91745b83 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use
    +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
     // with apply.
     type DaemonSetConditionApplyConfiguration struct {
     	Type               *v1.DaemonSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct {
     	Message            *string                    `json:"message,omitempty"`
     }
     
    -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with
    +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
     // apply.
     func DaemonSetCondition() *DaemonSetConditionApplyConfiguration {
     	return &DaemonSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
    index 5e808874b7..99dc5abae8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use
    +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use
     // with apply.
     type DaemonSetSpecApplyConfiguration struct {
     	Selector             *v1.LabelSelectorApplyConfiguration        `json:"selector,omitempty"`
    @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct {
     	RevisionHistoryLimit *int32                                     `json:"revisionHistoryLimit,omitempty"`
     }
     
    -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with
    +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with
     // apply.
     func DaemonSetSpec() *DaemonSetSpecApplyConfiguration {
     	return &DaemonSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go
    index d1c4462aa9..a40dc16512 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use
    +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use
     // with apply.
     type DaemonSetStatusApplyConfiguration struct {
     	CurrentNumberScheduled *int32                                 `json:"currentNumberScheduled,omitempty"`
    @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct {
     	Conditions             []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with
    +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with
     // apply.
     func DaemonSetStatus() *DaemonSetStatusApplyConfiguration {
     	return &DaemonSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
    index f1ba18226f..15af4e66be 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/apps/v1"
     )
     
    -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use
    +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
     // with apply.
     type DaemonSetUpdateStrategyApplyConfiguration struct {
     	Type          *v1.DaemonSetUpdateStrategyType           `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with
    +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with
     // apply.
     func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration {
     	return &DaemonSetUpdateStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
    index 13edda7727..52b7a21b71 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use
    +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
     // with apply.
     type DeploymentApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct {
     	Status                           *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Deployment constructs an declarative configuration of the Deployment type for use with
    +// Deployment constructs a declarative configuration of the Deployment type for use with
     // apply.
     func Deployment(name, namespace string) *DeploymentApplyConfiguration {
     	b := &DeploymentApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DeploymentApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
    index 7747044136..84df752bc1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use
    +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
     // with apply.
     type DeploymentConditionApplyConfiguration struct {
     	Type               *v1.DeploymentConditionType `json:"type,omitempty"`
    @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct {
     	Message            *string                     `json:"message,omitempty"`
     }
     
    -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with
    +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
     // apply.
     func DeploymentCondition() *DeploymentConditionApplyConfiguration {
     	return &DeploymentConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
    index 812253dae8..063f1c2765 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use
    +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
     // with apply.
     type DeploymentSpecApplyConfiguration struct {
     	Replicas                *int32                                    `json:"replicas,omitempty"`
    @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct {
     	ProgressDeadlineSeconds *int32                                    `json:"progressDeadlineSeconds,omitempty"`
     }
     
    -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with
    +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
     // apply.
     func DeploymentSpec() *DeploymentSpecApplyConfiguration {
     	return &DeploymentSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go
    index 7b48b42557..747813ade8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use
    +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
     // with apply.
     type DeploymentStatusApplyConfiguration struct {
     	ObservedGeneration  *int64                                  `json:"observedGeneration,omitempty"`
    @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct {
     	CollisionCount      *int32                                  `json:"collisionCount,omitempty"`
     }
     
    -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with
    +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
     // apply.
     func DeploymentStatus() *DeploymentStatusApplyConfiguration {
     	return &DeploymentStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
    index e9571edab1..dc4b97c55a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/apps/v1"
     )
     
    -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use
    +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
     // with apply.
     type DeploymentStrategyApplyConfiguration struct {
     	Type          *v1.DeploymentStrategyType                 `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with
    +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with
     // apply.
     func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
     	return &DeploymentStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
    index 4e7818e535..35ca4e4dff 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use
    +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use
     // with apply.
     type ReplicaSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct {
     	Status                           *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with
    +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with
     // apply.
     func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
     	b := &ReplicaSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ReplicaSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
    index 19b0355d15..32da80842c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use
    +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
     // with apply.
     type ReplicaSetConditionApplyConfiguration struct {
     	Type               *v1.ReplicaSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct {
     	Message            *string                     `json:"message,omitempty"`
     }
     
    -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with
    +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
     // apply.
     func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration {
     	return &ReplicaSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
    index ca32865835..0390584867 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use
    +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use
     // with apply.
     type ReplicaSetSpecApplyConfiguration struct {
     	Replicas        *int32                                    `json:"replicas,omitempty"`
    @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct {
     	Template        *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
     }
     
    -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with
    +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with
     // apply.
     func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration {
     	return &ReplicaSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go
    index 12f41490f9..a1408ae25e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use
    +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use
     // with apply.
     type ReplicaSetStatusApplyConfiguration struct {
     	Replicas             *int32                                  `json:"replicas,omitempty"`
    @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct {
     	Conditions           []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with
    +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with
     // apply.
     func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration {
     	return &ReplicaSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go
    index ebe8e86d1f..e898f5081c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use
    +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use
     // with apply.
     type RollingUpdateDaemonSetApplyConfiguration struct {
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     	MaxSurge       *intstr.IntOrString `json:"maxSurge,omitempty"`
     }
     
    -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with
    +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with
     // apply.
     func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration {
     	return &RollingUpdateDaemonSetApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go
    index ca9daaf249..2bc2937241 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use
    +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
     // with apply.
     type RollingUpdateDeploymentApplyConfiguration struct {
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     	MaxSurge       *intstr.IntOrString `json:"maxSurge,omitempty"`
     }
     
    -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with
    +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
     // apply.
     func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration {
     	return &RollingUpdateDeploymentApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go
    index c1b5dea855..dd0de81a6c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use
    +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use
     // with apply.
     type RollingUpdateStatefulSetStrategyApplyConfiguration struct {
     	Partition      *int32              `json:"partition,omitempty"`
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     }
     
    -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with
    +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with
     // apply.
     func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration {
     	return &RollingUpdateStatefulSetStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
    index 24041d99f8..6f2b340dab 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use
    +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use
     // with apply.
     type StatefulSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct {
     	Status                           *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with
    +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with
     // apply.
     func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
     	b := &StatefulSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *StatefulSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
    index f9d47850d6..c62a5e854c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use
    +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
     // with apply.
     type StatefulSetConditionApplyConfiguration struct {
     	Type               *v1.StatefulSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct {
     	Message            *string                      `json:"message,omitempty"`
     }
     
    -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with
    +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
     // apply.
     func StatefulSetCondition() *StatefulSetConditionApplyConfiguration {
     	return &StatefulSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go
    index 9778f1c4a0..86f39e16c1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use
    +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use
     // with apply.
     type StatefulSetOrdinalsApplyConfiguration struct {
     	Start *int32 `json:"start,omitempty"`
     }
     
    -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with
    +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with
     // apply.
     func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration {
     	return &StatefulSetOrdinalsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
    index ba01d5d3c1..cd65fd4364 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/apps/v1"
     )
     
    -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
    +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
     // with apply.
     type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
     	WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
     	WhenScaled  *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
     }
     
    -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
    +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
     // apply.
     func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
     	return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
    index 81afdca596..1848a963cc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
    @@ -24,7 +24,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use
    +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use
     // with apply.
     type StatefulSetSpecApplyConfiguration struct {
     	Replicas                             *int32                                                             `json:"replicas,omitempty"`
    @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct {
     	Ordinals                             *StatefulSetOrdinalsApplyConfiguration                             `json:"ordinals,omitempty"`
     }
     
    -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with
    +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with
     // apply.
     func StatefulSetSpec() *StatefulSetSpecApplyConfiguration {
     	return &StatefulSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
    index d88881b656..637a1c649d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use
    +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use
     // with apply.
     type StatefulSetStatusApplyConfiguration struct {
     	ObservedGeneration *int64                                   `json:"observedGeneration,omitempty"`
    @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct {
     	AvailableReplicas  *int32                                   `json:"availableReplicas,omitempty"`
     }
     
    -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with
    +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with
     // apply.
     func StatefulSetStatus() *StatefulSetStatusApplyConfiguration {
     	return &StatefulSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
    index 5268a1e065..b59e107355 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/apps/v1"
     )
     
    -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use
    +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
     // with apply.
     type StatefulSetUpdateStrategyApplyConfiguration struct {
     	Type          *v1.StatefulSetUpdateStrategyType                   `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with
    +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with
     // apply.
     func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration {
     	return &StatefulSetUpdateStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
    index 827c063598..606de58a1e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use
    +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use
     // with apply.
     type ControllerRevisionApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct {
     	Revision                         *int64                `json:"revision,omitempty"`
     }
     
    -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with
    +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with
     // apply.
     func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration {
     	b := &ControllerRevisionApplyConfiguration{}
    @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro
     	b.Revision = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ControllerRevisionApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
    index e22f76b665..145aaed70d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use
    +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
     // with apply.
     type DeploymentApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct {
     	Status                           *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Deployment constructs an declarative configuration of the Deployment type for use with
    +// Deployment constructs a declarative configuration of the Deployment type for use with
     // apply.
     func Deployment(name, namespace string) *DeploymentApplyConfiguration {
     	b := &DeploymentApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DeploymentApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
    index 9da8ce0899..504dddd94e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use
    +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
     // with apply.
     type DeploymentConditionApplyConfiguration struct {
     	Type               *v1beta1.DeploymentConditionType `json:"type,omitempty"`
    @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with
    +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
     // apply.
     func DeploymentCondition() *DeploymentConditionApplyConfiguration {
     	return &DeploymentConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go
    index 5e18476bdc..5531c756f9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use
    +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
     // with apply.
     type DeploymentSpecApplyConfiguration struct {
     	Replicas                *int32                                    `json:"replicas,omitempty"`
    @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct {
     	ProgressDeadlineSeconds *int32                                    `json:"progressDeadlineSeconds,omitempty"`
     }
     
    -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with
    +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
     // apply.
     func DeploymentSpec() *DeploymentSpecApplyConfiguration {
     	return &DeploymentSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go
    index f8d1cf5d25..adc023a34d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use
    +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
     // with apply.
     type DeploymentStatusApplyConfiguration struct {
     	ObservedGeneration  *int64                                  `json:"observedGeneration,omitempty"`
    @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct {
     	CollisionCount      *int32                                  `json:"collisionCount,omitempty"`
     }
     
    -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with
    +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
     // apply.
     func DeploymentStatus() *DeploymentStatusApplyConfiguration {
     	return &DeploymentStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
    index 7279318a88..2c322b4ace 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta1 "k8s.io/api/apps/v1beta1"
     )
     
    -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use
    +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
     // with apply.
     type DeploymentStrategyApplyConfiguration struct {
     	Type          *v1beta1.DeploymentStrategyType            `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with
    +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with
     // apply.
     func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
     	return &DeploymentStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go
    index 131e57a39d..775f82eef8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use
    +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use
     // with apply.
     type RollbackConfigApplyConfiguration struct {
     	Revision *int64 `json:"revision,omitempty"`
     }
     
    -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with
    +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with
     // apply.
     func RollbackConfig() *RollbackConfigApplyConfiguration {
     	return &RollbackConfigApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go
    index dde5f064b0..244701a5e0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use
    +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
     // with apply.
     type RollingUpdateDeploymentApplyConfiguration struct {
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     	MaxSurge       *intstr.IntOrString `json:"maxSurge,omitempty"`
     }
     
    -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with
    +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
     // apply.
     func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration {
     	return &RollingUpdateDeploymentApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go
    index 8989a08d2c..94c2971343 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use
    +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use
     // with apply.
     type RollingUpdateStatefulSetStrategyApplyConfiguration struct {
     	Partition      *int32              `json:"partition,omitempty"`
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     }
     
    -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with
    +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with
     // apply.
     func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration {
     	return &RollingUpdateStatefulSetStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
    index ed5cfab41c..2705938862 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use
    +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use
     // with apply.
     type StatefulSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct {
     	Status                           *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with
    +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with
     // apply.
     func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
     	b := &StatefulSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *StatefulSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
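
For context only, and separate from the vendored patch itself: the hunk above (and the matching hunks for the other workload types below) adds a GetName accessor to the generated apply configurations. A minimal sketch of how that accessor might be used together with server-side apply, assuming standard client-go imports and a typed clientset; the helper function name and the "example-manager" field manager are hypothetical:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta1ac "k8s.io/client-go/applyconfigurations/apps/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyStatefulSet is a hypothetical helper illustrating the new GetName accessor.
func applyStatefulSet(ctx context.Context, client kubernetes.Interface) error {
	// The generated constructor seeds ObjectMeta with the given name and namespace.
	cfg := appsv1beta1ac.StatefulSet("web", "default").
		WithLabels(map[string]string{"app": "web"})

	// GetName (added by the hunk above) exposes the configured name without
	// reaching into ObjectMeta directly; it returns a *string that may be nil.
	if name := cfg.GetName(); name != nil {
		fmt.Printf("applying StatefulSet %q\n", *name)
	}

	// Server-side apply through the typed client using the apply configuration.
	_, err := client.AppsV1beta1().StatefulSets("default").Apply(
		ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}
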
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
    index 97e994ab71..8a17391cd2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use
    +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
     // with apply.
     type StatefulSetConditionApplyConfiguration struct {
     	Type               *v1beta1.StatefulSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct {
     	Message            *string                           `json:"message,omitempty"`
     }
     
    -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with
    +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
     // apply.
     func StatefulSetCondition() *StatefulSetConditionApplyConfiguration {
     	return &StatefulSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go
    index 8f349a2d27..2e3049e5e2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use
    +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use
     // with apply.
     type StatefulSetOrdinalsApplyConfiguration struct {
     	Start *int32 `json:"start,omitempty"`
     }
     
    -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with
    +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with
     // apply.
     func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration {
     	return &StatefulSetOrdinalsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
    index 0048724c04..69a8ee0f0b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta1 "k8s.io/api/apps/v1beta1"
     )
     
    -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
    +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
     // with apply.
     type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
     	WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
     	WhenScaled  *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
     }
     
    -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
    +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
     // apply.
     func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
     	return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
    index 1eb1ba7b03..ac325d717e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
    @@ -24,7 +24,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use
    +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use
     // with apply.
     type StatefulSetSpecApplyConfiguration struct {
     	Replicas                             *int32                                                             `json:"replicas,omitempty"`
    @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct {
     	Ordinals                             *StatefulSetOrdinalsApplyConfiguration                             `json:"ordinals,omitempty"`
     }
     
    -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with
    +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with
     // apply.
     func StatefulSetSpec() *StatefulSetSpecApplyConfiguration {
     	return &StatefulSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
    index f31066b6ff..27ae7540fd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use
    +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use
     // with apply.
     type StatefulSetStatusApplyConfiguration struct {
     	ObservedGeneration *int64                                   `json:"observedGeneration,omitempty"`
    @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct {
     	AvailableReplicas  *int32                                   `json:"availableReplicas,omitempty"`
     }
     
    -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with
    +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with
     // apply.
     func StatefulSetStatus() *StatefulSetStatusApplyConfiguration {
     	return &StatefulSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
    index 895c1e7f8a..7714ebbb72 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta1 "k8s.io/api/apps/v1beta1"
     )
     
    -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use
    +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
     // with apply.
     type StatefulSetUpdateStrategyApplyConfiguration struct {
     	Type          *v1beta1.StatefulSetUpdateStrategyType              `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with
    +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with
     // apply.
     func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration {
     	return &StatefulSetUpdateStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
    index 4abab6851c..5f75a45510 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use
    +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use
     // with apply.
     type ControllerRevisionApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct {
     	Revision                         *int64                `json:"revision,omitempty"`
     }
     
    -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with
    +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with
     // apply.
     func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration {
     	b := &ControllerRevisionApplyConfiguration{}
    @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro
     	b.Revision = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ControllerRevisionApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
    index 906a8ca46e..9ffda6182e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use
    +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use
     // with apply.
     type DaemonSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct {
     	Status                           *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with
    +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with
     // apply.
     func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
     	b := &DaemonSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DaemonSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
    index 55dc1f4877..8315050f0f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use
    +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
     // with apply.
     type DaemonSetConditionApplyConfiguration struct {
     	Type               *v1beta2.DaemonSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct {
     	Message            *string                         `json:"message,omitempty"`
     }
     
    -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with
    +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
     // apply.
     func DaemonSetCondition() *DaemonSetConditionApplyConfiguration {
     	return &DaemonSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go
    index 48137819af..74d8bf51c6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use
    +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use
     // with apply.
     type DaemonSetSpecApplyConfiguration struct {
     	Selector             *v1.LabelSelectorApplyConfiguration        `json:"selector,omitempty"`
    @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct {
     	RevisionHistoryLimit *int32                                     `json:"revisionHistoryLimit,omitempty"`
     }
     
    -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with
    +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with
     // apply.
     func DaemonSetSpec() *DaemonSetSpecApplyConfiguration {
     	return &DaemonSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go
    index 29cda7a90e..6b0fda8953 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use
    +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use
     // with apply.
     type DaemonSetStatusApplyConfiguration struct {
     	CurrentNumberScheduled *int32                                 `json:"currentNumberScheduled,omitempty"`
    @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct {
     	Conditions             []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with
    +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with
     // apply.
     func DaemonSetStatus() *DaemonSetStatusApplyConfiguration {
     	return &DaemonSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
    index 07fc07fc6a..7d66f1da43 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta2 "k8s.io/api/apps/v1beta2"
     )
     
    -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use
    +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
     // with apply.
     type DaemonSetUpdateStrategyApplyConfiguration struct {
     	Type          *v1beta2.DaemonSetUpdateStrategyType      `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with
    +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with
     // apply.
     func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration {
     	return &DaemonSetUpdateStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
    index 7e39e67510..485da788af 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use
    +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
     // with apply.
     type DeploymentApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct {
     	Status                           *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Deployment constructs an declarative configuration of the Deployment type for use with
    +// Deployment constructs a declarative configuration of the Deployment type for use with
     // apply.
     func Deployment(name, namespace string) *DeploymentApplyConfiguration {
     	b := &DeploymentApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DeploymentApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
    index 852a2c6832..1924278741 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use
    +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
     // with apply.
     type DeploymentConditionApplyConfiguration struct {
     	Type               *v1beta2.DeploymentConditionType `json:"type,omitempty"`
    @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with
    +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
     // apply.
     func DeploymentCondition() *DeploymentConditionApplyConfiguration {
     	return &DeploymentConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go
    index 6898941ace..1b55130c6b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use
    +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
     // with apply.
     type DeploymentSpecApplyConfiguration struct {
     	Replicas                *int32                                    `json:"replicas,omitempty"`
    @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct {
     	ProgressDeadlineSeconds *int32                                    `json:"progressDeadlineSeconds,omitempty"`
     }
     
    -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with
    +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
     // apply.
     func DeploymentSpec() *DeploymentSpecApplyConfiguration {
     	return &DeploymentSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go
    index fe99ca9917..5fa9122332 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use
    +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
     // with apply.
     type DeploymentStatusApplyConfiguration struct {
     	ObservedGeneration  *int64                                  `json:"observedGeneration,omitempty"`
    @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct {
     	CollisionCount      *int32                                  `json:"collisionCount,omitempty"`
     }
     
    -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with
    +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
     // apply.
     func DeploymentStatus() *DeploymentStatusApplyConfiguration {
     	return &DeploymentStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
    index 8714e153e4..c769436ee0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta2 "k8s.io/api/apps/v1beta2"
     )
     
    -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use
    +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
     // with apply.
     type DeploymentStrategyApplyConfiguration struct {
     	Type          *v1beta2.DeploymentStrategyType            `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with
    +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with
     // apply.
     func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
     	return &DeploymentStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
    index d9303e1b22..d8608aa51c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use
    +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use
     // with apply.
     type ReplicaSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct {
     	Status                           *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with
    +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with
     // apply.
     func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
     	b := &ReplicaSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ReplicaSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
    index 47776bfa2e..beec546f7c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use
    +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
     // with apply.
     type ReplicaSetConditionApplyConfiguration struct {
     	Type               *v1beta2.ReplicaSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with
    +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
     // apply.
     func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration {
     	return &ReplicaSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go
    index 14d548169e..1d77b9e0fd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use
    +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use
     // with apply.
     type ReplicaSetSpecApplyConfiguration struct {
     	Replicas        *int32                                    `json:"replicas,omitempty"`
    @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct {
     	Template        *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
     }
     
    -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with
    +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with
     // apply.
     func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration {
     	return &ReplicaSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go
    index 7c1b8fb29d..d3c92e274d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use
    +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use
     // with apply.
     type ReplicaSetStatusApplyConfiguration struct {
     	Replicas             *int32                                  `json:"replicas,omitempty"`
    @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct {
     	Conditions           []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with
    +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with
     // apply.
     func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration {
     	return &ReplicaSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go
    index b586b678d4..ad6021d37a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use
    +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use
     // with apply.
     type RollingUpdateDaemonSetApplyConfiguration struct {
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     	MaxSurge       *intstr.IntOrString `json:"maxSurge,omitempty"`
     }
     
    -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with
    +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with
     // apply.
     func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration {
     	return &RollingUpdateDaemonSetApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go
    index 78ef210081..b0cc3a4ee4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use
    +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
     // with apply.
     type RollingUpdateDeploymentApplyConfiguration struct {
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     	MaxSurge       *intstr.IntOrString `json:"maxSurge,omitempty"`
     }
     
    -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with
    +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
     // apply.
     func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration {
     	return &RollingUpdateDeploymentApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go
    index 4a12e51c0a..0046c264bb 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use
    +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use
     // with apply.
     type RollingUpdateStatefulSetStrategyApplyConfiguration struct {
     	Partition      *int32              `json:"partition,omitempty"`
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     }
     
    -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with
    +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with
     // apply.
     func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration {
     	return &RollingUpdateStatefulSetStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
    index 0e89668cb3..126ab2d8bd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
    @@ -25,7 +25,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use
    +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use
     // with apply.
     type ScaleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -34,7 +34,7 @@ type ScaleApplyConfiguration struct {
     	Status                           *v1beta2.ScaleStatus `json:"status,omitempty"`
     }
     
    -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with
    +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
     // apply.
     func Scale() *ScaleApplyConfiguration {
     	b := &ScaleApplyConfiguration{}
    @@ -216,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleAp
     	b.Status = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ScaleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
    index 03d5428b4b..3d2b5d1917 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use
    +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use
     // with apply.
     type StatefulSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct {
     	Status                           *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with
    +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with
     // apply.
     func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
     	b := &StatefulSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *StatefulSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
    index c33e68b5e2..aa45db686e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use
    +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
     // with apply.
     type StatefulSetConditionApplyConfiguration struct {
     	Type               *v1beta2.StatefulSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct {
     	Message            *string                           `json:"message,omitempty"`
     }
     
    -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with
    +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
     // apply.
     func StatefulSetCondition() *StatefulSetConditionApplyConfiguration {
     	return &StatefulSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go
    index c586da775c..a899243a5a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta2
     
    -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use
    +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use
     // with apply.
     type StatefulSetOrdinalsApplyConfiguration struct {
     	Start *int32 `json:"start,omitempty"`
     }
     
    -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with
    +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with
     // apply.
     func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration {
     	return &StatefulSetOrdinalsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
    index aee27803d3..318e5f4642 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta2 "k8s.io/api/apps/v1beta2"
     )
     
    -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
    +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
     // with apply.
     type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
     	WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
     	WhenScaled  *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
     }
     
    -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
    +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
     // apply.
     func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
     	return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
    index b6165fbd9a..bebf80c896 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
    @@ -24,7 +24,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use
    +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use
     // with apply.
     type StatefulSetSpecApplyConfiguration struct {
     	Replicas                             *int32                                                             `json:"replicas,omitempty"`
    @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct {
     	Ordinals                             *StatefulSetOrdinalsApplyConfiguration                             `json:"ordinals,omitempty"`
     }
     
    -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with
    +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with
     // apply.
     func StatefulSetSpec() *StatefulSetSpecApplyConfiguration {
     	return &StatefulSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
    index 63835904c1..a647cd7d26 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use
    +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use
     // with apply.
     type StatefulSetStatusApplyConfiguration struct {
     	ObservedGeneration *int64                                   `json:"observedGeneration,omitempty"`
    @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct {
     	AvailableReplicas  *int32                                   `json:"availableReplicas,omitempty"`
     }
     
    -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with
    +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with
     // apply.
     func StatefulSetStatus() *StatefulSetStatusApplyConfiguration {
     	return &StatefulSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
    index 03c2914917..81d4ba1df3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta2 "k8s.io/api/apps/v1beta2"
     )
     
    -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use
    +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
     // with apply.
     type StatefulSetUpdateStrategyApplyConfiguration struct {
     	Type          *v1beta2.StatefulSetUpdateStrategyType              `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with
    +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with
     // apply.
     func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration {
     	return &StatefulSetUpdateStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go
    index 0eac22692c..51ec665012 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use
    +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
     // with apply.
     type CrossVersionObjectReferenceApplyConfiguration struct {
     	Kind       *string `json:"kind,omitempty"`
    @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     }
     
    -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with
    +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with
     // apply.
     func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration {
     	return &CrossVersionObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
    index 38fa205841..8150635ee6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use
    +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
     // with apply.
     type HorizontalPodAutoscalerApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct {
     	Status                           *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with
    +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
     // apply.
     func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration {
     	b := &HorizontalPodAutoscalerApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go
    index 561ac60d35..0ca2f84ea9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use
    +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
     // with apply.
     type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	ScaleTargetRef                 *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
    @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	TargetCPUUtilizationPercentage *int32                                         `json:"targetCPUUtilizationPercentage,omitempty"`
     }
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with
    +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
     // apply.
     func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration {
     	return &HorizontalPodAutoscalerSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
    index abc2e05aa7..fcb231c3be 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use
    +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
     // with apply.
     type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	ObservedGeneration              *int64   `json:"observedGeneration,omitempty"`
    @@ -32,7 +32,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	CurrentCPUUtilizationPercentage *int32   `json:"currentCPUUtilizationPercentage,omitempty"`
     }
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with
    +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
     // apply.
     func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration {
     	return &HorizontalPodAutoscalerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
    index f770922803..40f3db8c5a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
    @@ -24,7 +24,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use
    +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use
     // with apply.
     type ScaleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -33,7 +33,7 @@ type ScaleApplyConfiguration struct {
     	Status                           *ScaleStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with
    +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
     // apply.
     func Scale() *ScaleApplyConfiguration {
     	b := &ScaleApplyConfiguration{}
    @@ -215,3 +215,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguratio
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ScaleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
    index 2339a8fef2..025004ba5f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// ScaleSpecApplyConfiguration represents an declarative configuration of the ScaleSpec type for use
    +// ScaleSpecApplyConfiguration represents a declarative configuration of the ScaleSpec type for use
     // with apply.
     type ScaleSpecApplyConfiguration struct {
     	Replicas *int32 `json:"replicas,omitempty"`
     }
     
    -// ScaleSpecApplyConfiguration constructs an declarative configuration of the ScaleSpec type for use with
    +// ScaleSpecApplyConfiguration constructs a declarative configuration of the ScaleSpec type for use with
     // apply.
     func ScaleSpec() *ScaleSpecApplyConfiguration {
     	return &ScaleSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
    index 81c8d1b30a..51f96d2357 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ScaleStatusApplyConfiguration represents an declarative configuration of the ScaleStatus type for use
    +// ScaleStatusApplyConfiguration represents a declarative configuration of the ScaleStatus type for use
     // with apply.
     type ScaleStatusApplyConfiguration struct {
     	Replicas *int32  `json:"replicas,omitempty"`
     	Selector *string `json:"selector,omitempty"`
     }
     
    -// ScaleStatusApplyConfiguration constructs an declarative configuration of the ScaleStatus type for use with
    +// ScaleStatusApplyConfiguration constructs a declarative configuration of the ScaleStatus type for use with
     // apply.
     func ScaleStatus() *ScaleStatusApplyConfiguration {
     	return &ScaleStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go
    index 15ef216d1b..b6e071e848 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use
    +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use
     // with apply.
     type ContainerResourceMetricSourceApplyConfiguration struct {
     	Name      *v1.ResourceName                `json:"name,omitempty"`
    @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct {
     	Container *string                         `json:"container,omitempty"`
     }
     
    -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with
    +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with
     // apply.
     func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration {
     	return &ContainerResourceMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go
    index 34213bca3f..46bd2bac20 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use
    +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use
     // with apply.
     type ContainerResourceMetricStatusApplyConfiguration struct {
     	Name      *v1.ResourceName                     `json:"name,omitempty"`
    @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct {
     	Container *string                              `json:"container,omitempty"`
     }
     
    -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with
    +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with
     // apply.
     func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration {
     	return &ContainerResourceMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go
    index 19045706dc..645f098577 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2
     
    -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use
    +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
     // with apply.
     type CrossVersionObjectReferenceApplyConfiguration struct {
     	Kind       *string `json:"kind,omitempty"`
    @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     }
     
    -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with
    +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with
     // apply.
     func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration {
     	return &CrossVersionObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
    index 11a8eff263..a9c45b31a0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2
     
    -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use
    +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use
     // with apply.
     type ExternalMetricSourceApplyConfiguration struct {
     	Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
     	Target *MetricTargetApplyConfiguration     `json:"target,omitempty"`
     }
     
    -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with
    +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with
     // apply.
     func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration {
     	return &ExternalMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go
    index 3b1a0329b8..4280086f5e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2
     
    -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use
    +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use
     // with apply.
     type ExternalMetricStatusApplyConfiguration struct {
     	Metric  *MetricIdentifierApplyConfiguration  `json:"metric,omitempty"`
     	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
     }
     
    -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with
    +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with
     // apply.
     func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration {
     	return &ExternalMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
    index 31061de85e..e26b530c18 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use
    +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
     // with apply.
     type HorizontalPodAutoscalerApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct {
     	Status                           *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with
    +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
     // apply.
     func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration {
     	b := &HorizontalPodAutoscalerApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
    index e6fdabd7c8..05750cc21d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2
     
    -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use
    +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use
     // with apply.
     type HorizontalPodAutoscalerBehaviorApplyConfiguration struct {
     	ScaleUp   *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"`
     	ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"`
     }
     
    -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with
    +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with
     // apply.
     func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration {
     	return &HorizontalPodAutoscalerBehaviorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
    index c020eccd3d..844c6dc862 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use
    +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
     // with apply.
     type HorizontalPodAutoscalerConditionApplyConfiguration struct {
     	Type               *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct {
     	Message            *string                                  `json:"message,omitempty"`
     }
     
    -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with
    +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
     // apply.
     func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration {
     	return &HorizontalPodAutoscalerConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go
    index c36bc3f225..e34ababc58 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use
    +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
     // with apply.
     type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration     `json:"scaleTargetRef,omitempty"`
    @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	Behavior       *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"`
     }
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with
    +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
     // apply.
     func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration {
     	return &HorizontalPodAutoscalerSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go
    index d4d551df85..f1a2c3f4e9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use
    +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
     // with apply.
     type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	ObservedGeneration *int64                                               `json:"observedGeneration,omitempty"`
    @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	Conditions         []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with
    +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
     // apply.
     func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration {
     	return &HorizontalPodAutoscalerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
    index 139f0fb5c7..b8b735747b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
    @@ -22,7 +22,7 @@ import (
     	v2 "k8s.io/api/autoscaling/v2"
     )
     
    -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use
    +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use
     // with apply.
     type HPAScalingPolicyApplyConfiguration struct {
     	Type          *v2.HPAScalingPolicyType `json:"type,omitempty"`
    @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct {
     	PeriodSeconds *int32                   `json:"periodSeconds,omitempty"`
     }
     
    -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with
    +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with
     // apply.
     func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration {
     	return &HPAScalingPolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
    index e768076aa4..c7020f77bd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
    @@ -22,7 +22,7 @@ import (
     	v2 "k8s.io/api/autoscaling/v2"
     )
     
    -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use
    +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use
     // with apply.
     type HPAScalingRulesApplyConfiguration struct {
     	StabilizationWindowSeconds *int32                               `json:"stabilizationWindowSeconds,omitempty"`
    @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct {
     	Policies                   []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
     }
     
    -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with
    +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with
     // apply.
     func HPAScalingRules() *HPAScalingRulesApplyConfiguration {
     	return &HPAScalingRulesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go
    index 312ad3ddd6..2f99f7d0b4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use
    +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use
     // with apply.
     type MetricIdentifierApplyConfiguration struct {
     	Name     *string                             `json:"name,omitempty"`
     	Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
     }
     
    -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with
    +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with
     // apply.
     func MetricIdentifier() *MetricIdentifierApplyConfiguration {
     	return &MetricIdentifierApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
    index 094ead6c16..89e6b5c68b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
    @@ -22,7 +22,7 @@ import (
     	v2 "k8s.io/api/autoscaling/v2"
     )
     
    -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use
    +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
     // with apply.
     type MetricSpecApplyConfiguration struct {
     	Type              *v2.MetricSourceType                             `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct {
     	External          *ExternalMetricSourceApplyConfiguration          `json:"external,omitempty"`
     }
     
    -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with
    +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with
     // apply.
     func MetricSpec() *MetricSpecApplyConfiguration {
     	return &MetricSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
    index c65ad446f0..86ae3348b6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
    @@ -22,7 +22,7 @@ import (
     	v2 "k8s.io/api/autoscaling/v2"
     )
     
    -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use
    +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
     // with apply.
     type MetricStatusApplyConfiguration struct {
     	Type              *v2.MetricSourceType                             `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct {
     	External          *ExternalMetricStatusApplyConfiguration          `json:"external,omitempty"`
     }
     
    -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with
    +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with
     // apply.
     func MetricStatus() *MetricStatusApplyConfiguration {
     	return &MetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
    index f301e4d2be..bf68a1c346 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
    @@ -23,7 +23,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use
    +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use
     // with apply.
     type MetricTargetApplyConfiguration struct {
     	Type               *v2.MetricTargetType `json:"type,omitempty"`
    @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct {
     	AverageUtilization *int32               `json:"averageUtilization,omitempty"`
     }
     
    -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with
    +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with
     // apply.
     func MetricTarget() *MetricTargetApplyConfiguration {
     	return &MetricTargetApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go
    index e8474b1890..59732548b8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go
    @@ -22,7 +22,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use
    +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use
     // with apply.
     type MetricValueStatusApplyConfiguration struct {
     	Value              *resource.Quantity `json:"value,omitempty"`
    @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct {
     	AverageUtilization *int32             `json:"averageUtilization,omitempty"`
     }
     
    -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with
    +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with
     // apply.
     func MetricValueStatus() *MetricValueStatusApplyConfiguration {
     	return &MetricValueStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go
    index a9482565e0..2391fa5c22 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2
     
    -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use
    +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use
     // with apply.
     type ObjectMetricSourceApplyConfiguration struct {
     	DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
    @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct {
     	Metric          *MetricIdentifierApplyConfiguration            `json:"metric,omitempty"`
     }
     
    -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with
    +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with
     // apply.
     func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration {
     	return &ObjectMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go
    index 70ba43bedd..9ffd0c180d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2
     
    -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use
    +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use
     // with apply.
     type ObjectMetricStatusApplyConfiguration struct {
     	Metric          *MetricIdentifierApplyConfiguration            `json:"metric,omitempty"`
    @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct {
     	DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
     }
     
    -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with
    +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with
     // apply.
     func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration {
     	return &ObjectMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
    index 0a7a5c2595..28a35a2ae1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2
     
    -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use
    +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use
     // with apply.
     type PodsMetricSourceApplyConfiguration struct {
     	Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
     	Target *MetricTargetApplyConfiguration     `json:"target,omitempty"`
     }
     
    -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with
    +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with
     // apply.
     func PodsMetricSource() *PodsMetricSourceApplyConfiguration {
     	return &PodsMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
    index 865fcc33e3..4614282ce1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2
     
    -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use
    +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use
     // with apply.
     type PodsMetricStatusApplyConfiguration struct {
     	Metric  *MetricIdentifierApplyConfiguration  `json:"metric,omitempty"`
     	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
     }
     
    -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with
    +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with
     // apply.
     func PodsMetricStatus() *PodsMetricStatusApplyConfiguration {
     	return &PodsMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
    index 25a065fef6..ffc9042b9f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use
    +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use
     // with apply.
     type ResourceMetricSourceApplyConfiguration struct {
     	Name   *v1.ResourceName                `json:"name,omitempty"`
     	Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
     }
     
    -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with
    +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with
     // apply.
     func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration {
     	return &ResourceMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
    index fb5625afab..0fdbfcb555 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use
    +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use
     // with apply.
     type ResourceMetricStatusApplyConfiguration struct {
     	Name    *v1.ResourceName                     `json:"name,omitempty"`
     	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
     }
     
    -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with
    +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with
     // apply.
     func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration {
     	return &ResourceMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
    index 2594e8e072..f41c5af10f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
    @@ -23,7 +23,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use
    +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use
     // with apply.
     type ContainerResourceMetricSourceApplyConfiguration struct {
     	Name                     *v1.ResourceName   `json:"name,omitempty"`
    @@ -32,7 +32,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct {
     	Container                *string            `json:"container,omitempty"`
     }
     
    -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with
    +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with
     // apply.
     func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration {
     	return &ContainerResourceMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
    index ae897237c4..4cd56eea37 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
    @@ -23,7 +23,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use
    +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use
     // with apply.
     type ContainerResourceMetricStatusApplyConfiguration struct {
     	Name                      *v1.ResourceName   `json:"name,omitempty"`
    @@ -32,7 +32,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct {
     	Container                 *string            `json:"container,omitempty"`
     }
     
    -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with
    +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with
     // apply.
     func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration {
     	return &ContainerResourceMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go
    index fe3d15e866..f03261612e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2beta1
     
    -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use
    +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
     // with apply.
     type CrossVersionObjectReferenceApplyConfiguration struct {
     	Kind       *string `json:"kind,omitempty"`
    @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     }
     
    -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with
    +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with
     // apply.
     func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration {
     	return &CrossVersionObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go
    index c118e6ca1e..8dce4529dd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use
    +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use
     // with apply.
     type ExternalMetricSourceApplyConfiguration struct {
     	MetricName         *string                             `json:"metricName,omitempty"`
    @@ -32,7 +32,7 @@ type ExternalMetricSourceApplyConfiguration struct {
     	TargetAverageValue *resource.Quantity                  `json:"targetAverageValue,omitempty"`
     }
     
    -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with
    +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with
     // apply.
     func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration {
     	return &ExternalMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go
    index ab771214e2..4034d7e55c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use
    +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use
     // with apply.
     type ExternalMetricStatusApplyConfiguration struct {
     	MetricName          *string                             `json:"metricName,omitempty"`
    @@ -32,7 +32,7 @@ type ExternalMetricStatusApplyConfiguration struct {
     	CurrentAverageValue *resource.Quantity                  `json:"currentAverageValue,omitempty"`
     }
     
    -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with
    +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with
     // apply.
     func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration {
     	return &ExternalMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
    index 66b8d5f738..93e37eaffa 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use
    +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
     // with apply.
     type HorizontalPodAutoscalerApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct {
     	Status                           *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with
    +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
     // apply.
     func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration {
     	b := &HorizontalPodAutoscalerApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
    index de3e6ea5cd..8bb82298d1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use
    +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
     // with apply.
     type HorizontalPodAutoscalerConditionApplyConfiguration struct {
     	Type               *v2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct {
     	Message            *string                                       `json:"message,omitempty"`
     }
     
    -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with
    +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
     // apply.
     func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration {
     	return &HorizontalPodAutoscalerConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go
    index 761d94a850..6f111ceafd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2beta1
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use
    +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
     // with apply.
     type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
    @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	Metrics        []MetricSpecApplyConfiguration                 `json:"metrics,omitempty"`
     }
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with
    +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
     // apply.
     func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration {
     	return &HorizontalPodAutoscalerSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go
    index 95ec5be43b..391b577258 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use
    +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
     // with apply.
     type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	ObservedGeneration *int64                                               `json:"observedGeneration,omitempty"`
    @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	Conditions         []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with
    +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
     // apply.
     func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration {
     	return &HorizontalPodAutoscalerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
    index 70beec84e0..961e2c5b48 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
    @@ -22,7 +22,7 @@ import (
     	v2beta1 "k8s.io/api/autoscaling/v2beta1"
     )
     
    -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use
    +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
     // with apply.
     type MetricSpecApplyConfiguration struct {
     	Type              *v2beta1.MetricSourceType                        `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct {
     	External          *ExternalMetricSourceApplyConfiguration          `json:"external,omitempty"`
     }
     
    -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with
    +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with
     // apply.
     func MetricSpec() *MetricSpecApplyConfiguration {
     	return &MetricSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
    index b03ea2f9e4..587b5a1f88 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
    @@ -22,7 +22,7 @@ import (
     	v2beta1 "k8s.io/api/autoscaling/v2beta1"
     )
     
    -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use
    +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
     // with apply.
     type MetricStatusApplyConfiguration struct {
     	Type              *v2beta1.MetricSourceType                        `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct {
     	External          *ExternalMetricStatusApplyConfiguration          `json:"external,omitempty"`
     }
     
    -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with
    +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with
     // apply.
     func MetricStatus() *MetricStatusApplyConfiguration {
     	return &MetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go
    index 07d467972e..a9e2eead4d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use
    +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use
     // with apply.
     type ObjectMetricSourceApplyConfiguration struct {
     	Target       *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"`
    @@ -33,7 +33,7 @@ type ObjectMetricSourceApplyConfiguration struct {
     	AverageValue *resource.Quantity                             `json:"averageValue,omitempty"`
     }
     
    -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with
    +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with
     // apply.
     func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration {
     	return &ObjectMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go
    index b5e0d3e3d2..4d3be8df6c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use
    +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use
     // with apply.
     type ObjectMetricStatusApplyConfiguration struct {
     	Target       *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"`
    @@ -33,7 +33,7 @@ type ObjectMetricStatusApplyConfiguration struct {
     	AverageValue *resource.Quantity                             `json:"averageValue,omitempty"`
     }
     
    -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with
    +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with
     // apply.
     func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration {
     	return &ObjectMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go
    index a4122b8989..cfcd752e24 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use
    +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use
     // with apply.
     type PodsMetricSourceApplyConfiguration struct {
     	MetricName         *string                             `json:"metricName,omitempty"`
    @@ -31,7 +31,7 @@ type PodsMetricSourceApplyConfiguration struct {
     	Selector           *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
     }
     
    -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with
    +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with
     // apply.
     func PodsMetricSource() *PodsMetricSourceApplyConfiguration {
     	return &PodsMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go
    index d6172011b7..f7a7777fd4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use
    +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use
     // with apply.
     type PodsMetricStatusApplyConfiguration struct {
     	MetricName          *string                             `json:"metricName,omitempty"`
    @@ -31,7 +31,7 @@ type PodsMetricStatusApplyConfiguration struct {
     	Selector            *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
     }
     
    -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with
    +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with
     // apply.
     func PodsMetricStatus() *PodsMetricStatusApplyConfiguration {
     	return &PodsMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go
    index 804f3f4926..ad97d83c3c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go
    @@ -23,7 +23,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use
    +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use
     // with apply.
     type ResourceMetricSourceApplyConfiguration struct {
     	Name                     *v1.ResourceName   `json:"name,omitempty"`
    @@ -31,7 +31,7 @@ type ResourceMetricSourceApplyConfiguration struct {
     	TargetAverageValue       *resource.Quantity `json:"targetAverageValue,omitempty"`
     }
     
    -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with
    +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with
     // apply.
     func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration {
     	return &ResourceMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go
    index 5fdc29c132..78fbeaad06 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go
    @@ -23,7 +23,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use
    +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use
     // with apply.
     type ResourceMetricStatusApplyConfiguration struct {
     	Name                      *v1.ResourceName   `json:"name,omitempty"`
    @@ -31,7 +31,7 @@ type ResourceMetricStatusApplyConfiguration struct {
     	CurrentAverageValue       *resource.Quantity `json:"currentAverageValue,omitempty"`
     }
     
    -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with
    +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with
     // apply.
     func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration {
     	return &ResourceMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go
    index aa334744ea..1050165ea3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use
    +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use
     // with apply.
     type ContainerResourceMetricSourceApplyConfiguration struct {
     	Name      *v1.ResourceName                `json:"name,omitempty"`
    @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct {
     	Container *string                         `json:"container,omitempty"`
     }
     
    -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with
    +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with
     // apply.
     func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration {
     	return &ContainerResourceMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go
    index bf0822a066..708f68bc6b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use
    +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use
     // with apply.
     type ContainerResourceMetricStatusApplyConfiguration struct {
     	Name      *v1.ResourceName                     `json:"name,omitempty"`
    @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct {
     	Container *string                              `json:"container,omitempty"`
     }
     
    -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with
    +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with
     // apply.
     func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration {
     	return &ContainerResourceMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go
    index 2903629bc8..c281084b16 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2beta2
     
    -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use
    +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
     // with apply.
     type CrossVersionObjectReferenceApplyConfiguration struct {
     	Kind       *string `json:"kind,omitempty"`
    @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     }
     
    -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with
    +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with
     // apply.
     func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration {
     	return &CrossVersionObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go
    index 80053a6b33..d34ca11494 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2beta2
     
    -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use
    +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use
     // with apply.
     type ExternalMetricSourceApplyConfiguration struct {
     	Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
     	Target *MetricTargetApplyConfiguration     `json:"target,omitempty"`
     }
     
    -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with
    +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with
     // apply.
     func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration {
     	return &ExternalMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go
    index 71ac35adbc..be29e607fa 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2beta2
     
    -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use
    +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use
     // with apply.
     type ExternalMetricStatusApplyConfiguration struct {
     	Metric  *MetricIdentifierApplyConfiguration  `json:"metric,omitempty"`
     	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
     }
     
    -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with
    +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with
     // apply.
     func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration {
     	return &ExternalMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
    index 1c750cb164..ce666f0f3e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use
    +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
     // with apply.
     type HorizontalPodAutoscalerApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct {
     	Status                           *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with
    +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
     // apply.
     func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration {
     	b := &HorizontalPodAutoscalerApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go
    index ec41bfadea..e9b1a9fb9e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2beta2
     
    -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use
    +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use
     // with apply.
     type HorizontalPodAutoscalerBehaviorApplyConfiguration struct {
     	ScaleUp   *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"`
     	ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"`
     }
     
    -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with
    +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with
     // apply.
     func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration {
     	return &HorizontalPodAutoscalerBehaviorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
    index 0f0cae75d3..a73e7ebaa8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use
    +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
     // with apply.
     type HorizontalPodAutoscalerConditionApplyConfiguration struct {
     	Type               *v2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct {
     	Message            *string                                       `json:"message,omitempty"`
     }
     
    -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with
    +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
     // apply.
     func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration {
     	return &HorizontalPodAutoscalerConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go
    index c60adee581..9629e4bd59 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2beta2
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use
    +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
     // with apply.
     type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration     `json:"scaleTargetRef,omitempty"`
    @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct {
     	Behavior       *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"`
     }
     
    -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with
    +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
     // apply.
     func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration {
     	return &HorizontalPodAutoscalerSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go
    index 881a874e51..1eee645050 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use
    +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
     // with apply.
     type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	ObservedGeneration *int64                                               `json:"observedGeneration,omitempty"`
    @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct {
     	Conditions         []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with
    +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
     // apply.
     func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration {
     	return &HorizontalPodAutoscalerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
    index 2a535891af..b799f99e0d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
    @@ -22,7 +22,7 @@ import (
     	v2beta2 "k8s.io/api/autoscaling/v2beta2"
     )
     
    -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use
    +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use
     // with apply.
     type HPAScalingPolicyApplyConfiguration struct {
     	Type          *v2beta2.HPAScalingPolicyType `json:"type,omitempty"`
    @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct {
     	PeriodSeconds *int32                        `json:"periodSeconds,omitempty"`
     }
     
    -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with
    +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with
     // apply.
     func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration {
     	return &HPAScalingPolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
    index 57c917b894..f7e8d9ae3e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
    @@ -22,7 +22,7 @@ import (
     	v2beta2 "k8s.io/api/autoscaling/v2beta2"
     )
     
    -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use
    +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use
     // with apply.
     type HPAScalingRulesApplyConfiguration struct {
     	StabilizationWindowSeconds *int32                               `json:"stabilizationWindowSeconds,omitempty"`
    @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct {
     	Policies                   []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
     }
     
    -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with
    +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with
     // apply.
     func HPAScalingRules() *HPAScalingRulesApplyConfiguration {
     	return &HPAScalingRulesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go
    index 70cbd4e815..e8b2abb0e6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use
    +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use
     // with apply.
     type MetricIdentifierApplyConfiguration struct {
     	Name     *string                             `json:"name,omitempty"`
     	Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
     }
     
    -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with
    +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with
     // apply.
     func MetricIdentifier() *MetricIdentifierApplyConfiguration {
     	return &MetricIdentifierApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
    index 1e7ee1419d..3ec7108618 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
    @@ -22,7 +22,7 @@ import (
     	v2beta2 "k8s.io/api/autoscaling/v2beta2"
     )
     
    -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use
    +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
     // with apply.
     type MetricSpecApplyConfiguration struct {
     	Type              *v2beta2.MetricSourceType                        `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct {
     	External          *ExternalMetricSourceApplyConfiguration          `json:"external,omitempty"`
     }
     
    -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with
    +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with
     // apply.
     func MetricSpec() *MetricSpecApplyConfiguration {
     	return &MetricSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
    index 353ec6d943..40d32795bc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
    @@ -22,7 +22,7 @@ import (
     	v2beta2 "k8s.io/api/autoscaling/v2beta2"
     )
     
    -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use
    +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
     // with apply.
     type MetricStatusApplyConfiguration struct {
     	Type              *v2beta2.MetricSourceType                        `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct {
     	External          *ExternalMetricStatusApplyConfiguration          `json:"external,omitempty"`
     }
     
    -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with
    +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with
     // apply.
     func MetricStatus() *MetricStatusApplyConfiguration {
     	return &MetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
    index fbf006a5a6..aeec3102ee 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
    @@ -23,7 +23,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use
    +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use
     // with apply.
     type MetricTargetApplyConfiguration struct {
     	Type               *v2beta2.MetricTargetType `json:"type,omitempty"`
    @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct {
     	AverageUtilization *int32                    `json:"averageUtilization,omitempty"`
     }
     
    -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with
    +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with
     // apply.
     func MetricTarget() *MetricTargetApplyConfiguration {
     	return &MetricTargetApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go
    index 5796a0b4c1..cc409fc283 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go
    @@ -22,7 +22,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use
    +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use
     // with apply.
     type MetricValueStatusApplyConfiguration struct {
     	Value              *resource.Quantity `json:"value,omitempty"`
    @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct {
     	AverageUtilization *int32             `json:"averageUtilization,omitempty"`
     }
     
    -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with
    +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with
     // apply.
     func MetricValueStatus() *MetricValueStatusApplyConfiguration {
     	return &MetricValueStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go
    index eed31dab61..17b492fa06 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2beta2
     
    -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use
    +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use
     // with apply.
     type ObjectMetricSourceApplyConfiguration struct {
     	DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
    @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct {
     	Metric          *MetricIdentifierApplyConfiguration            `json:"metric,omitempty"`
     }
     
    -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with
    +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with
     // apply.
     func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration {
     	return &ObjectMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go
    index 175e2120d6..e87417f2e7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v2beta2
     
    -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use
    +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use
     // with apply.
     type ObjectMetricStatusApplyConfiguration struct {
     	Metric          *MetricIdentifierApplyConfiguration            `json:"metric,omitempty"`
    @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct {
     	DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
     }
     
    -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with
    +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with
     // apply.
     func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration {
     	return &ObjectMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go
    index 0365880950..6ecbb18071 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2beta2
     
    -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use
    +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use
     // with apply.
     type PodsMetricSourceApplyConfiguration struct {
     	Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
     	Target *MetricTargetApplyConfiguration     `json:"target,omitempty"`
     }
     
    -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with
    +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with
     // apply.
     func PodsMetricSource() *PodsMetricSourceApplyConfiguration {
     	return &PodsMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go
    index e6f98be8c4..cd10297261 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v2beta2
     
    -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use
    +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use
     // with apply.
     type PodsMetricStatusApplyConfiguration struct {
     	Metric  *MetricIdentifierApplyConfiguration  `json:"metric,omitempty"`
     	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
     }
     
    -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with
    +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with
     // apply.
     func PodsMetricStatus() *PodsMetricStatusApplyConfiguration {
     	return &PodsMetricStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go
    index cc8118d5e3..c482d75f4b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use
    +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use
     // with apply.
     type ResourceMetricSourceApplyConfiguration struct {
     	Name   *v1.ResourceName                `json:"name,omitempty"`
     	Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
     }
     
    -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with
    +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with
     // apply.
     func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration {
     	return &ResourceMetricSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go
    index 0ab56be0f7..eb13e90b7d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use
    +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use
     // with apply.
     type ResourceMetricStatusApplyConfiguration struct {
     	Name    *v1.ResourceName                     `json:"name,omitempty"`
     	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
     }
     
    -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with
    +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with
     // apply.
     func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration {
     	return &ResourceMetricStatusApplyConfiguration{}
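
For orientation, a hedged sketch of the builder pattern these generated autoscaling/v2beta2 apply configurations expose, chaining With* setters for a CPU-utilization metric; the target object, replica counts, and threshold are assumptions for illustration:

package main

import (
	"fmt"

	autoscalingv2beta2api "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
	autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

func main() {
	// Every field is a pointer, so only what is set here is sent on apply.
	spec := autoscalingv2beta2.HorizontalPodAutoscalerSpec().
		WithScaleTargetRef(autoscalingv2beta2.CrossVersionObjectReference().
			WithAPIVersion("apps/v1").
			WithKind("Deployment").
			WithName("web")).
		WithMinReplicas(2).
		WithMaxReplicas(10).
		WithMetrics(autoscalingv2beta2.MetricSpec().
			WithType(autoscalingv2beta2api.ResourceMetricSourceType).
			WithResource(autoscalingv2beta2.ResourceMetricSource().
				WithName(corev1.ResourceCPU).
				WithTarget(autoscalingv2beta2.MetricTarget().
					WithType(autoscalingv2beta2api.UtilizationMetricType).
					WithAverageUtilization(80))))

	fmt.Println("max replicas:", *spec.MaxReplicas)
}
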
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
    index 5225a5a079..8b26816e58 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use
    +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use
     // with apply.
     type CronJobApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct {
     	Status                           *CronJobStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// CronJob constructs an declarative configuration of the CronJob type for use with
    +// CronJob constructs a declarative configuration of the CronJob type for use with
     // apply.
     func CronJob(name, namespace string) *CronJobApplyConfiguration {
     	b := &CronJobApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CronJobApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
    index 22a34dcb61..62f9b5298b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/batch/v1"
     )
     
    -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use
    +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use
     // with apply.
     type CronJobSpecApplyConfiguration struct {
     	Schedule                   *string                            `json:"schedule,omitempty"`
    @@ -35,7 +35,7 @@ type CronJobSpecApplyConfiguration struct {
     	FailedJobsHistoryLimit     *int32                             `json:"failedJobsHistoryLimit,omitempty"`
     }
     
    -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with
    +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with
     // apply.
     func CronJobSpec() *CronJobSpecApplyConfiguration {
     	return &CronJobSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
    index b7cc2bdfb5..095dfe017f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use
    +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use
     // with apply.
     type CronJobStatusApplyConfiguration struct {
     	Active             []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
    @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct {
     	LastSuccessfulTime *metav1.Time                           `json:"lastSuccessfulTime,omitempty"`
     }
     
    -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with
    +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with
     // apply.
     func CronJobStatus() *CronJobStatusApplyConfiguration {
     	return &CronJobStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
    index fb10ba3968..1333e91844 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// JobApplyConfiguration represents an declarative configuration of the Job type for use
    +// JobApplyConfiguration represents a declarative configuration of the Job type for use
     // with apply.
     type JobApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type JobApplyConfiguration struct {
     	Status                           *JobStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Job constructs an declarative configuration of the Job type for use with
    +// Job constructs a declarative configuration of the Job type for use with
     // apply.
     func Job(name, namespace string) *JobApplyConfiguration {
     	b := &JobApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *JobApplyConfiguration) WithStatus(value *JobStatusApplyConfiguration) *
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *JobApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
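
Likewise, a hedged sketch of server-side apply using the batch/v1 apply configurations touched above; the job name, image, and field manager name are illustrative assumptions:

package example

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyJob server-side-applies a small one-shot Job; "example-manager" is an
// illustrative field manager name, not one used by this project.
func applyJob(ctx context.Context, cs kubernetes.Interface, ns string) (*batchv1.Job, error) {
	job := batchv1ac.Job("demo", ns).
		WithSpec(batchv1ac.JobSpec().
			WithBackoffLimit(1).
			WithTemplate(corev1ac.PodTemplateSpec().
				WithSpec(corev1ac.PodSpec().
					WithRestartPolicy(corev1.RestartPolicyNever).
					WithContainers(corev1ac.Container().
						WithName("main").
						WithImage("busybox").
						WithCommand("true")))))

	// Apply sends only the fields set above and records ownership under the
	// given field manager.
	return cs.BatchV1().Jobs(ns).Apply(ctx, job, metav1.ApplyOptions{
		FieldManager: "example-manager",
	})
}
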
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
    index 388ca7a1c0..4f15bc6045 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// JobConditionApplyConfiguration represents an declarative configuration of the JobCondition type for use
    +// JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use
     // with apply.
     type JobConditionApplyConfiguration struct {
     	Type               *v1.JobConditionType    `json:"type,omitempty"`
    @@ -35,7 +35,7 @@ type JobConditionApplyConfiguration struct {
     	Message            *string                 `json:"message,omitempty"`
     }
     
    -// JobConditionApplyConfiguration constructs an declarative configuration of the JobCondition type for use with
    +// JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with
     // apply.
     func JobCondition() *JobConditionApplyConfiguration {
     	return &JobConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
    index bbcff71c86..2104fe113d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// JobSpecApplyConfiguration represents an declarative configuration of the JobSpec type for use
    +// JobSpecApplyConfiguration represents a declarative configuration of the JobSpec type for use
     // with apply.
     type JobSpecApplyConfiguration struct {
     	Parallelism             *int32                                    `json:"parallelism,omitempty"`
    @@ -45,7 +45,7 @@ type JobSpecApplyConfiguration struct {
     	ManagedBy               *string                                   `json:"managedBy,omitempty"`
     }
     
    -// JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with
    +// JobSpecApplyConfiguration constructs a declarative configuration of the JobSpec type for use with
     // apply.
     func JobSpec() *JobSpecApplyConfiguration {
     	return &JobSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
    index e8e472f8f7..071a0153f5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
    @@ -22,7 +22,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// JobStatusApplyConfiguration represents an declarative configuration of the JobStatus type for use
    +// JobStatusApplyConfiguration represents a declarative configuration of the JobStatus type for use
     // with apply.
     type JobStatusApplyConfiguration struct {
     	Conditions              []JobConditionApplyConfiguration           `json:"conditions,omitempty"`
    @@ -38,7 +38,7 @@ type JobStatusApplyConfiguration struct {
     	Ready                   *int32                                     `json:"ready,omitempty"`
     }
     
    -// JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with
    +// JobStatusApplyConfiguration constructs a declarative configuration of the JobStatus type for use with
     // apply.
     func JobStatus() *JobStatusApplyConfiguration {
     	return &JobStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
    index b37a815680..901c4228e0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
    @@ -24,14 +24,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use
    +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use
     // with apply.
     type JobTemplateSpecApplyConfiguration struct {
     	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
     	Spec                             *JobSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with
    +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with
     // apply.
     func JobTemplateSpec() *JobTemplateSpecApplyConfiguration {
     	return &JobTemplateSpecApplyConfiguration{}
    @@ -186,3 +186,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *JobSpecApplyConfigur
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *JobTemplateSpecApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go
    index 6da98386c9..05a68b3c94 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// PodFailurePolicyApplyConfiguration represents an declarative configuration of the PodFailurePolicy type for use
    +// PodFailurePolicyApplyConfiguration represents a declarative configuration of the PodFailurePolicy type for use
     // with apply.
     type PodFailurePolicyApplyConfiguration struct {
     	Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"`
     }
     
    -// PodFailurePolicyApplyConfiguration constructs an declarative configuration of the PodFailurePolicy type for use with
    +// PodFailurePolicyApplyConfiguration constructs a declarative configuration of the PodFailurePolicy type for use with
     // apply.
     func PodFailurePolicy() *PodFailurePolicyApplyConfiguration {
     	return &PodFailurePolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
    index 65f6251810..cd32296ca0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/batch/v1"
     )
     
    -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use
    +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use
     // with apply.
     type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct {
     	ContainerName *string                                 `json:"containerName,omitempty"`
    @@ -30,7 +30,7 @@ type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct {
     	Values        []int32                                 `json:"values,omitempty"`
     }
     
    -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with
    +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with
     // apply.
     func PodFailurePolicyOnExitCodesRequirement() *PodFailurePolicyOnExitCodesRequirementApplyConfiguration {
     	return &PodFailurePolicyOnExitCodesRequirementApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
    index da1556ff8b..07af4fb0e7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use
    +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use
     // with apply.
     type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct {
     	Type   *v1.PodConditionType `json:"type,omitempty"`
     	Status *v1.ConditionStatus  `json:"status,omitempty"`
     }
     
    -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with
    +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with
     // apply.
     func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
     	return &PodFailurePolicyOnPodConditionsPatternApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
    index d435243531..b004921d38 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/batch/v1"
     )
     
    -// PodFailurePolicyRuleApplyConfiguration represents an declarative configuration of the PodFailurePolicyRule type for use
    +// PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use
     // with apply.
     type PodFailurePolicyRuleApplyConfiguration struct {
     	Action          *v1.PodFailurePolicyAction                                 `json:"action,omitempty"`
    @@ -30,7 +30,7 @@ type PodFailurePolicyRuleApplyConfiguration struct {
     	OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"`
     }
     
    -// PodFailurePolicyRuleApplyConfiguration constructs an declarative configuration of the PodFailurePolicyRule type for use with
    +// PodFailurePolicyRuleApplyConfiguration constructs a declarative configuration of the PodFailurePolicyRule type for use with
     // apply.
     func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration {
     	return &PodFailurePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go
    index 327aa1f5a4..a3f4f39e2e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// SuccessPolicyApplyConfiguration represents an declarative configuration of the SuccessPolicy type for use
    +// SuccessPolicyApplyConfiguration represents a declarative configuration of the SuccessPolicy type for use
     // with apply.
     type SuccessPolicyApplyConfiguration struct {
     	Rules []SuccessPolicyRuleApplyConfiguration `json:"rules,omitempty"`
     }
     
    -// SuccessPolicyApplyConfiguration constructs an declarative configuration of the SuccessPolicy type for use with
    +// SuccessPolicyApplyConfiguration constructs a declarative configuration of the SuccessPolicy type for use with
     // apply.
     func SuccessPolicy() *SuccessPolicyApplyConfiguration {
     	return &SuccessPolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go
    index 4c862e6821..2b5e3d91fe 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// SuccessPolicyRuleApplyConfiguration represents an declarative configuration of the SuccessPolicyRule type for use
    +// SuccessPolicyRuleApplyConfiguration represents a declarative configuration of the SuccessPolicyRule type for use
     // with apply.
     type SuccessPolicyRuleApplyConfiguration struct {
     	SucceededIndexes *string `json:"succeededIndexes,omitempty"`
     	SucceededCount   *int32  `json:"succeededCount,omitempty"`
     }
     
    -// SuccessPolicyRuleApplyConfiguration constructs an declarative configuration of the SuccessPolicyRule type for use with
    +// SuccessPolicyRuleApplyConfiguration constructs a declarative configuration of the SuccessPolicyRule type for use with
     // apply.
     func SuccessPolicyRule() *SuccessPolicyRuleApplyConfiguration {
     	return &SuccessPolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
    index 1409303fff..ff6b57b86c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
    @@ -22,14 +22,14 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     )
     
    -// UncountedTerminatedPodsApplyConfiguration represents an declarative configuration of the UncountedTerminatedPods type for use
    +// UncountedTerminatedPodsApplyConfiguration represents a declarative configuration of the UncountedTerminatedPods type for use
     // with apply.
     type UncountedTerminatedPodsApplyConfiguration struct {
     	Succeeded []types.UID `json:"succeeded,omitempty"`
     	Failed    []types.UID `json:"failed,omitempty"`
     }
     
    -// UncountedTerminatedPodsApplyConfiguration constructs an declarative configuration of the UncountedTerminatedPods type for use with
    +// UncountedTerminatedPodsApplyConfiguration constructs a declarative configuration of the UncountedTerminatedPods type for use with
     // apply.
     func UncountedTerminatedPods() *UncountedTerminatedPodsApplyConfiguration {
     	return &UncountedTerminatedPodsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
    index 1d735a8407..765ed5e651 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use
    +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use
     // with apply.
     type CronJobApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct {
     	Status                           *CronJobStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// CronJob constructs an declarative configuration of the CronJob type for use with
    +// CronJob constructs a declarative configuration of the CronJob type for use with
     // apply.
     func CronJob(name, namespace string) *CronJobApplyConfiguration {
     	b := &CronJobApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CronJobApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
    index 68c0777de0..21043690da 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
    @@ -22,7 +22,7 @@ import (
     	v1beta1 "k8s.io/api/batch/v1beta1"
     )
     
    -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use
    +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use
     // with apply.
     type CronJobSpecApplyConfiguration struct {
     	Schedule                   *string                            `json:"schedule,omitempty"`
    @@ -35,7 +35,7 @@ type CronJobSpecApplyConfiguration struct {
     	FailedJobsHistoryLimit     *int32                             `json:"failedJobsHistoryLimit,omitempty"`
     }
     
    -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with
    +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with
     // apply.
     func CronJobSpec() *CronJobSpecApplyConfiguration {
     	return &CronJobSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go
    index 8dca14f663..335f9e0dce 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use
    +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use
     // with apply.
     type CronJobStatusApplyConfiguration struct {
     	Active             []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
    @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct {
     	LastSuccessfulTime *metav1.Time                           `json:"lastSuccessfulTime,omitempty"`
     }
     
    -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with
    +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with
     // apply.
     func CronJobStatus() *CronJobStatusApplyConfiguration {
     	return &CronJobStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
    index f925d65a7e..5fd2485c69 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
    @@ -25,14 +25,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use
    +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use
     // with apply.
     type JobTemplateSpecApplyConfiguration struct {
     	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
     	Spec                             *batchv1.JobSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with
    +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with
     // apply.
     func JobTemplateSpec() *JobTemplateSpecApplyConfiguration {
     	return &JobTemplateSpecApplyConfiguration{}
    @@ -187,3 +187,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *batchv1.JobSpecApply
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *JobTemplateSpecApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
    index 3d02c0be80..e30bb62427 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use
    +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use
     // with apply.
     type CertificateSigningRequestApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct {
     	Status                           *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with
    +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with
     // apply.
     func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration {
     	b := &CertificateSigningRequestApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
    index 13d69cfcef..7a4bfce011 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use
    +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use
     // with apply.
     type CertificateSigningRequestConditionApplyConfiguration struct {
     	Type               *v1.RequestConditionType `json:"type,omitempty"`
    @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct {
     	LastTransitionTime *metav1.Time             `json:"lastTransitionTime,omitempty"`
     }
     
    -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with
    +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with
     // apply.
     func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration {
     	return &CertificateSigningRequestConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
    index 81ca214a9d..9c4a85693a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/certificates/v1"
     )
     
    -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use
    +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use
     // with apply.
     type CertificateSigningRequestSpecApplyConfiguration struct {
     	Request           []byte                   `json:"request,omitempty"`
    @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct {
     	Extra             map[string]v1.ExtraValue `json:"extra,omitempty"`
     }
     
    -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with
    +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with
     // apply.
     func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration {
     	return &CertificateSigningRequestSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go
    index 59d5930331..897f6d1e98 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use
    +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use
     // with apply.
     type CertificateSigningRequestStatusApplyConfiguration struct {
     	Conditions  []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"`
     	Certificate []byte                                                 `json:"certificate,omitempty"`
     }
     
    -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with
    +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with
     // apply.
     func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration {
     	return &CertificateSigningRequestStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
    index 788d2a07dc..9cd10bc56a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use
    +// ClusterTrustBundleApplyConfiguration represents a declarative configuration of the ClusterTrustBundle type for use
     // with apply.
     type ClusterTrustBundleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type ClusterTrustBundleApplyConfiguration struct {
     	Spec                             *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with
    +// ClusterTrustBundle constructs a declarative configuration of the ClusterTrustBundle type for use with
     // apply.
     func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration {
     	b := &ClusterTrustBundleApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundl
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ClusterTrustBundleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go
    index d1aea1d6dc..7bb36f7084 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use
    +// ClusterTrustBundleSpecApplyConfiguration represents a declarative configuration of the ClusterTrustBundleSpec type for use
     // with apply.
     type ClusterTrustBundleSpecApplyConfiguration struct {
     	SignerName  *string `json:"signerName,omitempty"`
     	TrustBundle *string `json:"trustBundle,omitempty"`
     }
     
    -// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with
    +// ClusterTrustBundleSpecApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleSpec type for use with
     // apply.
     func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration {
     	return &ClusterTrustBundleSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
    index 83a0edc18f..d6e08824a4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use
    +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use
     // with apply.
     type CertificateSigningRequestApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct {
     	Status                           *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with
    +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with
     // apply.
     func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration {
     	b := &CertificateSigningRequestApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
    index 2c32a3272c..6e3692d1c2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use
    +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use
     // with apply.
     type CertificateSigningRequestConditionApplyConfiguration struct {
     	Type               *v1beta1.RequestConditionType `json:"type,omitempty"`
    @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct {
     	LastTransitionTime *metav1.Time                  `json:"lastTransitionTime,omitempty"`
     }
     
    -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with
    +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with
     // apply.
     func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration {
     	return &CertificateSigningRequestConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
    index 9554b1f400..9284eca3a4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
    @@ -22,7 +22,7 @@ import (
     	v1beta1 "k8s.io/api/certificates/v1beta1"
     )
     
    -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use
    +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use
     // with apply.
     type CertificateSigningRequestSpecApplyConfiguration struct {
     	Request           []byte                        `json:"request,omitempty"`
    @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct {
     	Extra             map[string]v1beta1.ExtraValue `json:"extra,omitempty"`
     }
     
    -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with
    +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with
     // apply.
     func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration {
     	return &CertificateSigningRequestSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go
    index 9d8c5d4585..f82e8aed3b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use
    +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use
     // with apply.
     type CertificateSigningRequestStatusApplyConfiguration struct {
     	Conditions  []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"`
     	Certificate []byte                                                 `json:"certificate,omitempty"`
     }
     
    -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with
    +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with
     // apply.
     func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration {
     	return &CertificateSigningRequestStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
    index 618f12fb21..ffd84583f4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use
    +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use
     // with apply.
     type LeaseApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct {
     	Spec                             *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// Lease constructs an declarative configuration of the Lease type for use with
    +// Lease constructs a declarative configuration of the Lease type for use with
     // apply.
     func Lease(name, namespace string) *LeaseApplyConfiguration {
     	b := &LeaseApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) *
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *LeaseApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
    index a5f6a6ebba..01d0df1380 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
    @@ -19,20 +19,23 @@ limitations under the License.
     package v1
     
     import (
    +	coordinationv1 "k8s.io/api/coordination/v1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use
    +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use
     // with apply.
     type LeaseSpecApplyConfiguration struct {
    -	HolderIdentity       *string       `json:"holderIdentity,omitempty"`
    -	LeaseDurationSeconds *int32        `json:"leaseDurationSeconds,omitempty"`
    -	AcquireTime          *v1.MicroTime `json:"acquireTime,omitempty"`
    -	RenewTime            *v1.MicroTime `json:"renewTime,omitempty"`
    -	LeaseTransitions     *int32        `json:"leaseTransitions,omitempty"`
    +	HolderIdentity       *string                                  `json:"holderIdentity,omitempty"`
    +	LeaseDurationSeconds *int32                                   `json:"leaseDurationSeconds,omitempty"`
    +	AcquireTime          *v1.MicroTime                            `json:"acquireTime,omitempty"`
    +	RenewTime            *v1.MicroTime                            `json:"renewTime,omitempty"`
    +	LeaseTransitions     *int32                                   `json:"leaseTransitions,omitempty"`
    +	Strategy             *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
    +	PreferredHolder      *string                                  `json:"preferredHolder,omitempty"`
     }
     
    -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with
    +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with
     // apply.
     func LeaseSpec() *LeaseSpecApplyConfiguration {
     	return &LeaseSpecApplyConfiguration{}
    @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp
     	b.LeaseTransitions = &value
     	return b
     }
    +
    +// WithStrategy sets the Strategy field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Strategy field is set to the value of the last call.
    +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration {
    +	b.Strategy = &value
    +	return b
    +}
    +
    +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the PreferredHolder field is set to the value of the last call.
    +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration {
    +	b.PreferredHolder = &value
    +	return b
    +}
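
    The LeaseSpec builder gains chainable setters for the two new coordinated-leader-election fields above. A minimal sketch of using them, assuming illustrative holder identities; the strategy value is written as an explicit string conversion because this diff only confirms that WithStrategy accepts a coordinationv1.CoordinatedLeaseStrategy, not any particular named constant:

    package main

    import (
    	"fmt"

    	coordinationv1 "k8s.io/api/coordination/v1"
    	coordv1ac "k8s.io/client-go/applyconfigurations/coordination/v1"
    )

    func main() {
    	// Each With* call sets one optional field and returns the receiver for chaining.
    	spec := coordv1ac.LeaseSpec().
    		WithHolderIdentity("node-a").
    		WithLeaseDurationSeconds(15).
    		WithStrategy(coordinationv1.CoordinatedLeaseStrategy("OldestEmulationVersion")).
    		WithPreferredHolder("node-b")
    	fmt.Println(*spec.Strategy, *spec.PreferredHolder)
    }
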
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go
    new file mode 100644
    index 0000000000..ef76847791
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go
    @@ -0,0 +1,255 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// LeaseCandidateApplyConfiguration represents a declarative configuration of the LeaseCandidate type for use
    +// with apply.
    +type LeaseCandidateApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"`
    +}
    +
    +// LeaseCandidate constructs a declarative configuration of the LeaseCandidate type for use with
    +// apply.
    +func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration {
    +	b := &LeaseCandidateApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithNamespace(namespace)
    +	b.WithKind("LeaseCandidate")
    +	b.WithAPIVersion("coordination.k8s.io/v1alpha1")
    +	return b
    +}
    +
    +// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from
    +// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a
    +// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable),
    +// APIVersion and Kind populated. It is possible that no managed fields were found because other
    +// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    +// the fieldManager never owned any fields.
    +// leaseCandidate must be an unmodified LeaseCandidate API object that was retrieved from the Kubernetes API.
    +// ExtractLeaseCandidate provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
    +	return extractLeaseCandidate(leaseCandidate, fieldManager, "")
    +}
    +
    +// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
    +	return extractLeaseCandidate(leaseCandidate, fieldManager, "status")
    +}
    +
    +func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) {
    +	b := &LeaseCandidateApplyConfiguration{}
    +	err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha1.LeaseCandidate"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(leaseCandidate.Name)
    +	b.WithNamespace(leaseCandidate.Namespace)
    +
    +	b.WithKind("LeaseCandidate")
    +	b.WithAPIVersion("coordination.k8s.io/v1alpha1")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidateApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCandidateApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Labels field,
    +// overwriting existing map entries in the Labels field with the same key.
    +func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Annotations field,
    +// overwriting existing map entries in the Annotations field with the same key.
    +func (b *LeaseCandidateApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *LeaseCandidateApplyConfiguration) WithFinalizers(values ...string) *LeaseCandidateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *LeaseCandidateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *LeaseCandidateApplyConfiguration) WithSpec(value *LeaseCandidateSpecApplyConfiguration) *LeaseCandidateApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *LeaseCandidateApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
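
The hunks above complete the generated builder for the new coordination/v1alpha1 LeaseCandidate apply configuration: map setters (WithLabels, WithAnnotations) merge entries across calls, slice setters (WithOwnerReferences, WithFinalizers) append, and the new GetName accessor reads the name back. A minimal usage sketch follows; it assumes the LeaseCandidate(name, namespace) constructor defined earlier in this generated file and a module pinned to this client-go drop.

package main

import (
	"fmt"

	coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1"
)

func main() {
	// The LeaseCandidate(name, namespace) constructor is assumed from earlier in this generated file.
	// Repeated WithLabels calls merge entries into one map; WithFinalizers appends.
	lc := coordinationv1alpha1.LeaseCandidate("scheduler-candidate", "kube-system").
		WithLabels(map[string]string{"app": "kube-scheduler"}).
		WithLabels(map[string]string{"tier": "control-plane"}).
		WithAnnotations(map[string]string{"example.com/note": "demo"}).
		WithFinalizers("example.com/cleanup")

	// GetName, added in this update, reads the name without touching ObjectMeta directly.
	fmt.Println(*lc.GetName())
}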
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go
    new file mode 100644
    index 0000000000..61d3dca10b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go
    @@ -0,0 +1,91 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	coordinationv1 "k8s.io/api/coordination/v1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +)
    +
    +// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use
    +// with apply.
    +type LeaseCandidateSpecApplyConfiguration struct {
    +	LeaseName           *string                                   `json:"leaseName,omitempty"`
    +	PingTime            *v1.MicroTime                             `json:"pingTime,omitempty"`
    +	RenewTime           *v1.MicroTime                             `json:"renewTime,omitempty"`
    +	BinaryVersion       *string                                   `json:"binaryVersion,omitempty"`
    +	EmulationVersion    *string                                   `json:"emulationVersion,omitempty"`
    +	PreferredStrategies []coordinationv1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty"`
    +}
    +
    +// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with
    +// apply.
    +func LeaseCandidateSpec() *LeaseCandidateSpecApplyConfiguration {
    +	return &LeaseCandidateSpecApplyConfiguration{}
    +}
    +
    +// WithLeaseName sets the LeaseName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the LeaseName field is set to the value of the last call.
    +func (b *LeaseCandidateSpecApplyConfiguration) WithLeaseName(value string) *LeaseCandidateSpecApplyConfiguration {
    +	b.LeaseName = &value
    +	return b
    +}
    +
    +// WithPingTime sets the PingTime field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the PingTime field is set to the value of the last call.
    +func (b *LeaseCandidateSpecApplyConfiguration) WithPingTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration {
    +	b.PingTime = &value
    +	return b
    +}
    +
    +// WithRenewTime sets the RenewTime field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the RenewTime field is set to the value of the last call.
    +func (b *LeaseCandidateSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration {
    +	b.RenewTime = &value
    +	return b
    +}
    +
    +// WithBinaryVersion sets the BinaryVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the BinaryVersion field is set to the value of the last call.
    +func (b *LeaseCandidateSpecApplyConfiguration) WithBinaryVersion(value string) *LeaseCandidateSpecApplyConfiguration {
    +	b.BinaryVersion = &value
    +	return b
    +}
    +
    +// WithEmulationVersion sets the EmulationVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the EmulationVersion field is set to the value of the last call.
    +func (b *LeaseCandidateSpecApplyConfiguration) WithEmulationVersion(value string) *LeaseCandidateSpecApplyConfiguration {
    +	b.EmulationVersion = &value
    +	return b
    +}
    +
    +// WithPreferredStrategies adds the given value to the PreferredStrategies field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the PreferredStrategies field.
    +func (b *LeaseCandidateSpecApplyConfiguration) WithPreferredStrategies(values ...coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration {
    +	for i := range values {
    +		b.PreferredStrategies = append(b.PreferredStrategies, values[i])
    +	}
    +	return b
    +}
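
The new leasecandidatespec.go above generates pointer-based setters for every LeaseCandidateSpec field. A small sketch of building a spec to pass to the parent builder's WithSpec; the OldestEmulationVersion strategy constant from k8s.io/api/coordination/v1 is an assumption, not something shown in this diff.

package main

import (
	"fmt"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1"
)

func main() {
	// Each With* call stores a pointer to its argument; scalar setters overwrite,
	// WithPreferredStrategies appends.
	spec := coordinationv1alpha1.LeaseCandidateSpec().
		WithLeaseName("kube-scheduler").
		WithBinaryVersion("1.31.0").
		WithEmulationVersion("1.31.0").
		WithRenewTime(metav1.NewMicroTime(time.Now())).
		WithPreferredStrategies(coordinationv1.OldestEmulationVersion) // strategy constant assumed from coordination/v1

	fmt.Println(*spec.LeaseName, spec.PreferredStrategies)
}

The resulting spec is wired into the object with LeaseCandidate(...).WithSpec(spec), as shown in the previous sketch.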
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
    index 867e0f58ba..9aa0703e8e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use
    +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use
     // with apply.
     type LeaseApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct {
     	Spec                             *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// Lease constructs an declarative configuration of the Lease type for use with
    +// Lease constructs a declarative configuration of the Lease type for use with
     // apply.
     func Lease(name, namespace string) *LeaseApplyConfiguration {
     	b := &LeaseApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) *
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *LeaseApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go
    index 865eb76455..8c7fddfc61 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go
    @@ -19,20 +19,23 @@ limitations under the License.
     package v1beta1
     
     import (
    +	coordinationv1 "k8s.io/api/coordination/v1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use
    +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use
     // with apply.
     type LeaseSpecApplyConfiguration struct {
    -	HolderIdentity       *string       `json:"holderIdentity,omitempty"`
    -	LeaseDurationSeconds *int32        `json:"leaseDurationSeconds,omitempty"`
    -	AcquireTime          *v1.MicroTime `json:"acquireTime,omitempty"`
    -	RenewTime            *v1.MicroTime `json:"renewTime,omitempty"`
    -	LeaseTransitions     *int32        `json:"leaseTransitions,omitempty"`
    +	HolderIdentity       *string                                  `json:"holderIdentity,omitempty"`
    +	LeaseDurationSeconds *int32                                   `json:"leaseDurationSeconds,omitempty"`
    +	AcquireTime          *v1.MicroTime                            `json:"acquireTime,omitempty"`
    +	RenewTime            *v1.MicroTime                            `json:"renewTime,omitempty"`
    +	LeaseTransitions     *int32                                   `json:"leaseTransitions,omitempty"`
    +	Strategy             *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
    +	PreferredHolder      *string                                  `json:"preferredHolder,omitempty"`
     }
     
    -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with
    +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with
     // apply.
     func LeaseSpec() *LeaseSpecApplyConfiguration {
     	return &LeaseSpecApplyConfiguration{}
    @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp
     	b.LeaseTransitions = &value
     	return b
     }
    +
    +// WithStrategy sets the Strategy field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Strategy field is set to the value of the last call.
    +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration {
    +	b.Strategy = &value
    +	return b
    +}
    +
    +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the PreferredHolder field is set to the value of the last call.
    +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration {
    +	b.PreferredHolder = &value
    +	return b
    +}
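
The v1beta1 LeaseSpec builder gains Strategy and PreferredHolder setters mirroring the coordinated-leader-election fields added to the Lease API, alongside the new GetName accessor on the Lease builder. A short sketch, again assuming the coordination/v1 strategy constant:

package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1"
)

func main() {
	spec := coordinationv1beta1.LeaseSpec().
		WithHolderIdentity("node-a").
		WithLeaseDurationSeconds(15).
		WithStrategy(coordinationv1.OldestEmulationVersion). // strategy constant assumed
		WithPreferredHolder("node-b")

	lease := coordinationv1beta1.Lease("leader-lock", "kube-system").
		WithSpec(spec)

	fmt.Println(*lease.GetName(), *lease.Spec.Strategy)
}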
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go
    index df6d1c64e5..45484f140d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// AffinityApplyConfiguration represents an declarative configuration of the Affinity type for use
    +// AffinityApplyConfiguration represents a declarative configuration of the Affinity type for use
     // with apply.
     type AffinityApplyConfiguration struct {
     	NodeAffinity    *NodeAffinityApplyConfiguration    `json:"nodeAffinity,omitempty"`
    @@ -26,7 +26,7 @@ type AffinityApplyConfiguration struct {
     	PodAntiAffinity *PodAntiAffinityApplyConfiguration `json:"podAntiAffinity,omitempty"`
     }
     
    -// AffinityApplyConfiguration constructs an declarative configuration of the Affinity type for use with
    +// AffinityApplyConfiguration constructs a declarative configuration of the Affinity type for use with
     // apply.
     func Affinity() *AffinityApplyConfiguration {
     	return &AffinityApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
    index 7f3c22afa1..1d698fd610 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// AppArmorProfileApplyConfiguration represents an declarative configuration of the AppArmorProfile type for use
    +// AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use
     // with apply.
     type AppArmorProfileApplyConfiguration struct {
     	Type             *v1.AppArmorProfileType `json:"type,omitempty"`
     	LocalhostProfile *string                 `json:"localhostProfile,omitempty"`
     }
     
    -// AppArmorProfileApplyConfiguration constructs an declarative configuration of the AppArmorProfile type for use with
    +// AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with
     // apply.
     func AppArmorProfile() *AppArmorProfileApplyConfiguration {
     	return &AppArmorProfileApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
    index 970bf24c45..e4c2fff3f6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// AttachedVolumeApplyConfiguration represents an declarative configuration of the AttachedVolume type for use
    +// AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use
     // with apply.
     type AttachedVolumeApplyConfiguration struct {
     	Name       *v1.UniqueVolumeName `json:"name,omitempty"`
     	DevicePath *string              `json:"devicePath,omitempty"`
     }
     
    -// AttachedVolumeApplyConfiguration constructs an declarative configuration of the AttachedVolume type for use with
    +// AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with
     // apply.
     func AttachedVolume() *AttachedVolumeApplyConfiguration {
     	return &AttachedVolumeApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go
    index 6ff335e9d6..d08786965e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use
    +// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use
     // with apply.
     type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct {
     	VolumeID  *string `json:"volumeID,omitempty"`
    @@ -27,7 +27,7 @@ type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct {
     	ReadOnly  *bool   `json:"readOnly,omitempty"`
     }
     
    -// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with
    +// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with
     // apply.
     func AWSElasticBlockStoreVolumeSource() *AWSElasticBlockStoreVolumeSourceApplyConfiguration {
     	return &AWSElasticBlockStoreVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
    index b2774735ae..40ad5ac78f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// AzureDiskVolumeSourceApplyConfiguration represents an declarative configuration of the AzureDiskVolumeSource type for use
    +// AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use
     // with apply.
     type AzureDiskVolumeSourceApplyConfiguration struct {
     	DiskName    *string                      `json:"diskName,omitempty"`
    @@ -33,7 +33,7 @@ type AzureDiskVolumeSourceApplyConfiguration struct {
     	Kind        *v1.AzureDataDiskKind        `json:"kind,omitempty"`
     }
     
    -// AzureDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureDiskVolumeSource type for use with
    +// AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with
     // apply.
     func AzureDiskVolumeSource() *AzureDiskVolumeSourceApplyConfiguration {
     	return &AzureDiskVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go
    index f173938334..70a6b17be8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// AzureFilePersistentVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFilePersistentVolumeSource type for use
    +// AzureFilePersistentVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFilePersistentVolumeSource type for use
     // with apply.
     type AzureFilePersistentVolumeSourceApplyConfiguration struct {
     	SecretName      *string `json:"secretName,omitempty"`
    @@ -27,7 +27,7 @@ type AzureFilePersistentVolumeSourceApplyConfiguration struct {
     	SecretNamespace *string `json:"secretNamespace,omitempty"`
     }
     
    -// AzureFilePersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFilePersistentVolumeSource type for use with
    +// AzureFilePersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFilePersistentVolumeSource type for use with
     // apply.
     func AzureFilePersistentVolumeSource() *AzureFilePersistentVolumeSourceApplyConfiguration {
     	return &AzureFilePersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go
    index a7f7f33d88..ff0c867919 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// AzureFileVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFileVolumeSource type for use
    +// AzureFileVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFileVolumeSource type for use
     // with apply.
     type AzureFileVolumeSourceApplyConfiguration struct {
     	SecretName *string `json:"secretName,omitempty"`
    @@ -26,7 +26,7 @@ type AzureFileVolumeSourceApplyConfiguration struct {
     	ReadOnly   *bool   `json:"readOnly,omitempty"`
     }
     
    -// AzureFileVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFileVolumeSource type for use with
    +// AzureFileVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFileVolumeSource type for use with
     // apply.
     func AzureFileVolumeSource() *AzureFileVolumeSourceApplyConfiguration {
     	return &AzureFileVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
    index c3d176c4d8..1c463aef50 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// CapabilitiesApplyConfiguration represents an declarative configuration of the Capabilities type for use
    +// CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use
     // with apply.
     type CapabilitiesApplyConfiguration struct {
     	Add  []v1.Capability `json:"add,omitempty"`
     	Drop []v1.Capability `json:"drop,omitempty"`
     }
     
    -// CapabilitiesApplyConfiguration constructs an declarative configuration of the Capabilities type for use with
    +// CapabilitiesApplyConfiguration constructs a declarative configuration of the Capabilities type for use with
     // apply.
     func Capabilities() *CapabilitiesApplyConfiguration {
     	return &CapabilitiesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go
    index a41936fe3d..f3ee2d03e9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CephFSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSPersistentVolumeSource type for use
    +// CephFSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSPersistentVolumeSource type for use
     // with apply.
     type CephFSPersistentVolumeSourceApplyConfiguration struct {
     	Monitors   []string                           `json:"monitors,omitempty"`
    @@ -29,7 +29,7 @@ type CephFSPersistentVolumeSourceApplyConfiguration struct {
     	ReadOnly   *bool                              `json:"readOnly,omitempty"`
     }
     
    -// CephFSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSPersistentVolumeSource type for use with
    +// CephFSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSPersistentVolumeSource type for use with
     // apply.
     func CephFSPersistentVolumeSource() *CephFSPersistentVolumeSourceApplyConfiguration {
     	return &CephFSPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go
    index 0ea070ba5d..77d53d6eb0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CephFSVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSVolumeSource type for use
    +// CephFSVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSVolumeSource type for use
     // with apply.
     type CephFSVolumeSourceApplyConfiguration struct {
     	Monitors   []string                                `json:"monitors,omitempty"`
    @@ -29,7 +29,7 @@ type CephFSVolumeSourceApplyConfiguration struct {
     	ReadOnly   *bool                                   `json:"readOnly,omitempty"`
     }
     
    -// CephFSVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSVolumeSource type for use with
    +// CephFSVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSVolumeSource type for use with
     // apply.
     func CephFSVolumeSource() *CephFSVolumeSourceApplyConfiguration {
     	return &CephFSVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go
    index 7754cf92f7..b265734882 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CinderPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CinderPersistentVolumeSource type for use
    +// CinderPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CinderPersistentVolumeSource type for use
     // with apply.
     type CinderPersistentVolumeSourceApplyConfiguration struct {
     	VolumeID  *string                            `json:"volumeID,omitempty"`
    @@ -27,7 +27,7 @@ type CinderPersistentVolumeSourceApplyConfiguration struct {
     	SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
     }
     
    -// CinderPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderPersistentVolumeSource type for use with
    +// CinderPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderPersistentVolumeSource type for use with
     // apply.
     func CinderPersistentVolumeSource() *CinderPersistentVolumeSourceApplyConfiguration {
     	return &CinderPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go
    index 51271e279d..131cbf219c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CinderVolumeSourceApplyConfiguration represents an declarative configuration of the CinderVolumeSource type for use
    +// CinderVolumeSourceApplyConfiguration represents a declarative configuration of the CinderVolumeSource type for use
     // with apply.
     type CinderVolumeSourceApplyConfiguration struct {
     	VolumeID  *string                                 `json:"volumeID,omitempty"`
    @@ -27,7 +27,7 @@ type CinderVolumeSourceApplyConfiguration struct {
     	SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
     }
     
    -// CinderVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderVolumeSource type for use with
    +// CinderVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderVolumeSource type for use with
     // apply.
     func CinderVolumeSource() *CinderVolumeSourceApplyConfiguration {
     	return &CinderVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go
    deleted file mode 100644
    index 2153570fc0..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go
    +++ /dev/null
    @@ -1,48 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1
    -
    -// ClaimSourceApplyConfiguration represents an declarative configuration of the ClaimSource type for use
    -// with apply.
    -type ClaimSourceApplyConfiguration struct {
    -	ResourceClaimName         *string `json:"resourceClaimName,omitempty"`
    -	ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"`
    -}
    -
    -// ClaimSourceApplyConfiguration constructs an declarative configuration of the ClaimSource type for use with
    -// apply.
    -func ClaimSource() *ClaimSourceApplyConfiguration {
    -	return &ClaimSourceApplyConfiguration{}
    -}
    -
    -// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceClaimName field is set to the value of the last call.
    -func (b *ClaimSourceApplyConfiguration) WithResourceClaimName(value string) *ClaimSourceApplyConfiguration {
    -	b.ResourceClaimName = &value
    -	return b
    -}
    -
    -// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call.
    -func (b *ClaimSourceApplyConfiguration) WithResourceClaimTemplateName(value string) *ClaimSourceApplyConfiguration {
    -	b.ResourceClaimTemplateName = &value
    -	return b
    -}
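
claimsource.go is deleted because the ClaimSource type was dropped from core/v1 in the 1.31 API, with its two fields folded into PodResourceClaim. Callers that previously chained through ClaimSource() presumably set the claim reference on the PodResourceClaim apply configuration directly now; the sketch below is written under that assumption, and WithResourceClaimName is assumed to exist in this client-go version rather than confirmed by this diff.

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Assumed replacement for the removed ClaimSource builder: the claim name
	// is set on PodResourceClaim itself in this client-go version.
	claim := corev1ac.PodResourceClaim().
		WithName("gpu").
		WithResourceClaimName("gpu-claim-0")

	fmt.Println(*claim.Name, *claim.ResourceClaimName)
}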
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go
    index a666e8faae..02c4e55e13 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// ClientIPConfigApplyConfiguration represents an declarative configuration of the ClientIPConfig type for use
    +// ClientIPConfigApplyConfiguration represents a declarative configuration of the ClientIPConfig type for use
     // with apply.
     type ClientIPConfigApplyConfiguration struct {
     	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
     }
     
    -// ClientIPConfigApplyConfiguration constructs an declarative configuration of the ClientIPConfig type for use with
    +// ClientIPConfigApplyConfiguration constructs a declarative configuration of the ClientIPConfig type for use with
     // apply.
     func ClientIPConfig() *ClientIPConfigApplyConfiguration {
     	return &ClientIPConfigApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
    index 5aa686782b..bcfbac63e7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterTrustBundleProjectionApplyConfiguration represents an declarative configuration of the ClusterTrustBundleProjection type for use
    +// ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use
     // with apply.
     type ClusterTrustBundleProjectionApplyConfiguration struct {
     	Name          *string                             `json:"name,omitempty"`
    @@ -32,7 +32,7 @@ type ClusterTrustBundleProjectionApplyConfiguration struct {
     	Path          *string                             `json:"path,omitempty"`
     }
     
    -// ClusterTrustBundleProjectionApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleProjection type for use with
    +// ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with
     // apply.
     func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration {
     	return &ClusterTrustBundleProjectionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
    index 1ef65f5a0c..0044c7c0bb 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ComponentConditionApplyConfiguration represents an declarative configuration of the ComponentCondition type for use
    +// ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use
     // with apply.
     type ComponentConditionApplyConfiguration struct {
     	Type    *v1.ComponentConditionType `json:"type,omitempty"`
    @@ -31,7 +31,7 @@ type ComponentConditionApplyConfiguration struct {
     	Error   *string                    `json:"error,omitempty"`
     }
     
    -// ComponentConditionApplyConfiguration constructs an declarative configuration of the ComponentCondition type for use with
    +// ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with
     // apply.
     func ComponentCondition() *ComponentConditionApplyConfiguration {
     	return &ComponentConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
    index 300e526942..195bde7219 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ComponentStatusApplyConfiguration represents an declarative configuration of the ComponentStatus type for use
    +// ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use
     // with apply.
     type ComponentStatusApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type ComponentStatusApplyConfiguration struct {
     	Conditions                       []ComponentConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ComponentStatus constructs an declarative configuration of the ComponentStatus type for use with
    +// ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with
     // apply.
     func ComponentStatus(name string) *ComponentStatusApplyConfiguration {
     	b := &ComponentStatusApplyConfiguration{}
    @@ -250,3 +250,9 @@ func (b *ComponentStatusApplyConfiguration) WithConditions(values ...*ComponentC
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ComponentStatusApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
    index f4cc7024d2..576b7a3d68 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ConfigMapApplyConfiguration represents an declarative configuration of the ConfigMap type for use
    +// ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use
     // with apply.
     type ConfigMapApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type ConfigMapApplyConfiguration struct {
     	BinaryData                       map[string][]byte `json:"binaryData,omitempty"`
     }
     
    -// ConfigMap constructs an declarative configuration of the ConfigMap type for use with
    +// ConfigMap constructs a declarative configuration of the ConfigMap type for use with
     // apply.
     func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration {
     	b := &ConfigMapApplyConfiguration{}
    @@ -277,3 +277,9 @@ func (b *ConfigMapApplyConfiguration) WithBinaryData(entries map[string][]byte)
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ConfigMapApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
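
Several builders in this drop (Lease, ComponentStatus, ConfigMap, and others) gain the same GetName accessor. A minimal sketch of the pattern using the ConfigMap apply configuration, for code that needs the object's name without reaching into the embedded ObjectMeta:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	cm := corev1ac.ConfigMap("app-config", "default").
		WithData(map[string]string{"log-level": "debug"})

	// GetName ensures ObjectMeta exists and returns the *string name set by the constructor.
	if name := cm.GetName(); name != nil {
		fmt.Println(*name)
	}
}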
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
    index 8802fff48f..b1fccd7000 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ConfigMapEnvSourceApplyConfiguration represents an declarative configuration of the ConfigMapEnvSource type for use
    +// ConfigMapEnvSourceApplyConfiguration represents a declarative configuration of the ConfigMapEnvSource type for use
     // with apply.
     type ConfigMapEnvSourceApplyConfiguration struct {
     	LocalObjectReferenceApplyConfiguration `json:",inline"`
     	Optional                               *bool `json:"optional,omitempty"`
     }
     
    -// ConfigMapEnvSourceApplyConfiguration constructs an declarative configuration of the ConfigMapEnvSource type for use with
    +// ConfigMapEnvSourceApplyConfiguration constructs a declarative configuration of the ConfigMapEnvSource type for use with
     // apply.
     func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration {
     	return &ConfigMapEnvSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
    index 2a8c800afc..26c2a75b5a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ConfigMapKeySelectorApplyConfiguration represents an declarative configuration of the ConfigMapKeySelector type for use
    +// ConfigMapKeySelectorApplyConfiguration represents a declarative configuration of the ConfigMapKeySelector type for use
     // with apply.
     type ConfigMapKeySelectorApplyConfiguration struct {
     	LocalObjectReferenceApplyConfiguration `json:",inline"`
    @@ -26,7 +26,7 @@ type ConfigMapKeySelectorApplyConfiguration struct {
     	Optional                               *bool   `json:"optional,omitempty"`
     }
     
    -// ConfigMapKeySelectorApplyConfiguration constructs an declarative configuration of the ConfigMapKeySelector type for use with
    +// ConfigMapKeySelectorApplyConfiguration constructs a declarative configuration of the ConfigMapKeySelector type for use with
     // apply.
     func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration {
     	return &ConfigMapKeySelectorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go
    index da9655a544..135bb7d427 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go
    @@ -22,7 +22,7 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     )
     
    -// ConfigMapNodeConfigSourceApplyConfiguration represents an declarative configuration of the ConfigMapNodeConfigSource type for use
    +// ConfigMapNodeConfigSourceApplyConfiguration represents a declarative configuration of the ConfigMapNodeConfigSource type for use
     // with apply.
     type ConfigMapNodeConfigSourceApplyConfiguration struct {
     	Namespace        *string    `json:"namespace,omitempty"`
    @@ -32,7 +32,7 @@ type ConfigMapNodeConfigSourceApplyConfiguration struct {
     	KubeletConfigKey *string    `json:"kubeletConfigKey,omitempty"`
     }
     
    -// ConfigMapNodeConfigSourceApplyConfiguration constructs an declarative configuration of the ConfigMapNodeConfigSource type for use with
    +// ConfigMapNodeConfigSourceApplyConfiguration constructs a declarative configuration of the ConfigMapNodeConfigSource type for use with
     // apply.
     func ConfigMapNodeConfigSource() *ConfigMapNodeConfigSourceApplyConfiguration {
     	return &ConfigMapNodeConfigSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
    index 7297d3a437..308b28f57d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ConfigMapProjectionApplyConfiguration represents an declarative configuration of the ConfigMapProjection type for use
    +// ConfigMapProjectionApplyConfiguration represents a declarative configuration of the ConfigMapProjection type for use
     // with apply.
     type ConfigMapProjectionApplyConfiguration struct {
     	LocalObjectReferenceApplyConfiguration `json:",inline"`
    @@ -26,7 +26,7 @@ type ConfigMapProjectionApplyConfiguration struct {
     	Optional                               *bool                         `json:"optional,omitempty"`
     }
     
    -// ConfigMapProjectionApplyConfiguration constructs an declarative configuration of the ConfigMapProjection type for use with
    +// ConfigMapProjectionApplyConfiguration constructs a declarative configuration of the ConfigMapProjection type for use with
     // apply.
     func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration {
     	return &ConfigMapProjectionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
    index deaebde319..8e0e8dc0f4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ConfigMapVolumeSourceApplyConfiguration represents an declarative configuration of the ConfigMapVolumeSource type for use
    +// ConfigMapVolumeSourceApplyConfiguration represents a declarative configuration of the ConfigMapVolumeSource type for use
     // with apply.
     type ConfigMapVolumeSourceApplyConfiguration struct {
     	LocalObjectReferenceApplyConfiguration `json:",inline"`
    @@ -27,7 +27,7 @@ type ConfigMapVolumeSourceApplyConfiguration struct {
     	Optional                               *bool                         `json:"optional,omitempty"`
     }
     
    -// ConfigMapVolumeSourceApplyConfiguration constructs an declarative configuration of the ConfigMapVolumeSource type for use with
    +// ConfigMapVolumeSourceApplyConfiguration constructs a declarative configuration of the ConfigMapVolumeSource type for use with
     // apply.
     func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration {
     	return &ConfigMapVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
    index 32d7156063..eed5f7d027 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
    @@ -22,7 +22,7 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// ContainerApplyConfiguration represents an declarative configuration of the Container type for use
    +// ContainerApplyConfiguration represents a declarative configuration of the Container type for use
     // with apply.
     type ContainerApplyConfiguration struct {
     	Name                     *string                                   `json:"name,omitempty"`
    @@ -51,7 +51,7 @@ type ContainerApplyConfiguration struct {
     	TTY                      *bool                                     `json:"tty,omitempty"`
     }
     
    -// ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with
    +// ContainerApplyConfiguration constructs a declarative configuration of the Container type for use with
     // apply.
     func Container() *ContainerApplyConfiguration {
     	return &ContainerApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go
    index d5c874a7ce..bc9428fd10 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ContainerImageApplyConfiguration represents an declarative configuration of the ContainerImage type for use
    +// ContainerImageApplyConfiguration represents a declarative configuration of the ContainerImage type for use
     // with apply.
     type ContainerImageApplyConfiguration struct {
     	Names     []string `json:"names,omitempty"`
     	SizeBytes *int64   `json:"sizeBytes,omitempty"`
     }
     
    -// ContainerImageApplyConfiguration constructs an declarative configuration of the ContainerImage type for use with
    +// ContainerImageApplyConfiguration constructs a declarative configuration of the ContainerImage type for use with
     // apply.
     func ContainerImage() *ContainerImageApplyConfiguration {
     	return &ContainerImageApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
    index a23ad9268a..7acc0638f2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ContainerPortApplyConfiguration represents an declarative configuration of the ContainerPort type for use
    +// ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use
     // with apply.
     type ContainerPortApplyConfiguration struct {
     	Name          *string      `json:"name,omitempty"`
    @@ -32,7 +32,7 @@ type ContainerPortApplyConfiguration struct {
     	HostIP        *string      `json:"hostIP,omitempty"`
     }
     
    -// ContainerPortApplyConfiguration constructs an declarative configuration of the ContainerPort type for use with
    +// ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with
     // apply.
     func ContainerPort() *ContainerPortApplyConfiguration {
     	return &ContainerPortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
    index bbbcbc9f13..ea60e3d987 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use
    +// ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use
     // with apply.
     type ContainerResizePolicyApplyConfiguration struct {
     	ResourceName  *v1.ResourceName                `json:"resourceName,omitempty"`
     	RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"`
     }
     
    -// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with
    +// ContainerResizePolicyApplyConfiguration constructs a declarative configuration of the ContainerResizePolicy type for use with
     // apply.
     func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration {
     	return &ContainerResizePolicyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go
    index 6cbfc7fd9b..b958e01774 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ContainerStateApplyConfiguration represents an declarative configuration of the ContainerState type for use
    +// ContainerStateApplyConfiguration represents a declarative configuration of the ContainerState type for use
     // with apply.
     type ContainerStateApplyConfiguration struct {
     	Waiting    *ContainerStateWaitingApplyConfiguration    `json:"waiting,omitempty"`
    @@ -26,7 +26,7 @@ type ContainerStateApplyConfiguration struct {
     	Terminated *ContainerStateTerminatedApplyConfiguration `json:"terminated,omitempty"`
     }
     
    -// ContainerStateApplyConfiguration constructs an declarative configuration of the ContainerState type for use with
    +// ContainerStateApplyConfiguration constructs a declarative configuration of the ContainerState type for use with
     // apply.
     func ContainerState() *ContainerStateApplyConfiguration {
     	return &ContainerStateApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
    index 6c1d7311e7..6eec9f7f2c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ContainerStateRunningApplyConfiguration represents an declarative configuration of the ContainerStateRunning type for use
    +// ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use
     // with apply.
     type ContainerStateRunningApplyConfiguration struct {
     	StartedAt *v1.Time `json:"startedAt,omitempty"`
     }
     
    -// ContainerStateRunningApplyConfiguration constructs an declarative configuration of the ContainerStateRunning type for use with
    +// ContainerStateRunningApplyConfiguration constructs a declarative configuration of the ContainerStateRunning type for use with
     // apply.
     func ContainerStateRunning() *ContainerStateRunningApplyConfiguration {
     	return &ContainerStateRunningApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
    index 0383c9dd9d..b067aa211e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ContainerStateTerminatedApplyConfiguration represents an declarative configuration of the ContainerStateTerminated type for use
    +// ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use
     // with apply.
     type ContainerStateTerminatedApplyConfiguration struct {
     	ExitCode    *int32   `json:"exitCode,omitempty"`
    @@ -34,7 +34,7 @@ type ContainerStateTerminatedApplyConfiguration struct {
     	ContainerID *string  `json:"containerID,omitempty"`
     }
     
    -// ContainerStateTerminatedApplyConfiguration constructs an declarative configuration of the ContainerStateTerminated type for use with
    +// ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with
     // apply.
     func ContainerStateTerminated() *ContainerStateTerminatedApplyConfiguration {
     	return &ContainerStateTerminatedApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go
    index e51b778c0d..7756c7da03 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ContainerStateWaitingApplyConfiguration represents an declarative configuration of the ContainerStateWaiting type for use
    +// ContainerStateWaitingApplyConfiguration represents a declarative configuration of the ContainerStateWaiting type for use
     // with apply.
     type ContainerStateWaitingApplyConfiguration struct {
     	Reason  *string `json:"reason,omitempty"`
     	Message *string `json:"message,omitempty"`
     }
     
    -// ContainerStateWaitingApplyConfiguration constructs an declarative configuration of the ContainerStateWaiting type for use with
    +// ContainerStateWaitingApplyConfiguration constructs a declarative configuration of the ContainerStateWaiting type for use with
     // apply.
     func ContainerStateWaiting() *ContainerStateWaitingApplyConfiguration {
     	return &ContainerStateWaitingApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go
    index e3f774bbb3..6a28939c2f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go
    @@ -22,24 +22,26 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use
    +// ContainerStatusApplyConfiguration represents a declarative configuration of the ContainerStatus type for use
     // with apply.
     type ContainerStatusApplyConfiguration struct {
    -	Name                 *string                                 `json:"name,omitempty"`
    -	State                *ContainerStateApplyConfiguration       `json:"state,omitempty"`
    -	LastTerminationState *ContainerStateApplyConfiguration       `json:"lastState,omitempty"`
    -	Ready                *bool                                   `json:"ready,omitempty"`
    -	RestartCount         *int32                                  `json:"restartCount,omitempty"`
    -	Image                *string                                 `json:"image,omitempty"`
    -	ImageID              *string                                 `json:"imageID,omitempty"`
    -	ContainerID          *string                                 `json:"containerID,omitempty"`
    -	Started              *bool                                   `json:"started,omitempty"`
    -	AllocatedResources   *corev1.ResourceList                    `json:"allocatedResources,omitempty"`
    -	Resources            *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
    -	VolumeMounts         []VolumeMountStatusApplyConfiguration   `json:"volumeMounts,omitempty"`
    -}
    -
    -// ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with
    +	Name                     *string                                 `json:"name,omitempty"`
    +	State                    *ContainerStateApplyConfiguration       `json:"state,omitempty"`
    +	LastTerminationState     *ContainerStateApplyConfiguration       `json:"lastState,omitempty"`
    +	Ready                    *bool                                   `json:"ready,omitempty"`
    +	RestartCount             *int32                                  `json:"restartCount,omitempty"`
    +	Image                    *string                                 `json:"image,omitempty"`
    +	ImageID                  *string                                 `json:"imageID,omitempty"`
    +	ContainerID              *string                                 `json:"containerID,omitempty"`
    +	Started                  *bool                                   `json:"started,omitempty"`
    +	AllocatedResources       *corev1.ResourceList                    `json:"allocatedResources,omitempty"`
    +	Resources                *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
    +	VolumeMounts             []VolumeMountStatusApplyConfiguration   `json:"volumeMounts,omitempty"`
    +	User                     *ContainerUserApplyConfiguration        `json:"user,omitempty"`
    +	AllocatedResourcesStatus []ResourceStatusApplyConfiguration      `json:"allocatedResourcesStatus,omitempty"`
    +}
    +
    +// ContainerStatusApplyConfiguration constructs a declarative configuration of the ContainerStatus type for use with
     // apply.
     func ContainerStatus() *ContainerStatusApplyConfiguration {
     	return &ContainerStatusApplyConfiguration{}
    @@ -145,3 +147,24 @@ func (b *ContainerStatusApplyConfiguration) WithVolumeMounts(values ...*VolumeMo
     	}
     	return b
     }
    +
    +// WithUser sets the User field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the User field is set to the value of the last call.
    +func (b *ContainerStatusApplyConfiguration) WithUser(value *ContainerUserApplyConfiguration) *ContainerStatusApplyConfiguration {
    +	b.User = value
    +	return b
    +}
    +
    +// WithAllocatedResourcesStatus adds the given value to the AllocatedResourcesStatus field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the AllocatedResourcesStatus field.
    +func (b *ContainerStatusApplyConfiguration) WithAllocatedResourcesStatus(values ...*ResourceStatusApplyConfiguration) *ContainerStatusApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithAllocatedResourcesStatus")
    +		}
    +		b.AllocatedResourcesStatus = append(b.AllocatedResourcesStatus, *values[i])
    +	}
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go
    new file mode 100644
    index 0000000000..34ec8e4146
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// ContainerUserApplyConfiguration represents a declarative configuration of the ContainerUser type for use
    +// with apply.
    +type ContainerUserApplyConfiguration struct {
    +	Linux *LinuxContainerUserApplyConfiguration `json:"linux,omitempty"`
    +}
    +
    +// ContainerUserApplyConfiguration constructs a declarative configuration of the ContainerUser type for use with
    +// apply.
    +func ContainerUser() *ContainerUserApplyConfiguration {
    +	return &ContainerUserApplyConfiguration{}
    +}
    +
    +// WithLinux sets the Linux field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Linux field is set to the value of the last call.
    +func (b *ContainerUserApplyConfiguration) WithLinux(value *LinuxContainerUserApplyConfiguration) *ContainerUserApplyConfiguration {
    +	b.Linux = value
    +	return b
    +}
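
The two hunks above extend ContainerStatusApplyConfiguration with a User field (replaced on each WithUser call) and an AllocatedResourcesStatus list (appended to on each WithAllocatedResourcesStatus call), backed by the new ContainerUserApplyConfiguration builder. A minimal usage sketch, assuming the vendored package is imported as corev1ac and that a ResourceStatus() constructor exists elsewhere in the same generated package (its type is referenced here, but its file is outside this excerpt):

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// WithUser replaces the User field on every call, while
	// WithAllocatedResourcesStatus appends each value it is given.
	// ContainerUser() comes from containeruser.go above; ResourceStatus()
	// is assumed to follow the same generated-constructor pattern.
	status := corev1ac.ContainerStatus().
		WithName("app").
		WithReady(true).
		WithUser(corev1ac.ContainerUser()).
		WithAllocatedResourcesStatus(corev1ac.ResourceStatus())

	fmt.Println(*status.Name, *status.Ready, len(status.AllocatedResourcesStatus))
}
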
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go
    index 2fc681604e..a614d10805 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CSIPersistentVolumeSource type for use
    +// CSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CSIPersistentVolumeSource type for use
     // with apply.
     type CSIPersistentVolumeSourceApplyConfiguration struct {
     	Driver                     *string                            `json:"driver,omitempty"`
    @@ -33,7 +33,7 @@ type CSIPersistentVolumeSourceApplyConfiguration struct {
     	NodeExpandSecretRef        *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"`
     }
     
    -// CSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIPersistentVolumeSource type for use with
    +// CSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIPersistentVolumeSource type for use with
     // apply.
     func CSIPersistentVolumeSource() *CSIPersistentVolumeSourceApplyConfiguration {
     	return &CSIPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go
    index c2a32df8d0..b58d9bbb4b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CSIVolumeSourceApplyConfiguration represents an declarative configuration of the CSIVolumeSource type for use
    +// CSIVolumeSourceApplyConfiguration represents a declarative configuration of the CSIVolumeSource type for use
     // with apply.
     type CSIVolumeSourceApplyConfiguration struct {
     	Driver               *string                                 `json:"driver,omitempty"`
    @@ -28,7 +28,7 @@ type CSIVolumeSourceApplyConfiguration struct {
     	NodePublishSecretRef *LocalObjectReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"`
     }
     
    -// CSIVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIVolumeSource type for use with
    +// CSIVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIVolumeSource type for use with
     // apply.
     func CSIVolumeSource() *CSIVolumeSourceApplyConfiguration {
     	return &CSIVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go
    index 13a2e948f1..5be27ec0c5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// DaemonEndpointApplyConfiguration represents an declarative configuration of the DaemonEndpoint type for use
    +// DaemonEndpointApplyConfiguration represents a declarative configuration of the DaemonEndpoint type for use
     // with apply.
     type DaemonEndpointApplyConfiguration struct {
     	Port *int32 `json:"Port,omitempty"`
     }
     
    -// DaemonEndpointApplyConfiguration constructs an declarative configuration of the DaemonEndpoint type for use with
    +// DaemonEndpointApplyConfiguration constructs a declarative configuration of the DaemonEndpoint type for use with
     // apply.
     func DaemonEndpoint() *DaemonEndpointApplyConfiguration {
     	return &DaemonEndpointApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go
    index f88a87c0b5..ed6b8b1bbe 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// DownwardAPIProjectionApplyConfiguration represents an declarative configuration of the DownwardAPIProjection type for use
    +// DownwardAPIProjectionApplyConfiguration represents a declarative configuration of the DownwardAPIProjection type for use
     // with apply.
     type DownwardAPIProjectionApplyConfiguration struct {
     	Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"`
     }
     
    -// DownwardAPIProjectionApplyConfiguration constructs an declarative configuration of the DownwardAPIProjection type for use with
    +// DownwardAPIProjectionApplyConfiguration constructs a declarative configuration of the DownwardAPIProjection type for use with
     // apply.
     func DownwardAPIProjection() *DownwardAPIProjectionApplyConfiguration {
     	return &DownwardAPIProjectionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go
    index b25ff25fa9..ec9d013dd9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// DownwardAPIVolumeFileApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeFile type for use
    +// DownwardAPIVolumeFileApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeFile type for use
     // with apply.
     type DownwardAPIVolumeFileApplyConfiguration struct {
     	Path             *string                                  `json:"path,omitempty"`
    @@ -27,7 +27,7 @@ type DownwardAPIVolumeFileApplyConfiguration struct {
     	Mode             *int32                                   `json:"mode,omitempty"`
     }
     
    -// DownwardAPIVolumeFileApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeFile type for use with
    +// DownwardAPIVolumeFileApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeFile type for use with
     // apply.
     func DownwardAPIVolumeFile() *DownwardAPIVolumeFileApplyConfiguration {
     	return &DownwardAPIVolumeFileApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go
    index 6913bb5218..eef9d7ef8d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// DownwardAPIVolumeSourceApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeSource type for use
    +// DownwardAPIVolumeSourceApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeSource type for use
     // with apply.
     type DownwardAPIVolumeSourceApplyConfiguration struct {
     	Items       []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"`
     	DefaultMode *int32                                    `json:"defaultMode,omitempty"`
     }
     
    -// DownwardAPIVolumeSourceApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeSource type for use with
    +// DownwardAPIVolumeSourceApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeSource type for use with
     // apply.
     func DownwardAPIVolumeSource() *DownwardAPIVolumeSourceApplyConfiguration {
     	return &DownwardAPIVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
    index 021280daf6..a619fdb074 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
    @@ -23,14 +23,14 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// EmptyDirVolumeSourceApplyConfiguration represents an declarative configuration of the EmptyDirVolumeSource type for use
    +// EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use
     // with apply.
     type EmptyDirVolumeSourceApplyConfiguration struct {
     	Medium    *v1.StorageMedium  `json:"medium,omitempty"`
     	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
     }
     
    -// EmptyDirVolumeSourceApplyConfiguration constructs an declarative configuration of the EmptyDirVolumeSource type for use with
    +// EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with
     // apply.
     func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration {
     	return &EmptyDirVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go
    index 52a54b6008..536e697a9a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// EndpointAddressApplyConfiguration represents an declarative configuration of the EndpointAddress type for use
    +// EndpointAddressApplyConfiguration represents a declarative configuration of the EndpointAddress type for use
     // with apply.
     type EndpointAddressApplyConfiguration struct {
     	IP        *string                            `json:"ip,omitempty"`
    @@ -27,7 +27,7 @@ type EndpointAddressApplyConfiguration struct {
     	TargetRef *ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"`
     }
     
    -// EndpointAddressApplyConfiguration constructs an declarative configuration of the EndpointAddress type for use with
    +// EndpointAddressApplyConfiguration constructs a declarative configuration of the EndpointAddress type for use with
     // apply.
     func EndpointAddress() *EndpointAddressApplyConfiguration {
     	return &EndpointAddressApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
    index cc00d0e491..d0d96230ce 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use
    +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
     // with apply.
     type EndpointPortApplyConfiguration struct {
     	Name        *string      `json:"name,omitempty"`
    @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct {
     	AppProtocol *string      `json:"appProtocol,omitempty"`
     }
     
    -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with
    +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
     // apply.
     func EndpointPort() *EndpointPortApplyConfiguration {
     	return &EndpointPortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
    index b98fed0858..98dc69aaab 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EndpointsApplyConfiguration represents an declarative configuration of the Endpoints type for use
    +// EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use
     // with apply.
     type EndpointsApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type EndpointsApplyConfiguration struct {
     	Subsets                          []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"`
     }
     
    -// Endpoints constructs an declarative configuration of the Endpoints type for use with
    +// Endpoints constructs a declarative configuration of the Endpoints type for use with
     // apply.
     func Endpoints(name, namespace string) *EndpointsApplyConfiguration {
     	b := &EndpointsApplyConfiguration{}
    @@ -252,3 +252,9 @@ func (b *EndpointsApplyConfiguration) WithSubsets(values ...*EndpointSubsetApply
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EndpointsApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
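
EndpointsApplyConfiguration gains a GetName accessor that first ensures the nested ObjectMeta exists and then returns the stored name pointer; the same accessor is added to other top-level configurations further down in this diff. A short sketch, assuming the vendored package is imported as corev1ac:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// GetName materializes ObjectMeta on demand and returns the Name pointer,
	// so it is safe to call on a freshly constructed configuration.
	ep := corev1ac.Endpoints("my-service", "default")
	fmt.Println(*ep.GetName()) // prints "my-service"
}
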
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go
    index cd0657a80c..33cd8496a7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// EndpointSubsetApplyConfiguration represents an declarative configuration of the EndpointSubset type for use
    +// EndpointSubsetApplyConfiguration represents a declarative configuration of the EndpointSubset type for use
     // with apply.
     type EndpointSubsetApplyConfiguration struct {
     	Addresses         []EndpointAddressApplyConfiguration `json:"addresses,omitempty"`
    @@ -26,7 +26,7 @@ type EndpointSubsetApplyConfiguration struct {
     	Ports             []EndpointPortApplyConfiguration    `json:"ports,omitempty"`
     }
     
    -// EndpointSubsetApplyConfiguration constructs an declarative configuration of the EndpointSubset type for use with
    +// EndpointSubsetApplyConfiguration constructs a declarative configuration of the EndpointSubset type for use with
     // apply.
     func EndpointSubset() *EndpointSubsetApplyConfiguration {
     	return &EndpointSubsetApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go
    index 9e46d25ded..7aa181cf1a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// EnvFromSourceApplyConfiguration represents an declarative configuration of the EnvFromSource type for use
    +// EnvFromSourceApplyConfiguration represents a declarative configuration of the EnvFromSource type for use
     // with apply.
     type EnvFromSourceApplyConfiguration struct {
     	Prefix       *string                               `json:"prefix,omitempty"`
    @@ -26,7 +26,7 @@ type EnvFromSourceApplyConfiguration struct {
     	SecretRef    *SecretEnvSourceApplyConfiguration    `json:"secretRef,omitempty"`
     }
     
    -// EnvFromSourceApplyConfiguration constructs an declarative configuration of the EnvFromSource type for use with
    +// EnvFromSourceApplyConfiguration constructs a declarative configuration of the EnvFromSource type for use with
     // apply.
     func EnvFromSource() *EnvFromSourceApplyConfiguration {
     	return &EnvFromSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go
    index a83528a28e..5894166ca4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// EnvVarApplyConfiguration represents an declarative configuration of the EnvVar type for use
    +// EnvVarApplyConfiguration represents a declarative configuration of the EnvVar type for use
     // with apply.
     type EnvVarApplyConfiguration struct {
     	Name      *string                         `json:"name,omitempty"`
    @@ -26,7 +26,7 @@ type EnvVarApplyConfiguration struct {
     	ValueFrom *EnvVarSourceApplyConfiguration `json:"valueFrom,omitempty"`
     }
     
    -// EnvVarApplyConfiguration constructs an declarative configuration of the EnvVar type for use with
    +// EnvVarApplyConfiguration constructs a declarative configuration of the EnvVar type for use with
     // apply.
     func EnvVar() *EnvVarApplyConfiguration {
     	return &EnvVarApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go
    index 70c695bd5b..a3a55ea7af 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// EnvVarSourceApplyConfiguration represents an declarative configuration of the EnvVarSource type for use
    +// EnvVarSourceApplyConfiguration represents a declarative configuration of the EnvVarSource type for use
     // with apply.
     type EnvVarSourceApplyConfiguration struct {
     	FieldRef         *ObjectFieldSelectorApplyConfiguration   `json:"fieldRef,omitempty"`
    @@ -27,7 +27,7 @@ type EnvVarSourceApplyConfiguration struct {
     	SecretKeyRef     *SecretKeySelectorApplyConfiguration     `json:"secretKeyRef,omitempty"`
     }
     
    -// EnvVarSourceApplyConfiguration constructs an declarative configuration of the EnvVarSource type for use with
    +// EnvVarSourceApplyConfiguration constructs a declarative configuration of the EnvVarSource type for use with
     // apply.
     func EnvVarSource() *EnvVarSourceApplyConfiguration {
     	return &EnvVarSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
    index 5fa79a246e..a15ac6ec34 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
    @@ -22,14 +22,14 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// EphemeralContainerApplyConfiguration represents an declarative configuration of the EphemeralContainer type for use
    +// EphemeralContainerApplyConfiguration represents a declarative configuration of the EphemeralContainer type for use
     // with apply.
     type EphemeralContainerApplyConfiguration struct {
     	EphemeralContainerCommonApplyConfiguration `json:",inline"`
     	TargetContainerName                        *string `json:"targetContainerName,omitempty"`
     }
     
    -// EphemeralContainerApplyConfiguration constructs an declarative configuration of the EphemeralContainer type for use with
    +// EphemeralContainerApplyConfiguration constructs a declarative configuration of the EphemeralContainer type for use with
     // apply.
     func EphemeralContainer() *EphemeralContainerApplyConfiguration {
     	return &EphemeralContainerApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
    index 8cded29a9e..d5d13d27a0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
    @@ -22,7 +22,7 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use
    +// EphemeralContainerCommonApplyConfiguration represents a declarative configuration of the EphemeralContainerCommon type for use
     // with apply.
     type EphemeralContainerCommonApplyConfiguration struct {
     	Name                     *string                                   `json:"name,omitempty"`
    @@ -51,7 +51,7 @@ type EphemeralContainerCommonApplyConfiguration struct {
     	TTY                      *bool                                     `json:"tty,omitempty"`
     }
     
    -// EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with
    +// EphemeralContainerCommonApplyConfiguration constructs a declarative configuration of the EphemeralContainerCommon type for use with
     // apply.
     func EphemeralContainerCommon() *EphemeralContainerCommonApplyConfiguration {
     	return &EphemeralContainerCommonApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go
    index 31859404cc..d2c8c6722e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// EphemeralVolumeSourceApplyConfiguration represents an declarative configuration of the EphemeralVolumeSource type for use
    +// EphemeralVolumeSourceApplyConfiguration represents a declarative configuration of the EphemeralVolumeSource type for use
     // with apply.
     type EphemeralVolumeSourceApplyConfiguration struct {
     	VolumeClaimTemplate *PersistentVolumeClaimTemplateApplyConfiguration `json:"volumeClaimTemplate,omitempty"`
     }
     
    -// EphemeralVolumeSourceApplyConfiguration constructs an declarative configuration of the EphemeralVolumeSource type for use with
    +// EphemeralVolumeSourceApplyConfiguration constructs a declarative configuration of the EphemeralVolumeSource type for use with
     // apply.
     func EphemeralVolumeSource() *EphemeralVolumeSourceApplyConfiguration {
     	return &EphemeralVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
    index 60aff6b5b2..65d6577ab6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EventApplyConfiguration represents an declarative configuration of the Event type for use
    +// EventApplyConfiguration represents a declarative configuration of the Event type for use
     // with apply.
     type EventApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -48,7 +48,7 @@ type EventApplyConfiguration struct {
     	ReportingInstance                *string                            `json:"reportingInstance,omitempty"`
     }
     
    -// Event constructs an declarative configuration of the Event type for use with
    +// Event constructs a declarative configuration of the Event type for use with
     // apply.
     func Event(name, namespace string) *EventApplyConfiguration {
     	b := &EventApplyConfiguration{}
    @@ -364,3 +364,9 @@ func (b *EventApplyConfiguration) WithReportingInstance(value string) *EventAppl
     	b.ReportingInstance = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EventApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
    index e66fb41271..18069c0d1b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use
    +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
     // with apply.
     type EventSeriesApplyConfiguration struct {
     	Count            *int32        `json:"count,omitempty"`
     	LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"`
     }
     
    -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with
    +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with
     // apply.
     func EventSeries() *EventSeriesApplyConfiguration {
     	return &EventSeriesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go
    index 2eb4aa8e44..97edb04931 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// EventSourceApplyConfiguration represents an declarative configuration of the EventSource type for use
    +// EventSourceApplyConfiguration represents a declarative configuration of the EventSource type for use
     // with apply.
     type EventSourceApplyConfiguration struct {
     	Component *string `json:"component,omitempty"`
     	Host      *string `json:"host,omitempty"`
     }
     
    -// EventSourceApplyConfiguration constructs an declarative configuration of the EventSource type for use with
    +// EventSourceApplyConfiguration constructs a declarative configuration of the EventSource type for use with
     // apply.
     func EventSource() *EventSourceApplyConfiguration {
     	return &EventSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go
    index 1df52144d7..b7208a91cf 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// ExecActionApplyConfiguration represents an declarative configuration of the ExecAction type for use
    +// ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use
     // with apply.
     type ExecActionApplyConfiguration struct {
     	Command []string `json:"command,omitempty"`
     }
     
    -// ExecActionApplyConfiguration constructs an declarative configuration of the ExecAction type for use with
    +// ExecActionApplyConfiguration constructs a declarative configuration of the ExecAction type for use with
     // apply.
     func ExecAction() *ExecActionApplyConfiguration {
     	return &ExecActionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go
    index 43069de9a6..000ff2cc62 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// FCVolumeSourceApplyConfiguration represents an declarative configuration of the FCVolumeSource type for use
    +// FCVolumeSourceApplyConfiguration represents a declarative configuration of the FCVolumeSource type for use
     // with apply.
     type FCVolumeSourceApplyConfiguration struct {
     	TargetWWNs []string `json:"targetWWNs,omitempty"`
    @@ -28,7 +28,7 @@ type FCVolumeSourceApplyConfiguration struct {
     	WWIDs      []string `json:"wwids,omitempty"`
     }
     
    -// FCVolumeSourceApplyConfiguration constructs an declarative configuration of the FCVolumeSource type for use with
    +// FCVolumeSourceApplyConfiguration constructs a declarative configuration of the FCVolumeSource type for use with
     // apply.
     func FCVolumeSource() *FCVolumeSourceApplyConfiguration {
     	return &FCVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go
    index 47e7c746ee..355c2c82d0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// FlexPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the FlexPersistentVolumeSource type for use
    +// FlexPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the FlexPersistentVolumeSource type for use
     // with apply.
     type FlexPersistentVolumeSourceApplyConfiguration struct {
     	Driver    *string                            `json:"driver,omitempty"`
    @@ -28,7 +28,7 @@ type FlexPersistentVolumeSourceApplyConfiguration struct {
     	Options   map[string]string                  `json:"options,omitempty"`
     }
     
    -// FlexPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexPersistentVolumeSource type for use with
    +// FlexPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexPersistentVolumeSource type for use with
     // apply.
     func FlexPersistentVolumeSource() *FlexPersistentVolumeSourceApplyConfiguration {
     	return &FlexPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go
    index 7c09516a98..08ae9e1bea 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// FlexVolumeSourceApplyConfiguration represents an declarative configuration of the FlexVolumeSource type for use
    +// FlexVolumeSourceApplyConfiguration represents a declarative configuration of the FlexVolumeSource type for use
     // with apply.
     type FlexVolumeSourceApplyConfiguration struct {
     	Driver    *string                                 `json:"driver,omitempty"`
    @@ -28,7 +28,7 @@ type FlexVolumeSourceApplyConfiguration struct {
     	Options   map[string]string                       `json:"options,omitempty"`
     }
     
    -// FlexVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexVolumeSource type for use with
    +// FlexVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexVolumeSource type for use with
     // apply.
     func FlexVolumeSource() *FlexVolumeSourceApplyConfiguration {
     	return &FlexVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go
    index 74896d55ac..e4ecbba0e4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// FlockerVolumeSourceApplyConfiguration represents an declarative configuration of the FlockerVolumeSource type for use
    +// FlockerVolumeSourceApplyConfiguration represents a declarative configuration of the FlockerVolumeSource type for use
     // with apply.
     type FlockerVolumeSourceApplyConfiguration struct {
     	DatasetName *string `json:"datasetName,omitempty"`
     	DatasetUUID *string `json:"datasetUUID,omitempty"`
     }
     
    -// FlockerVolumeSourceApplyConfiguration constructs an declarative configuration of the FlockerVolumeSource type for use with
    +// FlockerVolumeSourceApplyConfiguration constructs a declarative configuration of the FlockerVolumeSource type for use with
     // apply.
     func FlockerVolumeSource() *FlockerVolumeSourceApplyConfiguration {
     	return &FlockerVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go
    index 0869d3eaa6..56c4d03fa2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// GCEPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the GCEPersistentDiskVolumeSource type for use
    +// GCEPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the GCEPersistentDiskVolumeSource type for use
     // with apply.
     type GCEPersistentDiskVolumeSourceApplyConfiguration struct {
     	PDName    *string `json:"pdName,omitempty"`
    @@ -27,7 +27,7 @@ type GCEPersistentDiskVolumeSourceApplyConfiguration struct {
     	ReadOnly  *bool   `json:"readOnly,omitempty"`
     }
     
    -// GCEPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the GCEPersistentDiskVolumeSource type for use with
    +// GCEPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the GCEPersistentDiskVolumeSource type for use with
     // apply.
     func GCEPersistentDiskVolumeSource() *GCEPersistentDiskVolumeSourceApplyConfiguration {
     	return &GCEPersistentDiskVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go
    index 825e02e4e4..4ed92317c8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// GitRepoVolumeSourceApplyConfiguration represents an declarative configuration of the GitRepoVolumeSource type for use
    +// GitRepoVolumeSourceApplyConfiguration represents a declarative configuration of the GitRepoVolumeSource type for use
     // with apply.
     type GitRepoVolumeSourceApplyConfiguration struct {
     	Repository *string `json:"repository,omitempty"`
    @@ -26,7 +26,7 @@ type GitRepoVolumeSourceApplyConfiguration struct {
     	Directory  *string `json:"directory,omitempty"`
     }
     
    -// GitRepoVolumeSourceApplyConfiguration constructs an declarative configuration of the GitRepoVolumeSource type for use with
    +// GitRepoVolumeSourceApplyConfiguration constructs a declarative configuration of the GitRepoVolumeSource type for use with
     // apply.
     func GitRepoVolumeSource() *GitRepoVolumeSourceApplyConfiguration {
     	return &GitRepoVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go
    index 21a3925e52..c9a23ca5d7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// GlusterfsPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsPersistentVolumeSource type for use
    +// GlusterfsPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsPersistentVolumeSource type for use
     // with apply.
     type GlusterfsPersistentVolumeSourceApplyConfiguration struct {
     	EndpointsName      *string `json:"endpoints,omitempty"`
    @@ -27,7 +27,7 @@ type GlusterfsPersistentVolumeSourceApplyConfiguration struct {
     	EndpointsNamespace *string `json:"endpointsNamespace,omitempty"`
     }
     
    -// GlusterfsPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsPersistentVolumeSource type for use with
    +// GlusterfsPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsPersistentVolumeSource type for use with
     // apply.
     func GlusterfsPersistentVolumeSource() *GlusterfsPersistentVolumeSourceApplyConfiguration {
     	return &GlusterfsPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go
    index 7ce6f0b399..8c27f8c70d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// GlusterfsVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsVolumeSource type for use
    +// GlusterfsVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsVolumeSource type for use
     // with apply.
     type GlusterfsVolumeSourceApplyConfiguration struct {
     	EndpointsName *string `json:"endpoints,omitempty"`
    @@ -26,7 +26,7 @@ type GlusterfsVolumeSourceApplyConfiguration struct {
     	ReadOnly      *bool   `json:"readOnly,omitempty"`
     }
     
    -// GlusterfsVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsVolumeSource type for use with
    +// GlusterfsVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsVolumeSource type for use with
     // apply.
     func GlusterfsVolumeSource() *GlusterfsVolumeSourceApplyConfiguration {
     	return &GlusterfsVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go
    index f94e55937a..0f3a886714 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// GRPCActionApplyConfiguration represents an declarative configuration of the GRPCAction type for use
    +// GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use
     // with apply.
     type GRPCActionApplyConfiguration struct {
     	Port    *int32  `json:"port,omitempty"`
     	Service *string `json:"service,omitempty"`
     }
     
    -// GRPCActionApplyConfiguration constructs an declarative configuration of the GRPCAction type for use with
    +// GRPCActionApplyConfiguration constructs a declarative configuration of the GRPCAction type for use with
     // apply.
     func GRPCAction() *GRPCActionApplyConfiguration {
     	return &GRPCActionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go
    index 861508ef53..ec9ea17413 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// HostAliasApplyConfiguration represents an declarative configuration of the HostAlias type for use
    +// HostAliasApplyConfiguration represents a declarative configuration of the HostAlias type for use
     // with apply.
     type HostAliasApplyConfiguration struct {
     	IP        *string  `json:"ip,omitempty"`
     	Hostnames []string `json:"hostnames,omitempty"`
     }
     
    -// HostAliasApplyConfiguration constructs an declarative configuration of the HostAlias type for use with
    +// HostAliasApplyConfiguration constructs a declarative configuration of the HostAlias type for use with
     // apply.
     func HostAlias() *HostAliasApplyConfiguration {
     	return &HostAliasApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
    index c2a42cf747..439b5ce2d6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// HostIPApplyConfiguration represents an declarative configuration of the HostIP type for use
    +// HostIPApplyConfiguration represents a declarative configuration of the HostIP type for use
     // with apply.
     type HostIPApplyConfiguration struct {
     	IP *string `json:"ip,omitempty"`
     }
     
    -// HostIPApplyConfiguration constructs an declarative configuration of the HostIP type for use with
    +// HostIPApplyConfiguration constructs a declarative configuration of the HostIP type for use with
     // apply.
     func HostIP() *HostIPApplyConfiguration {
     	return &HostIPApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
    index 8b15689eef..10dfedfdef 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// HostPathVolumeSourceApplyConfiguration represents an declarative configuration of the HostPathVolumeSource type for use
    +// HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use
     // with apply.
     type HostPathVolumeSourceApplyConfiguration struct {
     	Path *string          `json:"path,omitempty"`
     	Type *v1.HostPathType `json:"type,omitempty"`
     }
     
    -// HostPathVolumeSourceApplyConfiguration constructs an declarative configuration of the HostPathVolumeSource type for use with
    +// HostPathVolumeSourceApplyConfiguration constructs a declarative configuration of the HostPathVolumeSource type for use with
     // apply.
     func HostPathVolumeSource() *HostPathVolumeSourceApplyConfiguration {
     	return &HostPathVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
    index e4ecdd4303..5ecbc27fea 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
    @@ -23,7 +23,7 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// HTTPGetActionApplyConfiguration represents an declarative configuration of the HTTPGetAction type for use
    +// HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use
     // with apply.
     type HTTPGetActionApplyConfiguration struct {
     	Path        *string                        `json:"path,omitempty"`
    @@ -33,7 +33,7 @@ type HTTPGetActionApplyConfiguration struct {
     	HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"`
     }
     
    -// HTTPGetActionApplyConfiguration constructs an declarative configuration of the HTTPGetAction type for use with
    +// HTTPGetActionApplyConfiguration constructs a declarative configuration of the HTTPGetAction type for use with
     // apply.
     func HTTPGetAction() *HTTPGetActionApplyConfiguration {
     	return &HTTPGetActionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go
    index d55f36bfd2..2526371669 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// HTTPHeaderApplyConfiguration represents an declarative configuration of the HTTPHeader type for use
    +// HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use
     // with apply.
     type HTTPHeaderApplyConfiguration struct {
     	Name  *string `json:"name,omitempty"`
     	Value *string `json:"value,omitempty"`
     }
     
    -// HTTPHeaderApplyConfiguration constructs an declarative configuration of the HTTPHeader type for use with
    +// HTTPHeaderApplyConfiguration constructs a declarative configuration of the HTTPHeader type for use with
     // apply.
     func HTTPHeader() *HTTPHeaderApplyConfiguration {
     	return &HTTPHeaderApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
    new file mode 100644
    index 0000000000..340f150400
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +)
    +
    +// ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use
    +// with apply.
    +type ImageVolumeSourceApplyConfiguration struct {
    +	Reference  *string        `json:"reference,omitempty"`
    +	PullPolicy *v1.PullPolicy `json:"pullPolicy,omitempty"`
    +}
    +
    +// ImageVolumeSourceApplyConfiguration constructs a declarative configuration of the ImageVolumeSource type for use with
    +// apply.
    +func ImageVolumeSource() *ImageVolumeSourceApplyConfiguration {
    +	return &ImageVolumeSourceApplyConfiguration{}
    +}
    +
    +// WithReference sets the Reference field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Reference field is set to the value of the last call.
    +func (b *ImageVolumeSourceApplyConfiguration) WithReference(value string) *ImageVolumeSourceApplyConfiguration {
    +	b.Reference = &value
    +	return b
    +}
    +
    +// WithPullPolicy sets the PullPolicy field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the PullPolicy field is set to the value of the last call.
    +func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value v1.PullPolicy) *ImageVolumeSourceApplyConfiguration {
    +	b.PullPolicy = &value
    +	return b
    +}
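
imagevolumesource.go is a newly generated builder for the ImageVolumeSource type added in this client-go update. A minimal sketch, assuming the vendored packages are imported as corev1 and corev1ac; the image reference is an arbitrary placeholder, and wiring the result into a volume apply configuration is not shown in this excerpt:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Both With* methods appear in the new file above: Reference is a plain
	// string, PullPolicy reuses the core/v1 PullPolicy type.
	img := corev1ac.ImageVolumeSource().
		WithReference("example.registry/busybox:1.36"). // placeholder reference
		WithPullPolicy(corev1.PullIfNotPresent)

	fmt.Println(*img.Reference, *img.PullPolicy)
}
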
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go
    index c7b248181a..42f420c568 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ISCSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIPersistentVolumeSource type for use
    +// ISCSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIPersistentVolumeSource type for use
     // with apply.
     type ISCSIPersistentVolumeSourceApplyConfiguration struct {
     	TargetPortal      *string                            `json:"targetPortal,omitempty"`
    @@ -34,7 +34,7 @@ type ISCSIPersistentVolumeSourceApplyConfiguration struct {
     	InitiatorName     *string                            `json:"initiatorName,omitempty"`
     }
     
    -// ISCSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIPersistentVolumeSource type for use with
    +// ISCSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIPersistentVolumeSource type for use with
     // apply.
     func ISCSIPersistentVolumeSource() *ISCSIPersistentVolumeSourceApplyConfiguration {
     	return &ISCSIPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go
    index c95941a9c7..61055434bc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ISCSIVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIVolumeSource type for use
    +// ISCSIVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIVolumeSource type for use
     // with apply.
     type ISCSIVolumeSourceApplyConfiguration struct {
     	TargetPortal      *string                                 `json:"targetPortal,omitempty"`
    @@ -34,7 +34,7 @@ type ISCSIVolumeSourceApplyConfiguration struct {
     	InitiatorName     *string                                 `json:"initiatorName,omitempty"`
     }
     
    -// ISCSIVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIVolumeSource type for use with
    +// ISCSIVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIVolumeSource type for use with
     // apply.
     func ISCSIVolumeSource() *ISCSIVolumeSourceApplyConfiguration {
     	return &ISCSIVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go
    index d58676d34c..c961b07955 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// KeyToPathApplyConfiguration represents an declarative configuration of the KeyToPath type for use
    +// KeyToPathApplyConfiguration represents a declarative configuration of the KeyToPath type for use
     // with apply.
     type KeyToPathApplyConfiguration struct {
     	Key  *string `json:"key,omitempty"`
    @@ -26,7 +26,7 @@ type KeyToPathApplyConfiguration struct {
     	Mode *int32  `json:"mode,omitempty"`
     }
     
    -// KeyToPathApplyConfiguration constructs an declarative configuration of the KeyToPath type for use with
    +// KeyToPathApplyConfiguration constructs a declarative configuration of the KeyToPath type for use with
     // apply.
     func KeyToPath() *KeyToPathApplyConfiguration {
     	return &KeyToPathApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go
    index db9abf8af7..e37a30f597 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// LifecycleApplyConfiguration represents an declarative configuration of the Lifecycle type for use
    +// LifecycleApplyConfiguration represents a declarative configuration of the Lifecycle type for use
     // with apply.
     type LifecycleApplyConfiguration struct {
     	PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"`
     	PreStop   *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"`
     }
     
    -// LifecycleApplyConfiguration constructs an declarative configuration of the Lifecycle type for use with
    +// LifecycleApplyConfiguration constructs a declarative configuration of the Lifecycle type for use with
     // apply.
     func Lifecycle() *LifecycleApplyConfiguration {
     	return &LifecycleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go
    index e4ae9c49f7..b7c706d58d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// LifecycleHandlerApplyConfiguration represents an declarative configuration of the LifecycleHandler type for use
    +// LifecycleHandlerApplyConfiguration represents a declarative configuration of the LifecycleHandler type for use
     // with apply.
     type LifecycleHandlerApplyConfiguration struct {
     	Exec      *ExecActionApplyConfiguration      `json:"exec,omitempty"`
    @@ -27,7 +27,7 @@ type LifecycleHandlerApplyConfiguration struct {
     	Sleep     *SleepActionApplyConfiguration     `json:"sleep,omitempty"`
     }
     
    -// LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with
    +// LifecycleHandlerApplyConfiguration constructs a declarative configuration of the LifecycleHandler type for use with
     // apply.
     func LifecycleHandler() *LifecycleHandlerApplyConfiguration {
     	return &LifecycleHandlerApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
    index eaf635c76a..7770200a0a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// LimitRangeApplyConfiguration represents an declarative configuration of the LimitRange type for use
    +// LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use
     // with apply.
     type LimitRangeApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type LimitRangeApplyConfiguration struct {
     	Spec                             *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// LimitRange constructs an declarative configuration of the LimitRange type for use with
    +// LimitRange constructs a declarative configuration of the LimitRange type for use with
     // apply.
     func LimitRange(name, namespace string) *LimitRangeApplyConfiguration {
     	b := &LimitRangeApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *LimitRangeApplyConfiguration) WithSpec(value *LimitRangeSpecApplyConfig
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *LimitRangeApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
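The GetName accessor added above reads the Name back out of the embedded ObjectMeta apply configuration, creating it if necessary. A minimal sketch, not part of the diff, with an illustrative name and namespace:

	package main

	import (
		"fmt"

		corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	)

	func main() {
		// LimitRange(name, namespace) seeds ObjectMeta; GetName returns the
		// stored name as a *string.
		lr := corev1ac.LimitRange("compute-limits", "default")
		if name := lr.GetName(); name != nil {
			fmt.Println(*name) // "compute-limits"
		}
	}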
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
    index 084650fdaa..61d8344e80 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// LimitRangeItemApplyConfiguration represents an declarative configuration of the LimitRangeItem type for use
    +// LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use
     // with apply.
     type LimitRangeItemApplyConfiguration struct {
     	Type                 *v1.LimitType    `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type LimitRangeItemApplyConfiguration struct {
     	MaxLimitRequestRatio *v1.ResourceList `json:"maxLimitRequestRatio,omitempty"`
     }
     
    -// LimitRangeItemApplyConfiguration constructs an declarative configuration of the LimitRangeItem type for use with
    +// LimitRangeItemApplyConfiguration constructs a declarative configuration of the LimitRangeItem type for use with
     // apply.
     func LimitRangeItem() *LimitRangeItemApplyConfiguration {
     	return &LimitRangeItemApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go
    index 5eee5c498e..8d69c1c0cd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// LimitRangeSpecApplyConfiguration represents an declarative configuration of the LimitRangeSpec type for use
    +// LimitRangeSpecApplyConfiguration represents a declarative configuration of the LimitRangeSpec type for use
     // with apply.
     type LimitRangeSpecApplyConfiguration struct {
     	Limits []LimitRangeItemApplyConfiguration `json:"limits,omitempty"`
     }
     
    -// LimitRangeSpecApplyConfiguration constructs an declarative configuration of the LimitRangeSpec type for use with
    +// LimitRangeSpecApplyConfiguration constructs a declarative configuration of the LimitRangeSpec type for use with
     // apply.
     func LimitRangeSpec() *LimitRangeSpecApplyConfiguration {
     	return &LimitRangeSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go
    new file mode 100644
    index 0000000000..fbab4815ab
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go
    @@ -0,0 +1,59 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// LinuxContainerUserApplyConfiguration represents a declarative configuration of the LinuxContainerUser type for use
    +// with apply.
    +type LinuxContainerUserApplyConfiguration struct {
    +	UID                *int64  `json:"uid,omitempty"`
    +	GID                *int64  `json:"gid,omitempty"`
    +	SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
    +}
    +
    +// LinuxContainerUserApplyConfiguration constructs a declarative configuration of the LinuxContainerUser type for use with
    +// apply.
    +func LinuxContainerUser() *LinuxContainerUserApplyConfiguration {
    +	return &LinuxContainerUserApplyConfiguration{}
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *LinuxContainerUserApplyConfiguration) WithUID(value int64) *LinuxContainerUserApplyConfiguration {
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithGID sets the GID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GID field is set to the value of the last call.
    +func (b *LinuxContainerUserApplyConfiguration) WithGID(value int64) *LinuxContainerUserApplyConfiguration {
    +	b.GID = &value
    +	return b
    +}
    +
    +// WithSupplementalGroups adds the given value to the SupplementalGroups field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the SupplementalGroups field.
    +func (b *LinuxContainerUserApplyConfiguration) WithSupplementalGroups(values ...int64) *LinuxContainerUserApplyConfiguration {
    +	for i := range values {
    +		b.SupplementalGroups = append(b.SupplementalGroups, values[i])
    +	}
    +	return b
    +}
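For the new LinuxContainerUser builder above, a minimal sketch (not part of the diff; UID/GID values are illustrative). Note the asymmetry the generated comments describe: UID and GID are last-call-wins, while WithSupplementalGroups appends across calls.

	package main

	import (
		"fmt"

		corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	)

	func main() {
		u := corev1ac.LinuxContainerUser().
			WithUID(1000).
			WithGID(1000).
			WithSupplementalGroups(2000, 3000) // appended, not replaced
		fmt.Println(*u.UID, *u.GID, u.SupplementalGroups)
	}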
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
    index a48dac6810..1a7d998152 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use
    +// LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use
     // with apply.
     type LoadBalancerIngressApplyConfiguration struct {
     	IP       *string                        `json:"ip,omitempty"`
    @@ -31,7 +31,7 @@ type LoadBalancerIngressApplyConfiguration struct {
     	Ports    []PortStatusApplyConfiguration `json:"ports,omitempty"`
     }
     
    -// LoadBalancerIngressApplyConfiguration constructs an declarative configuration of the LoadBalancerIngress type for use with
    +// LoadBalancerIngressApplyConfiguration constructs a declarative configuration of the LoadBalancerIngress type for use with
     // apply.
     func LoadBalancerIngress() *LoadBalancerIngressApplyConfiguration {
     	return &LoadBalancerIngressApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go
    index 2fcc0cad18..bb3d616c15 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// LoadBalancerStatusApplyConfiguration represents an declarative configuration of the LoadBalancerStatus type for use
    +// LoadBalancerStatusApplyConfiguration represents a declarative configuration of the LoadBalancerStatus type for use
     // with apply.
     type LoadBalancerStatusApplyConfiguration struct {
     	Ingress []LoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
     }
     
    -// LoadBalancerStatusApplyConfiguration constructs an declarative configuration of the LoadBalancerStatus type for use with
    +// LoadBalancerStatusApplyConfiguration constructs a declarative configuration of the LoadBalancerStatus type for use with
     // apply.
     func LoadBalancerStatus() *LoadBalancerStatusApplyConfiguration {
     	return &LoadBalancerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go
    index 7662e32b31..c55d6803dc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// LocalObjectReferenceApplyConfiguration represents an declarative configuration of the LocalObjectReference type for use
    +// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use
     // with apply.
     type LocalObjectReferenceApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// LocalObjectReferenceApplyConfiguration constructs an declarative configuration of the LocalObjectReference type for use with
    +// LocalObjectReferenceApplyConfiguration constructs a declarative configuration of the LocalObjectReference type for use with
     // apply.
     func LocalObjectReference() *LocalObjectReferenceApplyConfiguration {
     	return &LocalObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go
    index 5d289bd12d..db711d9934 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// LocalVolumeSourceApplyConfiguration represents an declarative configuration of the LocalVolumeSource type for use
    +// LocalVolumeSourceApplyConfiguration represents a declarative configuration of the LocalVolumeSource type for use
     // with apply.
     type LocalVolumeSourceApplyConfiguration struct {
     	Path   *string `json:"path,omitempty"`
     	FSType *string `json:"fsType,omitempty"`
     }
     
    -// LocalVolumeSourceApplyConfiguration constructs an declarative configuration of the LocalVolumeSource type for use with
    +// LocalVolumeSourceApplyConfiguration constructs a declarative configuration of the LocalVolumeSource type for use with
     // apply.
     func LocalVolumeSource() *LocalVolumeSourceApplyConfiguration {
     	return &LocalVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
    index 4ff1d040cf..704c321652 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ModifyVolumeStatusApplyConfiguration represents an declarative configuration of the ModifyVolumeStatus type for use
    +// ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use
     // with apply.
     type ModifyVolumeStatusApplyConfiguration struct {
     	TargetVolumeAttributesClassName *string                                     `json:"targetVolumeAttributesClassName,omitempty"`
     	Status                          *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"`
     }
     
    -// ModifyVolumeStatusApplyConfiguration constructs an declarative configuration of the ModifyVolumeStatus type for use with
    +// ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with
     // apply.
     func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration {
     	return &ModifyVolumeStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
    index bdc9ef167c..0b77af183d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NamespaceApplyConfiguration represents an declarative configuration of the Namespace type for use
    +// NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use
     // with apply.
     type NamespaceApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type NamespaceApplyConfiguration struct {
     	Status                           *NamespaceStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Namespace constructs an declarative configuration of the Namespace type for use with
    +// Namespace constructs a declarative configuration of the Namespace type for use with
     // apply.
     func Namespace(name string) *NamespaceApplyConfiguration {
     	b := &NamespaceApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *NamespaceApplyConfiguration) WithStatus(value *NamespaceStatusApplyConf
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *NamespaceApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
    index 8651978b0f..9784c3e6f4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// NamespaceConditionApplyConfiguration represents an declarative configuration of the NamespaceCondition type for use
    +// NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use
     // with apply.
     type NamespaceConditionApplyConfiguration struct {
     	Type               *v1.NamespaceConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type NamespaceConditionApplyConfiguration struct {
     	Message            *string                    `json:"message,omitempty"`
     }
     
    -// NamespaceConditionApplyConfiguration constructs an declarative configuration of the NamespaceCondition type for use with
    +// NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with
     // apply.
     func NamespaceCondition() *NamespaceConditionApplyConfiguration {
     	return &NamespaceConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
    index 9bc02d1fa2..6d7b7f1f95 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// NamespaceSpecApplyConfiguration represents an declarative configuration of the NamespaceSpec type for use
    +// NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use
     // with apply.
     type NamespaceSpecApplyConfiguration struct {
     	Finalizers []v1.FinalizerName `json:"finalizers,omitempty"`
     }
     
    -// NamespaceSpecApplyConfiguration constructs an declarative configuration of the NamespaceSpec type for use with
    +// NamespaceSpecApplyConfiguration constructs a declarative configuration of the NamespaceSpec type for use with
     // apply.
     func NamespaceSpec() *NamespaceSpecApplyConfiguration {
     	return &NamespaceSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
    index d950fd3161..314908109f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// NamespaceStatusApplyConfiguration represents an declarative configuration of the NamespaceStatus type for use
    +// NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use
     // with apply.
     type NamespaceStatusApplyConfiguration struct {
     	Phase      *v1.NamespacePhase                     `json:"phase,omitempty"`
     	Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// NamespaceStatusApplyConfiguration constructs an declarative configuration of the NamespaceStatus type for use with
    +// NamespaceStatusApplyConfiguration constructs a declarative configuration of the NamespaceStatus type for use with
     // apply.
     func NamespaceStatus() *NamespaceStatusApplyConfiguration {
     	return &NamespaceStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go
    index cb300ee81e..ed49a87a9e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// NFSVolumeSourceApplyConfiguration represents an declarative configuration of the NFSVolumeSource type for use
    +// NFSVolumeSourceApplyConfiguration represents a declarative configuration of the NFSVolumeSource type for use
     // with apply.
     type NFSVolumeSourceApplyConfiguration struct {
     	Server   *string `json:"server,omitempty"`
    @@ -26,7 +26,7 @@ type NFSVolumeSourceApplyConfiguration struct {
     	ReadOnly *bool   `json:"readOnly,omitempty"`
     }
     
    -// NFSVolumeSourceApplyConfiguration constructs an declarative configuration of the NFSVolumeSource type for use with
    +// NFSVolumeSourceApplyConfiguration constructs a declarative configuration of the NFSVolumeSource type for use with
     // apply.
     func NFSVolumeSource() *NFSVolumeSourceApplyConfiguration {
     	return &NFSVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
    index 047f4ac1cb..ef13392591 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NodeApplyConfiguration represents an declarative configuration of the Node type for use
    +// NodeApplyConfiguration represents a declarative configuration of the Node type for use
     // with apply.
     type NodeApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type NodeApplyConfiguration struct {
     	Status                           *NodeStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Node constructs an declarative configuration of the Node type for use with
    +// Node constructs a declarative configuration of the Node type for use with
     // apply.
     func Node(name string) *NodeApplyConfiguration {
     	b := &NodeApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration)
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *NodeApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
    index a1d4fbe04e..a9cb036c54 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// NodeAddressApplyConfiguration represents an declarative configuration of the NodeAddress type for use
    +// NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use
     // with apply.
     type NodeAddressApplyConfiguration struct {
     	Type    *v1.NodeAddressType `json:"type,omitempty"`
     	Address *string             `json:"address,omitempty"`
     }
     
    -// NodeAddressApplyConfiguration constructs an declarative configuration of the NodeAddress type for use with
    +// NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with
     // apply.
     func NodeAddress() *NodeAddressApplyConfiguration {
     	return &NodeAddressApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go
    index e28ced6e46..5d11d746dc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// NodeAffinityApplyConfiguration represents an declarative configuration of the NodeAffinity type for use
    +// NodeAffinityApplyConfiguration represents a declarative configuration of the NodeAffinity type for use
     // with apply.
     type NodeAffinityApplyConfiguration struct {
     	RequiredDuringSchedulingIgnoredDuringExecution  *NodeSelectorApplyConfiguration             `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
     	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
     }
     
    -// NodeAffinityApplyConfiguration constructs an declarative configuration of the NodeAffinity type for use with
    +// NodeAffinityApplyConfiguration constructs a declarative configuration of the NodeAffinity type for use with
     // apply.
     func NodeAffinity() *NodeAffinityApplyConfiguration {
     	return &NodeAffinityApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
    index eb81ca543f..a1b8ed0f38 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// NodeConditionApplyConfiguration represents an declarative configuration of the NodeCondition type for use
    +// NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use
     // with apply.
     type NodeConditionApplyConfiguration struct {
     	Type               *v1.NodeConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type NodeConditionApplyConfiguration struct {
     	Message            *string               `json:"message,omitempty"`
     }
     
    -// NodeConditionApplyConfiguration constructs an declarative configuration of the NodeCondition type for use with
    +// NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with
     // apply.
     func NodeCondition() *NodeConditionApplyConfiguration {
     	return &NodeConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go
    index 60567aa431..00a671fc0c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// NodeConfigSourceApplyConfiguration represents an declarative configuration of the NodeConfigSource type for use
    +// NodeConfigSourceApplyConfiguration represents a declarative configuration of the NodeConfigSource type for use
     // with apply.
     type NodeConfigSourceApplyConfiguration struct {
     	ConfigMap *ConfigMapNodeConfigSourceApplyConfiguration `json:"configMap,omitempty"`
     }
     
    -// NodeConfigSourceApplyConfiguration constructs an declarative configuration of the NodeConfigSource type for use with
    +// NodeConfigSourceApplyConfiguration constructs a declarative configuration of the NodeConfigSource type for use with
     // apply.
     func NodeConfigSource() *NodeConfigSourceApplyConfiguration {
     	return &NodeConfigSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go
    index 71447fe9c0..d5ccc45c6a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// NodeConfigStatusApplyConfiguration represents an declarative configuration of the NodeConfigStatus type for use
    +// NodeConfigStatusApplyConfiguration represents a declarative configuration of the NodeConfigStatus type for use
     // with apply.
     type NodeConfigStatusApplyConfiguration struct {
     	Assigned      *NodeConfigSourceApplyConfiguration `json:"assigned,omitempty"`
    @@ -27,7 +27,7 @@ type NodeConfigStatusApplyConfiguration struct {
     	Error         *string                             `json:"error,omitempty"`
     }
     
    -// NodeConfigStatusApplyConfiguration constructs an declarative configuration of the NodeConfigStatus type for use with
    +// NodeConfigStatusApplyConfiguration constructs a declarative configuration of the NodeConfigStatus type for use with
     // apply.
     func NodeConfigStatus() *NodeConfigStatusApplyConfiguration {
     	return &NodeConfigStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go
    index 4cabc7f526..11228b3691 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// NodeDaemonEndpointsApplyConfiguration represents an declarative configuration of the NodeDaemonEndpoints type for use
    +// NodeDaemonEndpointsApplyConfiguration represents a declarative configuration of the NodeDaemonEndpoints type for use
     // with apply.
     type NodeDaemonEndpointsApplyConfiguration struct {
     	KubeletEndpoint *DaemonEndpointApplyConfiguration `json:"kubeletEndpoint,omitempty"`
     }
     
    -// NodeDaemonEndpointsApplyConfiguration constructs an declarative configuration of the NodeDaemonEndpoints type for use with
    +// NodeDaemonEndpointsApplyConfiguration constructs a declarative configuration of the NodeDaemonEndpoints type for use with
     // apply.
     func NodeDaemonEndpoints() *NodeDaemonEndpointsApplyConfiguration {
     	return &NodeDaemonEndpointsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go
    new file mode 100644
    index 0000000000..678b0e36d6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// NodeFeaturesApplyConfiguration represents a declarative configuration of the NodeFeatures type for use
    +// with apply.
    +type NodeFeaturesApplyConfiguration struct {
    +	SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty"`
    +}
    +
    +// NodeFeaturesApplyConfiguration constructs a declarative configuration of the NodeFeatures type for use with
    +// apply.
    +func NodeFeatures() *NodeFeaturesApplyConfiguration {
    +	return &NodeFeaturesApplyConfiguration{}
    +}
    +
    +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call.
    +func (b *NodeFeaturesApplyConfiguration) WithSupplementalGroupsPolicy(value bool) *NodeFeaturesApplyConfiguration {
    +	b.SupplementalGroupsPolicy = &value
    +	return b
    +}
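The new NodeFeatures builder above carries a single boolean feature flag. A minimal sketch, not part of the diff:

	package main

	import (
		"fmt"

		corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	)

	func main() {
		// Declare that the node supports SupplementalGroupsPolicy; this value
		// is attached to NodeStatus via WithFeatures (see the nodestatus.go hunk below).
		nf := corev1ac.NodeFeatures().WithSupplementalGroupsPolicy(true)
		fmt.Println(*nf.SupplementalGroupsPolicy)
	}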
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go
    index 9ada0a18ef..c7c664974e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// NodeRuntimeHandlerApplyConfiguration represents an declarative configuration of the NodeRuntimeHandler type for use
    +// NodeRuntimeHandlerApplyConfiguration represents a declarative configuration of the NodeRuntimeHandler type for use
     // with apply.
     type NodeRuntimeHandlerApplyConfiguration struct {
     	Name     *string                                       `json:"name,omitempty"`
     	Features *NodeRuntimeHandlerFeaturesApplyConfiguration `json:"features,omitempty"`
     }
     
    -// NodeRuntimeHandlerApplyConfiguration constructs an declarative configuration of the NodeRuntimeHandler type for use with
    +// NodeRuntimeHandlerApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandler type for use with
     // apply.
     func NodeRuntimeHandler() *NodeRuntimeHandlerApplyConfiguration {
     	return &NodeRuntimeHandlerApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go
    index a3e3a52e88..a295b60969 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go
    @@ -18,13 +18,14 @@ limitations under the License.
     
     package v1
     
    -// NodeRuntimeHandlerFeaturesApplyConfiguration represents an declarative configuration of the NodeRuntimeHandlerFeatures type for use
    +// NodeRuntimeHandlerFeaturesApplyConfiguration represents a declarative configuration of the NodeRuntimeHandlerFeatures type for use
     // with apply.
     type NodeRuntimeHandlerFeaturesApplyConfiguration struct {
     	RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty"`
    +	UserNamespaces          *bool `json:"userNamespaces,omitempty"`
     }
     
    -// NodeRuntimeHandlerFeaturesApplyConfiguration constructs an declarative configuration of the NodeRuntimeHandlerFeatures type for use with
    +// NodeRuntimeHandlerFeaturesApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandlerFeatures type for use with
     // apply.
     func NodeRuntimeHandlerFeatures() *NodeRuntimeHandlerFeaturesApplyConfiguration {
     	return &NodeRuntimeHandlerFeaturesApplyConfiguration{}
    @@ -37,3 +38,11 @@ func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithRecursiveReadOnlyMoun
     	b.RecursiveReadOnlyMounts = &value
     	return b
     }
    +
    +// WithUserNamespaces sets the UserNamespaces field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UserNamespaces field is set to the value of the last call.
    +func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithUserNamespaces(value bool) *NodeRuntimeHandlerFeaturesApplyConfiguration {
    +	b.UserNamespaces = &value
    +	return b
    +}
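With the UserNamespaces field added above, NodeRuntimeHandlerFeatures now exposes two boolean setters. A minimal sketch, not part of the diff, using only the setters shown in this hunk:

	package main

	import (
		"fmt"

		corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	)

	func main() {
		// Both flags are simple last-call-wins boolean setters.
		f := corev1ac.NodeRuntimeHandlerFeatures().
			WithRecursiveReadOnlyMounts(true).
			WithUserNamespaces(true)
		fmt.Println(*f.RecursiveReadOnlyMounts, *f.UserNamespaces)
	}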
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go
    index 5489097f5a..6eab109795 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// NodeSelectorApplyConfiguration represents an declarative configuration of the NodeSelector type for use
    +// NodeSelectorApplyConfiguration represents a declarative configuration of the NodeSelector type for use
     // with apply.
     type NodeSelectorApplyConfiguration struct {
     	NodeSelectorTerms []NodeSelectorTermApplyConfiguration `json:"nodeSelectorTerms,omitempty"`
     }
     
    -// NodeSelectorApplyConfiguration constructs an declarative configuration of the NodeSelector type for use with
    +// NodeSelectorApplyConfiguration constructs a declarative configuration of the NodeSelector type for use with
     // apply.
     func NodeSelector() *NodeSelectorApplyConfiguration {
     	return &NodeSelectorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
    index a6e43e607e..7c383e06c2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// NodeSelectorRequirementApplyConfiguration represents an declarative configuration of the NodeSelectorRequirement type for use
    +// NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use
     // with apply.
     type NodeSelectorRequirementApplyConfiguration struct {
     	Key      *string                  `json:"key,omitempty"`
    @@ -30,7 +30,7 @@ type NodeSelectorRequirementApplyConfiguration struct {
     	Values   []string                 `json:"values,omitempty"`
     }
     
    -// NodeSelectorRequirementApplyConfiguration constructs an declarative configuration of the NodeSelectorRequirement type for use with
    +// NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with
     // apply.
     func NodeSelectorRequirement() *NodeSelectorRequirementApplyConfiguration {
     	return &NodeSelectorRequirementApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go
    index 13b3ddbc1b..9d0d780f3e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// NodeSelectorTermApplyConfiguration represents an declarative configuration of the NodeSelectorTerm type for use
    +// NodeSelectorTermApplyConfiguration represents a declarative configuration of the NodeSelectorTerm type for use
     // with apply.
     type NodeSelectorTermApplyConfiguration struct {
     	MatchExpressions []NodeSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"`
     	MatchFields      []NodeSelectorRequirementApplyConfiguration `json:"matchFields,omitempty"`
     }
     
    -// NodeSelectorTermApplyConfiguration constructs an declarative configuration of the NodeSelectorTerm type for use with
    +// NodeSelectorTermApplyConfiguration constructs a declarative configuration of the NodeSelectorTerm type for use with
     // apply.
     func NodeSelectorTerm() *NodeSelectorTermApplyConfiguration {
     	return &NodeSelectorTermApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go
    index 63b61078d0..8ac3497127 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// NodeSpecApplyConfiguration represents an declarative configuration of the NodeSpec type for use
    +// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use
     // with apply.
     type NodeSpecApplyConfiguration struct {
     	PodCIDR            *string                             `json:"podCIDR,omitempty"`
    @@ -30,7 +30,7 @@ type NodeSpecApplyConfiguration struct {
     	DoNotUseExternalID *string                             `json:"externalID,omitempty"`
     }
     
    -// NodeSpecApplyConfiguration constructs an declarative configuration of the NodeSpec type for use with
    +// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with
     // apply.
     func NodeSpec() *NodeSpecApplyConfiguration {
     	return &NodeSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
    index a4a30a2685..8411c57ac0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// NodeStatusApplyConfiguration represents an declarative configuration of the NodeStatus type for use
    +// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use
     // with apply.
     type NodeStatusApplyConfiguration struct {
     	Capacity        *v1.ResourceList                       `json:"capacity,omitempty"`
    @@ -37,9 +37,10 @@ type NodeStatusApplyConfiguration struct {
     	VolumesAttached []AttachedVolumeApplyConfiguration     `json:"volumesAttached,omitempty"`
     	Config          *NodeConfigStatusApplyConfiguration    `json:"config,omitempty"`
     	RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"`
    +	Features        *NodeFeaturesApplyConfiguration        `json:"features,omitempty"`
     }
     
    -// NodeStatusApplyConfiguration constructs an declarative configuration of the NodeStatus type for use with
    +// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with
     // apply.
     func NodeStatus() *NodeStatusApplyConfiguration {
     	return &NodeStatusApplyConfiguration{}
    @@ -167,3 +168,11 @@ func (b *NodeStatusApplyConfiguration) WithRuntimeHandlers(values ...*NodeRuntim
     	}
     	return b
     }
    +
    +// WithFeatures sets the Features field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Features field is set to the value of the last call.
    +func (b *NodeStatusApplyConfiguration) WithFeatures(value *NodeFeaturesApplyConfiguration) *NodeStatusApplyConfiguration {
    +	b.Features = value
    +	return b
    +}
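Tying the NodeStatus change above to the earlier hunks: WithFeatures accepts the NodeFeatures apply configuration, and the node-level GetName added in node.go reads the name back. A minimal end-to-end sketch, not part of the diff; the node name is illustrative.

	package main

	import (
		"fmt"

		corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	)

	func main() {
		// Build a node apply configuration whose status carries the new Features block.
		node := corev1ac.Node("worker-1").
			WithStatus(corev1ac.NodeStatus().
				WithFeatures(corev1ac.NodeFeatures().WithSupplementalGroupsPolicy(true)))
		fmt.Println(*node.GetName(), *node.Status.Features.SupplementalGroupsPolicy)
	}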
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go
    index 2634ea9842..11ac50713c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// NodeSystemInfoApplyConfiguration represents an declarative configuration of the NodeSystemInfo type for use
    +// NodeSystemInfoApplyConfiguration represents a declarative configuration of the NodeSystemInfo type for use
     // with apply.
     type NodeSystemInfoApplyConfiguration struct {
     	MachineID               *string `json:"machineID,omitempty"`
    @@ -33,7 +33,7 @@ type NodeSystemInfoApplyConfiguration struct {
     	Architecture            *string `json:"architecture,omitempty"`
     }
     
    -// NodeSystemInfoApplyConfiguration constructs an declarative configuration of the NodeSystemInfo type for use with
    +// NodeSystemInfoApplyConfiguration constructs a declarative configuration of the NodeSystemInfo type for use with
     // apply.
     func NodeSystemInfo() *NodeSystemInfoApplyConfiguration {
     	return &NodeSystemInfoApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go
    index 0c2402b3c7..c129c998b1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ObjectFieldSelectorApplyConfiguration represents an declarative configuration of the ObjectFieldSelector type for use
    +// ObjectFieldSelectorApplyConfiguration represents a declarative configuration of the ObjectFieldSelector type for use
     // with apply.
     type ObjectFieldSelectorApplyConfiguration struct {
     	APIVersion *string `json:"apiVersion,omitempty"`
     	FieldPath  *string `json:"fieldPath,omitempty"`
     }
     
    -// ObjectFieldSelectorApplyConfiguration constructs an declarative configuration of the ObjectFieldSelector type for use with
    +// ObjectFieldSelectorApplyConfiguration constructs a declarative configuration of the ObjectFieldSelector type for use with
     // apply.
     func ObjectFieldSelector() *ObjectFieldSelectorApplyConfiguration {
     	return &ObjectFieldSelectorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go
    index 667fa84a81..4cd3f226ef 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go
    @@ -22,7 +22,7 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     )
     
    -// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use
    +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use
     // with apply.
     type ObjectReferenceApplyConfiguration struct {
     	Kind            *string    `json:"kind,omitempty"`
    @@ -34,7 +34,7 @@ type ObjectReferenceApplyConfiguration struct {
     	FieldPath       *string    `json:"fieldPath,omitempty"`
     }
     
    -// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with
    +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with
     // apply.
     func ObjectReference() *ObjectReferenceApplyConfiguration {
     	return &ObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
    index 2599c197e8..020f87411e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PersistentVolumeApplyConfiguration represents an declarative configuration of the PersistentVolume type for use
    +// PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use
     // with apply.
     type PersistentVolumeApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PersistentVolumeApplyConfiguration struct {
     	Status                           *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PersistentVolume constructs an declarative configuration of the PersistentVolume type for use with
    +// PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with
     // apply.
     func PersistentVolume(name string) *PersistentVolumeApplyConfiguration {
     	b := &PersistentVolumeApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *PersistentVolumeApplyConfiguration) WithStatus(value *PersistentVolumeS
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PersistentVolumeApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
    index a0a0017018..81cf791443 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PersistentVolumeClaimApplyConfiguration represents an declarative configuration of the PersistentVolumeClaim type for use
    +// PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use
     // with apply.
     type PersistentVolumeClaimApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PersistentVolumeClaimApplyConfiguration struct {
     	Status                           *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PersistentVolumeClaim constructs an declarative configuration of the PersistentVolumeClaim type for use with
    +// PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with
     // apply.
     func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyConfiguration {
     	b := &PersistentVolumeClaimApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithStatus(value *PersistentVo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PersistentVolumeClaimApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
    index 65449e92eb..80038c0677 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PersistentVolumeClaimConditionApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimCondition type for use
    +// PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use
     // with apply.
     type PersistentVolumeClaimConditionApplyConfiguration struct {
     	Type               *v1.PersistentVolumeClaimConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type PersistentVolumeClaimConditionApplyConfiguration struct {
     	Message            *string                                `json:"message,omitempty"`
     }
     
    -// PersistentVolumeClaimConditionApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimCondition type for use with
    +// PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with
     // apply.
     func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfiguration {
     	return &PersistentVolumeClaimConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
    index 4db12fadb3..5ce671cd99 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use
    +// PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use
     // with apply.
     type PersistentVolumeClaimSpecApplyConfiguration struct {
     	AccessModes               []v1.PersistentVolumeAccessMode               `json:"accessModes,omitempty"`
    @@ -37,7 +37,7 @@ type PersistentVolumeClaimSpecApplyConfiguration struct {
     	VolumeAttributesClassName *string                                       `json:"volumeAttributesClassName,omitempty"`
     }
     
    -// PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with
    +// PersistentVolumeClaimSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimSpec type for use with
     // apply.
     func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration {
     	return &PersistentVolumeClaimSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
    index 1f6d5ae323..3eebf95ad5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use
    +// PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use
     // with apply.
     type PersistentVolumeClaimStatusApplyConfiguration struct {
     	Phase                            *v1.PersistentVolumeClaimPhase                     `json:"phase,omitempty"`
    @@ -35,7 +35,7 @@ type PersistentVolumeClaimStatusApplyConfiguration struct {
     	ModifyVolumeStatus               *ModifyVolumeStatusApplyConfiguration              `json:"modifyVolumeStatus,omitempty"`
     }
     
    -// PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with
    +// PersistentVolumeClaimStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimStatus type for use with
     // apply.
     func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguration {
     	return &PersistentVolumeClaimStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
    index 894d04f0b4..ed49702913 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
    @@ -24,14 +24,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PersistentVolumeClaimTemplateApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimTemplate type for use
    +// PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use
     // with apply.
     type PersistentVolumeClaimTemplateApplyConfiguration struct {
     	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
     	Spec                             *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// PersistentVolumeClaimTemplateApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimTemplate type for use with
    +// PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with
     // apply.
     func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfiguration {
     	return &PersistentVolumeClaimTemplateApplyConfiguration{}
    @@ -186,3 +186,9 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSpec(value *Persis
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PersistentVolumeClaimTemplateApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go
    index a498fa6a5e..ccccdfb493 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// PersistentVolumeClaimVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimVolumeSource type for use
    +// PersistentVolumeClaimVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimVolumeSource type for use
     // with apply.
     type PersistentVolumeClaimVolumeSourceApplyConfiguration struct {
     	ClaimName *string `json:"claimName,omitempty"`
     	ReadOnly  *bool   `json:"readOnly,omitempty"`
     }
     
    -// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimVolumeSource type for use with
    +// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimVolumeSource type for use with
     // apply.
     func PersistentVolumeClaimVolumeSource() *PersistentVolumeClaimVolumeSourceApplyConfiguration {
     	return &PersistentVolumeClaimVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go
    index 0576e7dd31..aba0124622 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// PersistentVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeSource type for use
    +// PersistentVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeSource type for use
     // with apply.
     type PersistentVolumeSourceApplyConfiguration struct {
     	GCEPersistentDisk    *GCEPersistentDiskVolumeSourceApplyConfiguration    `json:"gcePersistentDisk,omitempty"`
    @@ -45,7 +45,7 @@ type PersistentVolumeSourceApplyConfiguration struct {
     	CSI                  *CSIPersistentVolumeSourceApplyConfiguration        `json:"csi,omitempty"`
     }
     
    -// PersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeSource type for use with
    +// PersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeSource type for use with
     // apply.
     func PersistentVolumeSource() *PersistentVolumeSourceApplyConfiguration {
     	return &PersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
    index 8a30dab649..074fa55d1f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// PersistentVolumeSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeSpec type for use
    +// PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use
     // with apply.
     type PersistentVolumeSpecApplyConfiguration struct {
     	Capacity                                 *v1.ResourceList `json:"capacity,omitempty"`
    @@ -37,7 +37,7 @@ type PersistentVolumeSpecApplyConfiguration struct {
     	VolumeAttributesClassName                *string                               `json:"volumeAttributesClassName,omitempty"`
     }
     
    -// PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with
    +// PersistentVolumeSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeSpec type for use with
     // apply.
     func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration {
     	return &PersistentVolumeSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
    index a473c0e927..95ba90f48b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PersistentVolumeStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeStatus type for use
    +// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use
     // with apply.
     type PersistentVolumeStatusApplyConfiguration struct {
     	Phase                   *v1.PersistentVolumePhase `json:"phase,omitempty"`
    @@ -32,7 +32,7 @@ type PersistentVolumeStatusApplyConfiguration struct {
     	LastPhaseTransitionTime *metav1.Time              `json:"lastPhaseTransitionTime,omitempty"`
     }
     
    -// PersistentVolumeStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeStatus type for use with
    +// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with
     // apply.
     func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration {
     	return &PersistentVolumeStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go
    index 43587d6768..d8dc103e2a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// PhotonPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the PhotonPersistentDiskVolumeSource type for use
    +// PhotonPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the PhotonPersistentDiskVolumeSource type for use
     // with apply.
     type PhotonPersistentDiskVolumeSourceApplyConfiguration struct {
     	PdID   *string `json:"pdID,omitempty"`
     	FSType *string `json:"fsType,omitempty"`
     }
     
    -// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the PhotonPersistentDiskVolumeSource type for use with
    +// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the PhotonPersistentDiskVolumeSource type for use with
     // apply.
     func PhotonPersistentDiskVolumeSource() *PhotonPersistentDiskVolumeSourceApplyConfiguration {
     	return &PhotonPersistentDiskVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
    index 7210bd9836..507d57d6f3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodApplyConfiguration represents an declarative configuration of the Pod type for use
    +// PodApplyConfiguration represents a declarative configuration of the Pod type for use
     // with apply.
     type PodApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PodApplyConfiguration struct {
     	Status                           *PodStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Pod constructs an declarative configuration of the Pod type for use with
    +// Pod constructs a declarative configuration of the Pod type for use with
     // apply.
     func Pod(name, namespace string) *PodApplyConfiguration {
     	b := &PodApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *PodApplyConfiguration) WithStatus(value *PodStatusApplyConfiguration) *
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PodApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go
    index 7049c62121..23fed95464 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// PodAffinityApplyConfiguration represents an declarative configuration of the PodAffinity type for use
    +// PodAffinityApplyConfiguration represents a declarative configuration of the PodAffinity type for use
     // with apply.
     type PodAffinityApplyConfiguration struct {
     	RequiredDuringSchedulingIgnoredDuringExecution  []PodAffinityTermApplyConfiguration         `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
     	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
     }
     
    -// PodAffinityApplyConfiguration constructs an declarative configuration of the PodAffinity type for use with
    +// PodAffinityApplyConfiguration constructs a declarative configuration of the PodAffinity type for use with
     // apply.
     func PodAffinity() *PodAffinityApplyConfiguration {
     	return &PodAffinityApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
    index ac1eab3d8c..3afce026d4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodAffinityTermApplyConfiguration represents an declarative configuration of the PodAffinityTerm type for use
    +// PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use
     // with apply.
     type PodAffinityTermApplyConfiguration struct {
     	LabelSelector     *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
    @@ -33,7 +33,7 @@ type PodAffinityTermApplyConfiguration struct {
     	MismatchLabelKeys []string                            `json:"mismatchLabelKeys,omitempty"`
     }
     
    -// PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with
    +// PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with
     // apply.
     func PodAffinityTerm() *PodAffinityTermApplyConfiguration {
     	return &PodAffinityTermApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go
    index 42681c54c4..ae9848963d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// PodAntiAffinityApplyConfiguration represents an declarative configuration of the PodAntiAffinity type for use
    +// PodAntiAffinityApplyConfiguration represents a declarative configuration of the PodAntiAffinity type for use
     // with apply.
     type PodAntiAffinityApplyConfiguration struct {
     	RequiredDuringSchedulingIgnoredDuringExecution  []PodAffinityTermApplyConfiguration         `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
     	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
     }
     
    -// PodAntiAffinityApplyConfiguration constructs an declarative configuration of the PodAntiAffinity type for use with
    +// PodAntiAffinityApplyConfiguration constructs a declarative configuration of the PodAntiAffinity type for use with
     // apply.
     func PodAntiAffinity() *PodAntiAffinityApplyConfiguration {
     	return &PodAntiAffinityApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
    index 610209f3c4..98968d26d0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PodConditionApplyConfiguration represents an declarative configuration of the PodCondition type for use
    +// PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use
     // with apply.
     type PodConditionApplyConfiguration struct {
     	Type               *v1.PodConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type PodConditionApplyConfiguration struct {
     	Message            *string              `json:"message,omitempty"`
     }
     
    -// PodConditionApplyConfiguration constructs an declarative configuration of the PodCondition type for use with
    +// PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with
     // apply.
     func PodCondition() *PodConditionApplyConfiguration {
     	return &PodConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go
    index 0fe6a08349..2e0ce9a91e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// PodDNSConfigApplyConfiguration represents an declarative configuration of the PodDNSConfig type for use
    +// PodDNSConfigApplyConfiguration represents a declarative configuration of the PodDNSConfig type for use
     // with apply.
     type PodDNSConfigApplyConfiguration struct {
     	Nameservers []string                               `json:"nameservers,omitempty"`
    @@ -26,7 +26,7 @@ type PodDNSConfigApplyConfiguration struct {
     	Options     []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"`
     }
     
    -// PodDNSConfigApplyConfiguration constructs an declarative configuration of the PodDNSConfig type for use with
    +// PodDNSConfigApplyConfiguration constructs a declarative configuration of the PodDNSConfig type for use with
     // apply.
     func PodDNSConfig() *PodDNSConfigApplyConfiguration {
     	return &PodDNSConfigApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go
    index 327bf803b3..458b333bf2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// PodDNSConfigOptionApplyConfiguration represents an declarative configuration of the PodDNSConfigOption type for use
    +// PodDNSConfigOptionApplyConfiguration represents a declarative configuration of the PodDNSConfigOption type for use
     // with apply.
     type PodDNSConfigOptionApplyConfiguration struct {
     	Name  *string `json:"name,omitempty"`
     	Value *string `json:"value,omitempty"`
     }
     
    -// PodDNSConfigOptionApplyConfiguration constructs an declarative configuration of the PodDNSConfigOption type for use with
    +// PodDNSConfigOptionApplyConfiguration constructs a declarative configuration of the PodDNSConfigOption type for use with
     // apply.
     func PodDNSConfigOption() *PodDNSConfigOptionApplyConfiguration {
     	return &PodDNSConfigOptionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go
    index 3c6e6b87ac..73f089856f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// PodIPApplyConfiguration represents an declarative configuration of the PodIP type for use
    +// PodIPApplyConfiguration represents a declarative configuration of the PodIP type for use
     // with apply.
     type PodIPApplyConfiguration struct {
     	IP *string `json:"ip,omitempty"`
     }
     
    -// PodIPApplyConfiguration constructs an declarative configuration of the PodIP type for use with
    +// PodIPApplyConfiguration constructs a declarative configuration of the PodIP type for use with
     // apply.
     func PodIP() *PodIPApplyConfiguration {
     	return &PodIPApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
    index a5315d636b..7f156f817d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// PodOSApplyConfiguration represents an declarative configuration of the PodOS type for use
    +// PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use
     // with apply.
     type PodOSApplyConfiguration struct {
     	Name *v1.OSName `json:"name,omitempty"`
     }
     
    -// PodOSApplyConfiguration constructs an declarative configuration of the PodOS type for use with
    +// PodOSApplyConfiguration constructs a declarative configuration of the PodOS type for use with
     // apply.
     func PodOS() *PodOSApplyConfiguration {
     	return &PodOSApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
    index 9d3ad458ac..09746df1bd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// PodReadinessGateApplyConfiguration represents an declarative configuration of the PodReadinessGate type for use
    +// PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use
     // with apply.
     type PodReadinessGateApplyConfiguration struct {
     	ConditionType *v1.PodConditionType `json:"conditionType,omitempty"`
     }
     
    -// PodReadinessGateApplyConfiguration constructs an declarative configuration of the PodReadinessGate type for use with
    +// PodReadinessGateApplyConfiguration constructs a declarative configuration of the PodReadinessGate type for use with
     // apply.
     func PodReadinessGate() *PodReadinessGateApplyConfiguration {
     	return &PodReadinessGateApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go
    index 69b250d474..b0bd67fa11 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go
    @@ -18,14 +18,15 @@ limitations under the License.
     
     package v1
     
    -// PodResourceClaimApplyConfiguration represents an declarative configuration of the PodResourceClaim type for use
    +// PodResourceClaimApplyConfiguration represents a declarative configuration of the PodResourceClaim type for use
     // with apply.
     type PodResourceClaimApplyConfiguration struct {
    -	Name   *string                        `json:"name,omitempty"`
    -	Source *ClaimSourceApplyConfiguration `json:"source,omitempty"`
    +	Name                      *string `json:"name,omitempty"`
    +	ResourceClaimName         *string `json:"resourceClaimName,omitempty"`
    +	ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"`
     }
     
    -// PodResourceClaimApplyConfiguration constructs an declarative configuration of the PodResourceClaim type for use with
    +// PodResourceClaimApplyConfiguration constructs a declarative configuration of the PodResourceClaim type for use with
     // apply.
     func PodResourceClaim() *PodResourceClaimApplyConfiguration {
     	return &PodResourceClaimApplyConfiguration{}
    @@ -39,10 +40,18 @@ func (b *PodResourceClaimApplyConfiguration) WithName(value string) *PodResource
     	return b
     }
     
    -// WithSource sets the Source field in the declarative configuration to the given value
    +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value
     // and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Source field is set to the value of the last call.
    -func (b *PodResourceClaimApplyConfiguration) WithSource(value *ClaimSourceApplyConfiguration) *PodResourceClaimApplyConfiguration {
    -	b.Source = value
    +// If called multiple times, the ResourceClaimName field is set to the value of the last call.
    +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimApplyConfiguration {
    +	b.ResourceClaimName = &value
    +	return b
    +}
    +
    +// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call.
    +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimTemplateName(value string) *PodResourceClaimApplyConfiguration {
    +	b.ResourceClaimTemplateName = &value
     	return b
     }
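
Since the hunk above replaces the ClaimSource indirection with two direct fields, a short usage sketch may help (the claim and template names are hypothetical, and the chained-builder style is just one way to call this API):

package main

import (
	"fmt"

	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// WithSource is gone; a pod-level resource claim now points either at an
	// existing ResourceClaim or at a ResourceClaimTemplate by name.
	claim := corev1.PodResourceClaim().
		WithName("gpu").
		WithResourceClaimTemplateName("gpu-template")

	fmt.Println(*claim.Name, *claim.ResourceClaimTemplateName)
}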
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
    index ae79ca01b7..f60ad4b052 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// PodResourceClaimStatusApplyConfiguration represents an declarative configuration of the PodResourceClaimStatus type for use
    +// PodResourceClaimStatusApplyConfiguration represents a declarative configuration of the PodResourceClaimStatus type for use
     // with apply.
     type PodResourceClaimStatusApplyConfiguration struct {
     	Name              *string `json:"name,omitempty"`
     	ResourceClaimName *string `json:"resourceClaimName,omitempty"`
     }
     
    -// PodResourceClaimStatusApplyConfiguration constructs an declarative configuration of the PodResourceClaimStatus type for use with
    +// PodResourceClaimStatusApplyConfiguration constructs a declarative configuration of the PodResourceClaimStatus type for use with
     // apply.
     func PodResourceClaimStatus() *PodResourceClaimStatusApplyConfiguration {
     	return &PodResourceClaimStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go
    index f7649c2e92..3d91092776 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// PodSchedulingGateApplyConfiguration represents an declarative configuration of the PodSchedulingGate type for use
    +// PodSchedulingGateApplyConfiguration represents a declarative configuration of the PodSchedulingGate type for use
     // with apply.
     type PodSchedulingGateApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// PodSchedulingGateApplyConfiguration constructs an declarative configuration of the PodSchedulingGate type for use with
    +// PodSchedulingGateApplyConfiguration constructs a declarative configuration of the PodSchedulingGate type for use with
     // apply.
     func PodSchedulingGate() *PodSchedulingGateApplyConfiguration {
     	return &PodSchedulingGateApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
    index 6b340294eb..55085e6307 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
    @@ -22,23 +22,24 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// PodSecurityContextApplyConfiguration represents an declarative configuration of the PodSecurityContext type for use
    +// PodSecurityContextApplyConfiguration represents a declarative configuration of the PodSecurityContext type for use
     // with apply.
     type PodSecurityContextApplyConfiguration struct {
    -	SELinuxOptions      *SELinuxOptionsApplyConfiguration                `json:"seLinuxOptions,omitempty"`
    -	WindowsOptions      *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"`
    -	RunAsUser           *int64                                           `json:"runAsUser,omitempty"`
    -	RunAsGroup          *int64                                           `json:"runAsGroup,omitempty"`
    -	RunAsNonRoot        *bool                                            `json:"runAsNonRoot,omitempty"`
    -	SupplementalGroups  []int64                                          `json:"supplementalGroups,omitempty"`
    -	FSGroup             *int64                                           `json:"fsGroup,omitempty"`
    -	Sysctls             []SysctlApplyConfiguration                       `json:"sysctls,omitempty"`
    -	FSGroupChangePolicy *corev1.PodFSGroupChangePolicy                   `json:"fsGroupChangePolicy,omitempty"`
    -	SeccompProfile      *SeccompProfileApplyConfiguration                `json:"seccompProfile,omitempty"`
    -	AppArmorProfile     *AppArmorProfileApplyConfiguration               `json:"appArmorProfile,omitempty"`
    +	SELinuxOptions           *SELinuxOptionsApplyConfiguration                `json:"seLinuxOptions,omitempty"`
    +	WindowsOptions           *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"`
    +	RunAsUser                *int64                                           `json:"runAsUser,omitempty"`
    +	RunAsGroup               *int64                                           `json:"runAsGroup,omitempty"`
    +	RunAsNonRoot             *bool                                            `json:"runAsNonRoot,omitempty"`
    +	SupplementalGroups       []int64                                          `json:"supplementalGroups,omitempty"`
    +	SupplementalGroupsPolicy *corev1.SupplementalGroupsPolicy                 `json:"supplementalGroupsPolicy,omitempty"`
    +	FSGroup                  *int64                                           `json:"fsGroup,omitempty"`
    +	Sysctls                  []SysctlApplyConfiguration                       `json:"sysctls,omitempty"`
    +	FSGroupChangePolicy      *corev1.PodFSGroupChangePolicy                   `json:"fsGroupChangePolicy,omitempty"`
    +	SeccompProfile           *SeccompProfileApplyConfiguration                `json:"seccompProfile,omitempty"`
    +	AppArmorProfile          *AppArmorProfileApplyConfiguration               `json:"appArmorProfile,omitempty"`
     }
     
    -// PodSecurityContextApplyConfiguration constructs an declarative configuration of the PodSecurityContext type for use with
    +// PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with
     // apply.
     func PodSecurityContext() *PodSecurityContextApplyConfiguration {
     	return &PodSecurityContextApplyConfiguration{}
    @@ -94,6 +95,14 @@ func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroups(values ...
     	return b
     }
     
    +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call.
    +func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroupsPolicy(value corev1.SupplementalGroupsPolicy) *PodSecurityContextApplyConfiguration {
    +	b.SupplementalGroupsPolicy = &value
    +	return b
    +}
    +
     // WithFSGroup sets the FSGroup field in the declarative configuration to the given value
     // and returns the receiver, so that objects can be built by chaining "With" function invocations.
     // If called multiple times, the FSGroup field is set to the value of the last call.
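
A brief sketch of the SupplementalGroupsPolicy field added above (this assumes the SupplementalGroupsPolicyStrict constant shipped with k8s.io/api v0.31; everything else is illustrative):

package main

import (
	"fmt"

	apicorev1 "k8s.io/api/core/v1"
	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// The policy sits next to the existing SupplementalGroups list and controls
	// whether group memberships defined in the container image are merged in.
	sc := corev1.PodSecurityContext().
		WithSupplementalGroups(2000, 3000).
		WithSupplementalGroupsPolicy(apicorev1.SupplementalGroupsPolicyStrict)

	fmt.Println(*sc.SupplementalGroupsPolicy, sc.SupplementalGroups)
}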
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
    index a9acd36fc7..8134e044f1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
    @@ -22,7 +22,7 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// PodSpecApplyConfiguration represents an declarative configuration of the PodSpec type for use
    +// PodSpecApplyConfiguration represents a declarative configuration of the PodSpec type for use
     // with apply.
     type PodSpecApplyConfiguration struct {
     	Volumes                       []VolumeApplyConfiguration                   `json:"volumes,omitempty"`
    @@ -66,7 +66,7 @@ type PodSpecApplyConfiguration struct {
     	ResourceClaims                []PodResourceClaimApplyConfiguration         `json:"resourceClaims,omitempty"`
     }
     
    -// PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with
    +// PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with
     // apply.
     func PodSpec() *PodSpecApplyConfiguration {
     	return &PodSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
    index 1a58ab6be2..0b68996cd1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PodStatusApplyConfiguration represents an declarative configuration of the PodStatus type for use
    +// PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use
     // with apply.
     type PodStatusApplyConfiguration struct {
     	Phase                      *v1.PodPhase                               `json:"phase,omitempty"`
    @@ -44,7 +44,7 @@ type PodStatusApplyConfiguration struct {
     	ResourceClaimStatuses      []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"`
     }
     
    -// PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with
    +// PodStatusApplyConfiguration constructs a declarative configuration of the PodStatus type for use with
     // apply.
     func PodStatus() *PodStatusApplyConfiguration {
     	return &PodStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
    index 7fe51d9e1b..b4c8a658a4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodTemplateApplyConfiguration represents an declarative configuration of the PodTemplate type for use
    +// PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use
     // with apply.
     type PodTemplateApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type PodTemplateApplyConfiguration struct {
     	Template                         *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
     }
     
    -// PodTemplate constructs an declarative configuration of the PodTemplate type for use with
    +// PodTemplate constructs a declarative configuration of the PodTemplate type for use with
     // apply.
     func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration {
     	b := &PodTemplateApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *PodTemplateApplyConfiguration) WithTemplate(value *PodTemplateSpecApply
     	b.Template = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PodTemplateApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
    index 82878a9ace..6146c01c7c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
    @@ -24,14 +24,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodTemplateSpecApplyConfiguration represents an declarative configuration of the PodTemplateSpec type for use
    +// PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use
     // with apply.
     type PodTemplateSpecApplyConfiguration struct {
     	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
     	Spec                             *PodSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// PodTemplateSpecApplyConfiguration constructs an declarative configuration of the PodTemplateSpec type for use with
    +// PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with
     // apply.
     func PodTemplateSpec() *PodTemplateSpecApplyConfiguration {
     	return &PodTemplateSpecApplyConfiguration{}
    @@ -186,3 +186,9 @@ func (b *PodTemplateSpecApplyConfiguration) WithSpec(value *PodSpecApplyConfigur
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PodTemplateSpecApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
    index 8c70c8f6cf..5e738cabdb 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// PortStatusApplyConfiguration represents an declarative configuration of the PortStatus type for use
    +// PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use
     // with apply.
     type PortStatusApplyConfiguration struct {
     	Port     *int32       `json:"port,omitempty"`
    @@ -30,7 +30,7 @@ type PortStatusApplyConfiguration struct {
     	Error    *string      `json:"error,omitempty"`
     }
     
    -// PortStatusApplyConfiguration constructs an declarative configuration of the PortStatus type for use with
    +// PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with
     // apply.
     func PortStatus() *PortStatusApplyConfiguration {
     	return &PortStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go
    index 19cbb82edb..29715e0219 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// PortworxVolumeSourceApplyConfiguration represents an declarative configuration of the PortworxVolumeSource type for use
    +// PortworxVolumeSourceApplyConfiguration represents a declarative configuration of the PortworxVolumeSource type for use
     // with apply.
     type PortworxVolumeSourceApplyConfiguration struct {
     	VolumeID *string `json:"volumeID,omitempty"`
    @@ -26,7 +26,7 @@ type PortworxVolumeSourceApplyConfiguration struct {
     	ReadOnly *bool   `json:"readOnly,omitempty"`
     }
     
    -// PortworxVolumeSourceApplyConfiguration constructs an declarative configuration of the PortworxVolumeSource type for use with
    +// PortworxVolumeSourceApplyConfiguration constructs a declarative configuration of the PortworxVolumeSource type for use with
     // apply.
     func PortworxVolumeSource() *PortworxVolumeSourceApplyConfiguration {
     	return &PortworxVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go
    index a373e4afe0..b88a3646fc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// PreferredSchedulingTermApplyConfiguration represents an declarative configuration of the PreferredSchedulingTerm type for use
    +// PreferredSchedulingTermApplyConfiguration represents a declarative configuration of the PreferredSchedulingTerm type for use
     // with apply.
     type PreferredSchedulingTermApplyConfiguration struct {
     	Weight     *int32                              `json:"weight,omitempty"`
     	Preference *NodeSelectorTermApplyConfiguration `json:"preference,omitempty"`
     }
     
    -// PreferredSchedulingTermApplyConfiguration constructs an declarative configuration of the PreferredSchedulingTerm type for use with
    +// PreferredSchedulingTermApplyConfiguration constructs a declarative configuration of the PreferredSchedulingTerm type for use with
     // apply.
     func PreferredSchedulingTerm() *PreferredSchedulingTermApplyConfiguration {
     	return &PreferredSchedulingTermApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
    index 10730557a0..3be1c9650b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ProbeApplyConfiguration represents an declarative configuration of the Probe type for use
    +// ProbeApplyConfiguration represents a declarative configuration of the Probe type for use
     // with apply.
     type ProbeApplyConfiguration struct {
     	ProbeHandlerApplyConfiguration `json:",inline"`
    @@ -30,7 +30,7 @@ type ProbeApplyConfiguration struct {
     	TerminationGracePeriodSeconds  *int64 `json:"terminationGracePeriodSeconds,omitempty"`
     }
     
    -// ProbeApplyConfiguration constructs an declarative configuration of the Probe type for use with
    +// ProbeApplyConfiguration constructs a declarative configuration of the Probe type for use with
     // apply.
     func Probe() *ProbeApplyConfiguration {
     	return &ProbeApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go
    index 54f3344ac7..1f88745eab 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ProbeHandlerApplyConfiguration represents an declarative configuration of the ProbeHandler type for use
    +// ProbeHandlerApplyConfiguration represents a declarative configuration of the ProbeHandler type for use
     // with apply.
     type ProbeHandlerApplyConfiguration struct {
     	Exec      *ExecActionApplyConfiguration      `json:"exec,omitempty"`
    @@ -27,7 +27,7 @@ type ProbeHandlerApplyConfiguration struct {
     	GRPC      *GRPCActionApplyConfiguration      `json:"grpc,omitempty"`
     }
     
    -// ProbeHandlerApplyConfiguration constructs an declarative configuration of the ProbeHandler type for use with
    +// ProbeHandlerApplyConfiguration constructs a declarative configuration of the ProbeHandler type for use with
     // apply.
     func ProbeHandler() *ProbeHandlerApplyConfiguration {
     	return &ProbeHandlerApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go
    index 0a9d1d88e6..c922ec8cc2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ProjectedVolumeSourceApplyConfiguration represents an declarative configuration of the ProjectedVolumeSource type for use
    +// ProjectedVolumeSourceApplyConfiguration represents a declarative configuration of the ProjectedVolumeSource type for use
     // with apply.
     type ProjectedVolumeSourceApplyConfiguration struct {
     	Sources     []VolumeProjectionApplyConfiguration `json:"sources,omitempty"`
     	DefaultMode *int32                               `json:"defaultMode,omitempty"`
     }
     
    -// ProjectedVolumeSourceApplyConfiguration constructs an declarative configuration of the ProjectedVolumeSource type for use with
    +// ProjectedVolumeSourceApplyConfiguration constructs a declarative configuration of the ProjectedVolumeSource type for use with
     // apply.
     func ProjectedVolumeSource() *ProjectedVolumeSourceApplyConfiguration {
     	return &ProjectedVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go
    index 646052ea4a..9a042a0a12 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// QuobyteVolumeSourceApplyConfiguration represents an declarative configuration of the QuobyteVolumeSource type for use
    +// QuobyteVolumeSourceApplyConfiguration represents a declarative configuration of the QuobyteVolumeSource type for use
     // with apply.
     type QuobyteVolumeSourceApplyConfiguration struct {
     	Registry *string `json:"registry,omitempty"`
    @@ -29,7 +29,7 @@ type QuobyteVolumeSourceApplyConfiguration struct {
     	Tenant   *string `json:"tenant,omitempty"`
     }
     
    -// QuobyteVolumeSourceApplyConfiguration constructs an declarative configuration of the QuobyteVolumeSource type for use with
    +// QuobyteVolumeSourceApplyConfiguration constructs a declarative configuration of the QuobyteVolumeSource type for use with
     // apply.
     func QuobyteVolumeSource() *QuobyteVolumeSourceApplyConfiguration {
     	return &QuobyteVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go
    index ffcb836eb0..64f25724a3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// RBDPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the RBDPersistentVolumeSource type for use
    +// RBDPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the RBDPersistentVolumeSource type for use
     // with apply.
     type RBDPersistentVolumeSourceApplyConfiguration struct {
     	CephMonitors []string                           `json:"monitors,omitempty"`
    @@ -31,7 +31,7 @@ type RBDPersistentVolumeSourceApplyConfiguration struct {
     	ReadOnly     *bool                              `json:"readOnly,omitempty"`
     }
     
    -// RBDPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDPersistentVolumeSource type for use with
    +// RBDPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDPersistentVolumeSource type for use with
     // apply.
     func RBDPersistentVolumeSource() *RBDPersistentVolumeSourceApplyConfiguration {
     	return &RBDPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go
    index 8e7c81732c..8dae198c09 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// RBDVolumeSourceApplyConfiguration represents an declarative configuration of the RBDVolumeSource type for use
    +// RBDVolumeSourceApplyConfiguration represents a declarative configuration of the RBDVolumeSource type for use
     // with apply.
     type RBDVolumeSourceApplyConfiguration struct {
     	CephMonitors []string                                `json:"monitors,omitempty"`
    @@ -31,7 +31,7 @@ type RBDVolumeSourceApplyConfiguration struct {
     	ReadOnly     *bool                                   `json:"readOnly,omitempty"`
     }
     
    -// RBDVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDVolumeSource type for use with
    +// RBDVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDVolumeSource type for use with
     // apply.
     func RBDVolumeSource() *RBDVolumeSourceApplyConfiguration {
     	return &RBDVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
    index 7cd71460a9..b28f422dc7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ReplicationControllerApplyConfiguration represents an declarative configuration of the ReplicationController type for use
    +// ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use
     // with apply.
     type ReplicationControllerApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ReplicationControllerApplyConfiguration struct {
     	Status                           *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ReplicationController constructs an declarative configuration of the ReplicationController type for use with
    +// ReplicationController constructs a declarative configuration of the ReplicationController type for use with
     // apply.
     func ReplicationController(name, namespace string) *ReplicationControllerApplyConfiguration {
     	b := &ReplicationControllerApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *ReplicationControllerApplyConfiguration) WithStatus(value *ReplicationC
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ReplicationControllerApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
    index c3d56cc697..0d74c1db9a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ReplicationControllerConditionApplyConfiguration represents an declarative configuration of the ReplicationControllerCondition type for use
    +// ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use
     // with apply.
     type ReplicationControllerConditionApplyConfiguration struct {
     	Type               *v1.ReplicationControllerConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type ReplicationControllerConditionApplyConfiguration struct {
     	Message            *string                                `json:"message,omitempty"`
     }
     
    -// ReplicationControllerConditionApplyConfiguration constructs an declarative configuration of the ReplicationControllerCondition type for use with
    +// ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with
     // apply.
     func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfiguration {
     	return &ReplicationControllerConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go
    index dd4e081d9f..07bac9f4c9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ReplicationControllerSpecApplyConfiguration represents an declarative configuration of the ReplicationControllerSpec type for use
    +// ReplicationControllerSpecApplyConfiguration represents a declarative configuration of the ReplicationControllerSpec type for use
     // with apply.
     type ReplicationControllerSpecApplyConfiguration struct {
     	Replicas        *int32                             `json:"replicas,omitempty"`
    @@ -27,7 +27,7 @@ type ReplicationControllerSpecApplyConfiguration struct {
     	Template        *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
     }
     
    -// ReplicationControllerSpecApplyConfiguration constructs an declarative configuration of the ReplicationControllerSpec type for use with
    +// ReplicationControllerSpecApplyConfiguration constructs a declarative configuration of the ReplicationControllerSpec type for use with
     // apply.
     func ReplicationControllerSpec() *ReplicationControllerSpecApplyConfiguration {
     	return &ReplicationControllerSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go
    index 1b994cfb8c..c8046aa5a4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ReplicationControllerStatusApplyConfiguration represents an declarative configuration of the ReplicationControllerStatus type for use
    +// ReplicationControllerStatusApplyConfiguration represents a declarative configuration of the ReplicationControllerStatus type for use
     // with apply.
     type ReplicationControllerStatusApplyConfiguration struct {
     	Replicas             *int32                                             `json:"replicas,omitempty"`
    @@ -29,7 +29,7 @@ type ReplicationControllerStatusApplyConfiguration struct {
     	Conditions           []ReplicationControllerConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ReplicationControllerStatusApplyConfiguration constructs an declarative configuration of the ReplicationControllerStatus type for use with
    +// ReplicationControllerStatusApplyConfiguration constructs a declarative configuration of the ReplicationControllerStatus type for use with
     // apply.
     func ReplicationControllerStatus() *ReplicationControllerStatusApplyConfiguration {
     	return &ReplicationControllerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go
    index 064dd4e2e4..b00c692485 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go
    @@ -18,13 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use
    +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
     // with apply.
     type ResourceClaimApplyConfiguration struct {
    -	Name *string `json:"name,omitempty"`
    +	Name    *string `json:"name,omitempty"`
    +	Request *string `json:"request,omitempty"`
     }
     
    -// ResourceClaimApplyConfiguration constructs an declarative configuration of the ResourceClaim type for use with
    +// ResourceClaimApplyConfiguration constructs a declarative configuration of the ResourceClaim type for use with
     // apply.
     func ResourceClaim() *ResourceClaimApplyConfiguration {
     	return &ResourceClaimApplyConfiguration{}
    @@ -37,3 +38,11 @@ func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimA
     	b.Name = &value
     	return b
     }
    +
    +// WithRequest sets the Request field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Request field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithRequest(value string) *ResourceClaimApplyConfiguration {
    +	b.Request = &value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go
    index 2741227dd7..1b4918a633 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go
    @@ -22,7 +22,7 @@ import (
     	resource "k8s.io/apimachinery/pkg/api/resource"
     )
     
    -// ResourceFieldSelectorApplyConfiguration represents an declarative configuration of the ResourceFieldSelector type for use
    +// ResourceFieldSelectorApplyConfiguration represents a declarative configuration of the ResourceFieldSelector type for use
     // with apply.
     type ResourceFieldSelectorApplyConfiguration struct {
     	ContainerName *string            `json:"containerName,omitempty"`
    @@ -30,7 +30,7 @@ type ResourceFieldSelectorApplyConfiguration struct {
     	Divisor       *resource.Quantity `json:"divisor,omitempty"`
     }
     
    -// ResourceFieldSelectorApplyConfiguration constructs an declarative configuration of the ResourceFieldSelector type for use with
    +// ResourceFieldSelectorApplyConfiguration constructs a declarative configuration of the ResourceFieldSelector type for use with
     // apply.
     func ResourceFieldSelector() *ResourceFieldSelectorApplyConfiguration {
     	return &ResourceFieldSelectorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
    new file mode 100644
    index 0000000000..5169cb4bc3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +)
    +
    +// ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use
    +// with apply.
    +type ResourceHealthApplyConfiguration struct {
    +	ResourceID *v1.ResourceID           `json:"resourceID,omitempty"`
    +	Health     *v1.ResourceHealthStatus `json:"health,omitempty"`
    +}
    +
    +// ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with
    +// apply.
    +func ResourceHealth() *ResourceHealthApplyConfiguration {
    +	return &ResourceHealthApplyConfiguration{}
    +}
    +
    +// WithResourceID sets the ResourceID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceID field is set to the value of the last call.
    +func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) *ResourceHealthApplyConfiguration {
    +	b.ResourceID = &value
    +	return b
    +}
    +
    +// WithHealth sets the Health field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Health field is set to the value of the last call.
    +func (b *ResourceHealthApplyConfiguration) WithHealth(value v1.ResourceHealthStatus) *ResourceHealthApplyConfiguration {
    +	b.Health = &value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
    index 6b22ebdc59..2b78ba7038 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ResourceQuotaApplyConfiguration represents an declarative configuration of the ResourceQuota type for use
    +// ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use
     // with apply.
     type ResourceQuotaApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ResourceQuotaApplyConfiguration struct {
     	Status                           *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ResourceQuota constructs an declarative configuration of the ResourceQuota type for use with
    +// ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with
     // apply.
     func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration {
     	b := &ResourceQuotaApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *ResourceQuotaApplyConfiguration) WithStatus(value *ResourceQuotaStatusA
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ResourceQuotaApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
    index feb454bc4b..0012ace25f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ResourceQuotaSpecApplyConfiguration represents an declarative configuration of the ResourceQuotaSpec type for use
    +// ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use
     // with apply.
     type ResourceQuotaSpecApplyConfiguration struct {
     	Hard          *v1.ResourceList                 `json:"hard,omitempty"`
    @@ -30,7 +30,7 @@ type ResourceQuotaSpecApplyConfiguration struct {
     	ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"`
     }
     
    -// ResourceQuotaSpecApplyConfiguration constructs an declarative configuration of the ResourceQuotaSpec type for use with
    +// ResourceQuotaSpecApplyConfiguration constructs a declarative configuration of the ResourceQuotaSpec type for use with
     // apply.
     func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration {
     	return &ResourceQuotaSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
    index 4dced90f7a..364b96eecf 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ResourceQuotaStatusApplyConfiguration represents an declarative configuration of the ResourceQuotaStatus type for use
    +// ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use
     // with apply.
     type ResourceQuotaStatusApplyConfiguration struct {
     	Hard *v1.ResourceList `json:"hard,omitempty"`
     	Used *v1.ResourceList `json:"used,omitempty"`
     }
     
    -// ResourceQuotaStatusApplyConfiguration constructs an declarative configuration of the ResourceQuotaStatus type for use with
    +// ResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatus type for use with
     // apply.
     func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration {
     	return &ResourceQuotaStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
    index 9482b8d713..51197862c9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ResourceRequirementsApplyConfiguration represents an declarative configuration of the ResourceRequirements type for use
    +// ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use
     // with apply.
     type ResourceRequirementsApplyConfiguration struct {
     	Limits   *v1.ResourceList                  `json:"limits,omitempty"`
    @@ -30,7 +30,7 @@ type ResourceRequirementsApplyConfiguration struct {
     	Claims   []ResourceClaimApplyConfiguration `json:"claims,omitempty"`
     }
     
    -// ResourceRequirementsApplyConfiguration constructs an declarative configuration of the ResourceRequirements type for use with
    +// ResourceRequirementsApplyConfiguration constructs a declarative configuration of the ResourceRequirements type for use with
     // apply.
     func ResourceRequirements() *ResourceRequirementsApplyConfiguration {
     	return &ResourceRequirementsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
    new file mode 100644
    index 0000000000..1e63c87f8c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
    @@ -0,0 +1,57 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +)
    +
    +// ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use
    +// with apply.
    +type ResourceStatusApplyConfiguration struct {
    +	Name      *v1.ResourceName                   `json:"name,omitempty"`
    +	Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"`
    +}
    +
    +// ResourceStatusApplyConfiguration constructs a declarative configuration of the ResourceStatus type for use with
    +// apply.
    +func ResourceStatus() *ResourceStatusApplyConfiguration {
    +	return &ResourceStatusApplyConfiguration{}
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ResourceStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceStatusApplyConfiguration {
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithResources adds the given value to the Resources field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Resources field.
    +func (b *ResourceStatusApplyConfiguration) WithResources(values ...*ResourceHealthApplyConfiguration) *ResourceStatusApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithResources")
    +		}
    +		b.Resources = append(b.Resources, *values[i])
    +	}
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go
    index fffb5b186d..b07f46de91 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ScaleIOPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOPersistentVolumeSource type for use
    +// ScaleIOPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOPersistentVolumeSource type for use
     // with apply.
     type ScaleIOPersistentVolumeSourceApplyConfiguration struct {
     	Gateway          *string                            `json:"gateway,omitempty"`
    @@ -33,7 +33,7 @@ type ScaleIOPersistentVolumeSourceApplyConfiguration struct {
     	ReadOnly         *bool                              `json:"readOnly,omitempty"`
     }
     
    -// ScaleIOPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOPersistentVolumeSource type for use with
    +// ScaleIOPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOPersistentVolumeSource type for use with
     // apply.
     func ScaleIOPersistentVolumeSource() *ScaleIOPersistentVolumeSourceApplyConfiguration {
     	return &ScaleIOPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go
    index b54e1161eb..740c05ebb7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ScaleIOVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOVolumeSource type for use
    +// ScaleIOVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOVolumeSource type for use
     // with apply.
     type ScaleIOVolumeSourceApplyConfiguration struct {
     	Gateway          *string                                 `json:"gateway,omitempty"`
    @@ -33,7 +33,7 @@ type ScaleIOVolumeSourceApplyConfiguration struct {
     	ReadOnly         *bool                                   `json:"readOnly,omitempty"`
     }
     
    -// ScaleIOVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOVolumeSource type for use with
    +// ScaleIOVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOVolumeSource type for use with
     // apply.
     func ScaleIOVolumeSource() *ScaleIOVolumeSourceApplyConfiguration {
     	return &ScaleIOVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
    index c901a2ae6d..c6ec87827f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// ScopedResourceSelectorRequirementApplyConfiguration represents an declarative configuration of the ScopedResourceSelectorRequirement type for use
    +// ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use
     // with apply.
     type ScopedResourceSelectorRequirementApplyConfiguration struct {
     	ScopeName *v1.ResourceQuotaScope    `json:"scopeName,omitempty"`
    @@ -30,7 +30,7 @@ type ScopedResourceSelectorRequirementApplyConfiguration struct {
     	Values    []string                  `json:"values,omitempty"`
     }
     
    -// ScopedResourceSelectorRequirementApplyConfiguration constructs an declarative configuration of the ScopedResourceSelectorRequirement type for use with
    +// ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with
     // apply.
     func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApplyConfiguration {
     	return &ScopedResourceSelectorRequirementApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go
    index 3251e9dc18..a9fb9a1b19 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// ScopeSelectorApplyConfiguration represents an declarative configuration of the ScopeSelector type for use
    +// ScopeSelectorApplyConfiguration represents a declarative configuration of the ScopeSelector type for use
     // with apply.
     type ScopeSelectorApplyConfiguration struct {
     	MatchExpressions []ScopedResourceSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"`
     }
     
    -// ScopeSelectorApplyConfiguration constructs an declarative configuration of the ScopeSelector type for use with
    +// ScopeSelectorApplyConfiguration constructs a declarative configuration of the ScopeSelector type for use with
     // apply.
     func ScopeSelector() *ScopeSelectorApplyConfiguration {
     	return &ScopeSelectorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
    index 9818a00e7a..eb3077a051 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// SeccompProfileApplyConfiguration represents an declarative configuration of the SeccompProfile type for use
    +// SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use
     // with apply.
     type SeccompProfileApplyConfiguration struct {
     	Type             *v1.SeccompProfileType `json:"type,omitempty"`
     	LocalhostProfile *string                `json:"localhostProfile,omitempty"`
     }
     
    -// SeccompProfileApplyConfiguration constructs an declarative configuration of the SeccompProfile type for use with
    +// SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with
     // apply.
     func SeccompProfile() *SeccompProfileApplyConfiguration {
     	return &SeccompProfileApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
    index 3f7e1eb039..1d850b00bb 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// SecretApplyConfiguration represents an declarative configuration of the Secret type for use
    +// SecretApplyConfiguration represents a declarative configuration of the Secret type for use
     // with apply.
     type SecretApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -38,7 +38,7 @@ type SecretApplyConfiguration struct {
     	Type                             *corev1.SecretType `json:"type,omitempty"`
     }
     
    -// Secret constructs an declarative configuration of the Secret type for use with
    +// Secret constructs a declarative configuration of the Secret type for use with
     // apply.
     func Secret(name, namespace string) *SecretApplyConfiguration {
     	b := &SecretApplyConfiguration{}
    @@ -286,3 +286,9 @@ func (b *SecretApplyConfiguration) WithType(value corev1.SecretType) *SecretAppl
     	b.Type = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *SecretApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
    index 7b22a8d0b2..ba99b7f5fd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// SecretEnvSourceApplyConfiguration represents an declarative configuration of the SecretEnvSource type for use
    +// SecretEnvSourceApplyConfiguration represents a declarative configuration of the SecretEnvSource type for use
     // with apply.
     type SecretEnvSourceApplyConfiguration struct {
     	LocalObjectReferenceApplyConfiguration `json:",inline"`
     	Optional                               *bool `json:"optional,omitempty"`
     }
     
    -// SecretEnvSourceApplyConfiguration constructs an declarative configuration of the SecretEnvSource type for use with
    +// SecretEnvSourceApplyConfiguration constructs a declarative configuration of the SecretEnvSource type for use with
     // apply.
     func SecretEnvSource() *SecretEnvSourceApplyConfiguration {
     	return &SecretEnvSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
    index b8464a348a..2d490b8108 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// SecretKeySelectorApplyConfiguration represents an declarative configuration of the SecretKeySelector type for use
    +// SecretKeySelectorApplyConfiguration represents a declarative configuration of the SecretKeySelector type for use
     // with apply.
     type SecretKeySelectorApplyConfiguration struct {
     	LocalObjectReferenceApplyConfiguration `json:",inline"`
    @@ -26,7 +26,7 @@ type SecretKeySelectorApplyConfiguration struct {
     	Optional                               *bool   `json:"optional,omitempty"`
     }
     
    -// SecretKeySelectorApplyConfiguration constructs an declarative configuration of the SecretKeySelector type for use with
    +// SecretKeySelectorApplyConfiguration constructs a declarative configuration of the SecretKeySelector type for use with
     // apply.
     func SecretKeySelector() *SecretKeySelectorApplyConfiguration {
     	return &SecretKeySelectorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
    index e8edc61273..65ce3c66da 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// SecretProjectionApplyConfiguration represents an declarative configuration of the SecretProjection type for use
    +// SecretProjectionApplyConfiguration represents a declarative configuration of the SecretProjection type for use
     // with apply.
     type SecretProjectionApplyConfiguration struct {
     	LocalObjectReferenceApplyConfiguration `json:",inline"`
    @@ -26,7 +26,7 @@ type SecretProjectionApplyConfiguration struct {
     	Optional                               *bool                         `json:"optional,omitempty"`
     }
     
    -// SecretProjectionApplyConfiguration constructs an declarative configuration of the SecretProjection type for use with
    +// SecretProjectionApplyConfiguration constructs a declarative configuration of the SecretProjection type for use with
     // apply.
     func SecretProjection() *SecretProjectionApplyConfiguration {
     	return &SecretProjectionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go
    index 95579d003e..f5e0de23aa 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// SecretReferenceApplyConfiguration represents an declarative configuration of the SecretReference type for use
    +// SecretReferenceApplyConfiguration represents a declarative configuration of the SecretReference type for use
     // with apply.
     type SecretReferenceApplyConfiguration struct {
     	Name      *string `json:"name,omitempty"`
     	Namespace *string `json:"namespace,omitempty"`
     }
     
    -// SecretReferenceApplyConfiguration constructs an declarative configuration of the SecretReference type for use with
    +// SecretReferenceApplyConfiguration constructs a declarative configuration of the SecretReference type for use with
     // apply.
     func SecretReference() *SecretReferenceApplyConfiguration {
     	return &SecretReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go
    index bcb441e9f3..9f765d354d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// SecretVolumeSourceApplyConfiguration represents an declarative configuration of the SecretVolumeSource type for use
    +// SecretVolumeSourceApplyConfiguration represents a declarative configuration of the SecretVolumeSource type for use
     // with apply.
     type SecretVolumeSourceApplyConfiguration struct {
     	SecretName  *string                       `json:"secretName,omitempty"`
    @@ -27,7 +27,7 @@ type SecretVolumeSourceApplyConfiguration struct {
     	Optional    *bool                         `json:"optional,omitempty"`
     }
     
    -// SecretVolumeSourceApplyConfiguration constructs an declarative configuration of the SecretVolumeSource type for use with
    +// SecretVolumeSourceApplyConfiguration constructs a declarative configuration of the SecretVolumeSource type for use with
     // apply.
     func SecretVolumeSource() *SecretVolumeSourceApplyConfiguration {
     	return &SecretVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go
    index 4146b765da..99faab72da 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go
    @@ -22,7 +22,7 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// SecurityContextApplyConfiguration represents an declarative configuration of the SecurityContext type for use
    +// SecurityContextApplyConfiguration represents a declarative configuration of the SecurityContext type for use
     // with apply.
     type SecurityContextApplyConfiguration struct {
     	Capabilities             *CapabilitiesApplyConfiguration                  `json:"capabilities,omitempty"`
    @@ -39,7 +39,7 @@ type SecurityContextApplyConfiguration struct {
     	AppArmorProfile          *AppArmorProfileApplyConfiguration               `json:"appArmorProfile,omitempty"`
     }
     
    -// SecurityContextApplyConfiguration constructs an declarative configuration of the SecurityContext type for use with
    +// SecurityContextApplyConfiguration constructs a declarative configuration of the SecurityContext type for use with
     // apply.
     func SecurityContext() *SecurityContextApplyConfiguration {
     	return &SecurityContextApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go
    index 2938faa18e..bad01300f0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// SELinuxOptionsApplyConfiguration represents an declarative configuration of the SELinuxOptions type for use
    +// SELinuxOptionsApplyConfiguration represents a declarative configuration of the SELinuxOptions type for use
     // with apply.
     type SELinuxOptionsApplyConfiguration struct {
     	User  *string `json:"user,omitempty"`
    @@ -27,7 +27,7 @@ type SELinuxOptionsApplyConfiguration struct {
     	Level *string `json:"level,omitempty"`
     }
     
    -// SELinuxOptionsApplyConfiguration constructs an declarative configuration of the SELinuxOptions type for use with
    +// SELinuxOptionsApplyConfiguration constructs a declarative configuration of the SELinuxOptions type for use with
     // apply.
     func SELinuxOptions() *SELinuxOptionsApplyConfiguration {
     	return &SELinuxOptionsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
    index 3fa1195237..2dac0589d2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ServiceApplyConfiguration represents an declarative configuration of the Service type for use
    +// ServiceApplyConfiguration represents a declarative configuration of the Service type for use
     // with apply.
     type ServiceApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ServiceApplyConfiguration struct {
     	Status                           *ServiceStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Service constructs an declarative configuration of the Service type for use with
    +// Service constructs a declarative configuration of the Service type for use with
     // apply.
     func Service(name, namespace string) *ServiceApplyConfiguration {
     	b := &ServiceApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *ServiceApplyConfiguration) WithStatus(value *ServiceStatusApplyConfigur
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ServiceApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
    index 53a8193750..26d33deb95 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ServiceAccountApplyConfiguration represents an declarative configuration of the ServiceAccount type for use
    +// ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use
     // with apply.
     type ServiceAccountApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type ServiceAccountApplyConfiguration struct {
     	AutomountServiceAccountToken     *bool                                    `json:"automountServiceAccountToken,omitempty"`
     }
     
    -// ServiceAccount constructs an declarative configuration of the ServiceAccount type for use with
    +// ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with
     // apply.
     func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration {
     	b := &ServiceAccountApplyConfiguration{}
    @@ -275,3 +275,9 @@ func (b *ServiceAccountApplyConfiguration) WithAutomountServiceAccountToken(valu
     	b.AutomountServiceAccountToken = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ServiceAccountApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go
    index a52fad7d8d..fab81bf8a2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ServiceAccountTokenProjectionApplyConfiguration represents an declarative configuration of the ServiceAccountTokenProjection type for use
    +// ServiceAccountTokenProjectionApplyConfiguration represents a declarative configuration of the ServiceAccountTokenProjection type for use
     // with apply.
     type ServiceAccountTokenProjectionApplyConfiguration struct {
     	Audience          *string `json:"audience,omitempty"`
    @@ -26,7 +26,7 @@ type ServiceAccountTokenProjectionApplyConfiguration struct {
     	Path              *string `json:"path,omitempty"`
     }
     
    -// ServiceAccountTokenProjectionApplyConfiguration constructs an declarative configuration of the ServiceAccountTokenProjection type for use with
    +// ServiceAccountTokenProjectionApplyConfiguration constructs a declarative configuration of the ServiceAccountTokenProjection type for use with
     // apply.
     func ServiceAccountTokenProjection() *ServiceAccountTokenProjectionApplyConfiguration {
     	return &ServiceAccountTokenProjectionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
    index 8bc63bd950..e889f21345 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
    @@ -23,7 +23,7 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// ServicePortApplyConfiguration represents an declarative configuration of the ServicePort type for use
    +// ServicePortApplyConfiguration represents a declarative configuration of the ServicePort type for use
     // with apply.
     type ServicePortApplyConfiguration struct {
     	Name        *string             `json:"name,omitempty"`
    @@ -34,7 +34,7 @@ type ServicePortApplyConfiguration struct {
     	NodePort    *int32              `json:"nodePort,omitempty"`
     }
     
    -// ServicePortApplyConfiguration constructs an declarative configuration of the ServicePort type for use with
    +// ServicePortApplyConfiguration constructs a declarative configuration of the ServicePort type for use with
     // apply.
     func ServicePort() *ServicePortApplyConfiguration {
     	return &ServicePortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
    index 5cfbcb700f..41367dce4f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
    @@ -22,7 +22,7 @@ import (
     	corev1 "k8s.io/api/core/v1"
     )
     
    -// ServiceSpecApplyConfiguration represents an declarative configuration of the ServiceSpec type for use
    +// ServiceSpecApplyConfiguration represents a declarative configuration of the ServiceSpec type for use
     // with apply.
     type ServiceSpecApplyConfiguration struct {
     	Ports                         []ServicePortApplyConfiguration          `json:"ports,omitempty"`
    @@ -47,7 +47,7 @@ type ServiceSpecApplyConfiguration struct {
     	TrafficDistribution           *string                                  `json:"trafficDistribution,omitempty"`
     }
     
    -// ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with
    +// ServiceSpecApplyConfiguration constructs a declarative configuration of the ServiceSpec type for use with
     // apply.
     func ServiceSpec() *ServiceSpecApplyConfiguration {
     	return &ServiceSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go
    index 2347cec678..11c3f8a80a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go
    @@ -22,14 +22,14 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ServiceStatusApplyConfiguration represents an declarative configuration of the ServiceStatus type for use
    +// ServiceStatusApplyConfiguration represents a declarative configuration of the ServiceStatus type for use
     // with apply.
     type ServiceStatusApplyConfiguration struct {
     	LoadBalancer *LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
     	Conditions   []metav1.ConditionApplyConfiguration  `json:"conditions,omitempty"`
     }
     
    -// ServiceStatusApplyConfiguration constructs an declarative configuration of the ServiceStatus type for use with
    +// ServiceStatusApplyConfiguration constructs a declarative configuration of the ServiceStatus type for use with
     // apply.
     func ServiceStatus() *ServiceStatusApplyConfiguration {
     	return &ServiceStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go
    index 7016f836a1..13b045fffc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// SessionAffinityConfigApplyConfiguration represents an declarative configuration of the SessionAffinityConfig type for use
    +// SessionAffinityConfigApplyConfiguration represents a declarative configuration of the SessionAffinityConfig type for use
     // with apply.
     type SessionAffinityConfigApplyConfiguration struct {
     	ClientIP *ClientIPConfigApplyConfiguration `json:"clientIP,omitempty"`
     }
     
    -// SessionAffinityConfigApplyConfiguration constructs an declarative configuration of the SessionAffinityConfig type for use with
    +// SessionAffinityConfigApplyConfiguration constructs a declarative configuration of the SessionAffinityConfig type for use with
     // apply.
     func SessionAffinityConfig() *SessionAffinityConfigApplyConfiguration {
     	return &SessionAffinityConfigApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
    index 8b3284536a..b4115609b1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use
    +// SleepActionApplyConfiguration represents a declarative configuration of the SleepAction type for use
     // with apply.
     type SleepActionApplyConfiguration struct {
     	Seconds *int64 `json:"seconds,omitempty"`
     }
     
    -// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with
    +// SleepActionApplyConfiguration constructs a declarative configuration of the SleepAction type for use with
     // apply.
     func SleepAction() *SleepActionApplyConfiguration {
     	return &SleepActionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
    index 00ed39ccb0..7381a498e1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// StorageOSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSPersistentVolumeSource type for use
    +// StorageOSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSPersistentVolumeSource type for use
     // with apply.
     type StorageOSPersistentVolumeSourceApplyConfiguration struct {
     	VolumeName      *string                            `json:"volumeName,omitempty"`
    @@ -28,7 +28,7 @@ type StorageOSPersistentVolumeSourceApplyConfiguration struct {
     	SecretRef       *ObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
     }
     
    -// StorageOSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSPersistentVolumeSource type for use with
    +// StorageOSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSPersistentVolumeSource type for use with
     // apply.
     func StorageOSPersistentVolumeSource() *StorageOSPersistentVolumeSourceApplyConfiguration {
     	return &StorageOSPersistentVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go
    index 7f3b810cf6..81d9373c19 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// StorageOSVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSVolumeSource type for use
    +// StorageOSVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSVolumeSource type for use
     // with apply.
     type StorageOSVolumeSourceApplyConfiguration struct {
     	VolumeName      *string                                 `json:"volumeName,omitempty"`
    @@ -28,7 +28,7 @@ type StorageOSVolumeSourceApplyConfiguration struct {
     	SecretRef       *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
     }
     
    -// StorageOSVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSVolumeSource type for use with
    +// StorageOSVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSVolumeSource type for use with
     // apply.
     func StorageOSVolumeSource() *StorageOSVolumeSourceApplyConfiguration {
     	return &StorageOSVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go
    index deab9e0b38..7719eb7d60 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// SysctlApplyConfiguration represents an declarative configuration of the Sysctl type for use
    +// SysctlApplyConfiguration represents a declarative configuration of the Sysctl type for use
     // with apply.
     type SysctlApplyConfiguration struct {
     	Name  *string `json:"name,omitempty"`
     	Value *string `json:"value,omitempty"`
     }
     
    -// SysctlApplyConfiguration constructs an declarative configuration of the Sysctl type for use with
    +// SysctlApplyConfiguration constructs a declarative configuration of the Sysctl type for use with
     // apply.
     func Sysctl() *SysctlApplyConfiguration {
     	return &SysctlApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
    index 4672b87427..a34fb05526 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// TaintApplyConfiguration represents an declarative configuration of the Taint type for use
    +// TaintApplyConfiguration represents a declarative configuration of the Taint type for use
     // with apply.
     type TaintApplyConfiguration struct {
     	Key       *string         `json:"key,omitempty"`
    @@ -32,7 +32,7 @@ type TaintApplyConfiguration struct {
     	TimeAdded *metav1.Time    `json:"timeAdded,omitempty"`
     }
     
    -// TaintApplyConfiguration constructs an declarative configuration of the Taint type for use with
    +// TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with
     // apply.
     func Taint() *TaintApplyConfiguration {
     	return &TaintApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go
    index bd038fc3ae..cba1a7d081 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// TCPSocketActionApplyConfiguration represents an declarative configuration of the TCPSocketAction type for use
    +// TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use
     // with apply.
     type TCPSocketActionApplyConfiguration struct {
     	Port *intstr.IntOrString `json:"port,omitempty"`
     	Host *string             `json:"host,omitempty"`
     }
     
    -// TCPSocketActionApplyConfiguration constructs an declarative configuration of the TCPSocketAction type for use with
    +// TCPSocketActionApplyConfiguration constructs a declarative configuration of the TCPSocketAction type for use with
     // apply.
     func TCPSocketAction() *TCPSocketActionApplyConfiguration {
     	return &TCPSocketActionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
    index 1a92a8c668..1bcc85b65f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// TolerationApplyConfiguration represents an declarative configuration of the Toleration type for use
    +// TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use
     // with apply.
     type TolerationApplyConfiguration struct {
     	Key               *string                `json:"key,omitempty"`
    @@ -32,7 +32,7 @@ type TolerationApplyConfiguration struct {
     	TolerationSeconds *int64                 `json:"tolerationSeconds,omitempty"`
     }
     
    -// TolerationApplyConfiguration constructs an declarative configuration of the Toleration type for use with
    +// TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with
     // apply.
     func Toleration() *TolerationApplyConfiguration {
     	return &TolerationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go
    index 9581490de2..674ddec93c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// TopologySelectorLabelRequirementApplyConfiguration represents an declarative configuration of the TopologySelectorLabelRequirement type for use
    +// TopologySelectorLabelRequirementApplyConfiguration represents a declarative configuration of the TopologySelectorLabelRequirement type for use
     // with apply.
     type TopologySelectorLabelRequirementApplyConfiguration struct {
     	Key    *string  `json:"key,omitempty"`
     	Values []string `json:"values,omitempty"`
     }
     
    -// TopologySelectorLabelRequirementApplyConfiguration constructs an declarative configuration of the TopologySelectorLabelRequirement type for use with
    +// TopologySelectorLabelRequirementApplyConfiguration constructs a declarative configuration of the TopologySelectorLabelRequirement type for use with
     // apply.
     func TopologySelectorLabelRequirement() *TopologySelectorLabelRequirementApplyConfiguration {
     	return &TopologySelectorLabelRequirementApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go
    index a025b8a2a8..7812ae5204 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// TopologySelectorTermApplyConfiguration represents an declarative configuration of the TopologySelectorTerm type for use
    +// TopologySelectorTermApplyConfiguration represents a declarative configuration of the TopologySelectorTerm type for use
     // with apply.
     type TopologySelectorTermApplyConfiguration struct {
     	MatchLabelExpressions []TopologySelectorLabelRequirementApplyConfiguration `json:"matchLabelExpressions,omitempty"`
     }
     
    -// TopologySelectorTermApplyConfiguration constructs an declarative configuration of the TopologySelectorTerm type for use with
    +// TopologySelectorTermApplyConfiguration constructs a declarative configuration of the TopologySelectorTerm type for use with
     // apply.
     func TopologySelectorTerm() *TopologySelectorTermApplyConfiguration {
     	return &TopologySelectorTermApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
    index fbfa8fa886..b21d233513 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use
    +// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use
     // with apply.
     type TopologySpreadConstraintApplyConfiguration struct {
     	MaxSkew            *int32                                  `json:"maxSkew,omitempty"`
    @@ -36,7 +36,7 @@ type TopologySpreadConstraintApplyConfiguration struct {
     	MatchLabelKeys     []string                                `json:"matchLabelKeys,omitempty"`
     }
     
    -// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with
    +// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with
     // apply.
     func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration {
     	return &TopologySpreadConstraintApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go
    index cdc2eb7d34..1e63b79889 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// TypedLocalObjectReferenceApplyConfiguration represents an declarative configuration of the TypedLocalObjectReference type for use
    +// TypedLocalObjectReferenceApplyConfiguration represents a declarative configuration of the TypedLocalObjectReference type for use
     // with apply.
     type TypedLocalObjectReferenceApplyConfiguration struct {
     	APIGroup *string `json:"apiGroup,omitempty"`
    @@ -26,7 +26,7 @@ type TypedLocalObjectReferenceApplyConfiguration struct {
     	Name     *string `json:"name,omitempty"`
     }
     
    -// TypedLocalObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedLocalObjectReference type for use with
    +// TypedLocalObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedLocalObjectReference type for use with
     // apply.
     func TypedLocalObjectReference() *TypedLocalObjectReferenceApplyConfiguration {
     	return &TypedLocalObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go
    index d9a01c9c3a..f07de8902e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// TypedObjectReferenceApplyConfiguration represents an declarative configuration of the TypedObjectReference type for use
    +// TypedObjectReferenceApplyConfiguration represents a declarative configuration of the TypedObjectReference type for use
     // with apply.
     type TypedObjectReferenceApplyConfiguration struct {
     	APIGroup  *string `json:"apiGroup,omitempty"`
    @@ -27,7 +27,7 @@ type TypedObjectReferenceApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     }
     
    -// TypedObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedObjectReference type for use with
    +// TypedObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedObjectReference type for use with
     // apply.
     func TypedObjectReference() *TypedObjectReferenceApplyConfiguration {
     	return &TypedObjectReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
    index db0686bce7..9a48f83497 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// VolumeApplyConfiguration represents an declarative configuration of the Volume type for use
    +// VolumeApplyConfiguration represents a declarative configuration of the Volume type for use
     // with apply.
     type VolumeApplyConfiguration struct {
     	Name                           *string `json:"name,omitempty"`
     	VolumeSourceApplyConfiguration `json:",inline"`
     }
     
    -// VolumeApplyConfiguration constructs an declarative configuration of the Volume type for use with
    +// VolumeApplyConfiguration constructs a declarative configuration of the Volume type for use with
     // apply.
     func Volume() *VolumeApplyConfiguration {
     	return &VolumeApplyConfiguration{}
    @@ -270,3 +270,11 @@ func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApp
     	b.Ephemeral = value
     	return b
     }
    +
    +// WithImage sets the Image field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Image field is set to the value of the last call.
    +func (b *VolumeApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
    +	b.Image = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go
    index ea18ca8d9e..0bc52aad2a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// VolumeDeviceApplyConfiguration represents an declarative configuration of the VolumeDevice type for use
    +// VolumeDeviceApplyConfiguration represents a declarative configuration of the VolumeDevice type for use
     // with apply.
     type VolumeDeviceApplyConfiguration struct {
     	Name       *string `json:"name,omitempty"`
     	DevicePath *string `json:"devicePath,omitempty"`
     }
     
    -// VolumeDeviceApplyConfiguration constructs an declarative configuration of the VolumeDevice type for use with
    +// VolumeDeviceApplyConfiguration constructs a declarative configuration of the VolumeDevice type for use with
     // apply.
     func VolumeDevice() *VolumeDeviceApplyConfiguration {
     	return &VolumeDeviceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
    index 358658350e..49f22cc4e7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// VolumeMountApplyConfiguration represents an declarative configuration of the VolumeMount type for use
    +// VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use
     // with apply.
     type VolumeMountApplyConfiguration struct {
     	Name              *string                   `json:"name,omitempty"`
    @@ -34,7 +34,7 @@ type VolumeMountApplyConfiguration struct {
     	SubPathExpr       *string                   `json:"subPathExpr,omitempty"`
     }
     
    -// VolumeMountApplyConfiguration constructs an declarative configuration of the VolumeMount type for use with
    +// VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with
     // apply.
     func VolumeMount() *VolumeMountApplyConfiguration {
     	return &VolumeMountApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
    index c3d187fdfa..a0a9b5401c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// VolumeMountStatusApplyConfiguration represents an declarative configuration of the VolumeMountStatus type for use
    +// VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use
     // with apply.
     type VolumeMountStatusApplyConfiguration struct {
     	Name              *string                   `json:"name,omitempty"`
    @@ -31,7 +31,7 @@ type VolumeMountStatusApplyConfiguration struct {
     	RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"`
     }
     
    -// VolumeMountStatusApplyConfiguration constructs an declarative configuration of the VolumeMountStatus type for use with
    +// VolumeMountStatusApplyConfiguration constructs a declarative configuration of the VolumeMountStatus type for use with
     // apply.
     func VolumeMountStatus() *VolumeMountStatusApplyConfiguration {
     	return &VolumeMountStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go
    index 32bfd82928..9198c25dc8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// VolumeNodeAffinityApplyConfiguration represents an declarative configuration of the VolumeNodeAffinity type for use
    +// VolumeNodeAffinityApplyConfiguration represents a declarative configuration of the VolumeNodeAffinity type for use
     // with apply.
     type VolumeNodeAffinityApplyConfiguration struct {
     	Required *NodeSelectorApplyConfiguration `json:"required,omitempty"`
     }
     
    -// VolumeNodeAffinityApplyConfiguration constructs an declarative configuration of the VolumeNodeAffinity type for use with
    +// VolumeNodeAffinityApplyConfiguration constructs a declarative configuration of the VolumeNodeAffinity type for use with
     // apply.
     func VolumeNodeAffinity() *VolumeNodeAffinityApplyConfiguration {
     	return &VolumeNodeAffinityApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go
    index a2ef0a9943..c14e9fe697 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// VolumeProjectionApplyConfiguration represents an declarative configuration of the VolumeProjection type for use
    +// VolumeProjectionApplyConfiguration represents a declarative configuration of the VolumeProjection type for use
     // with apply.
     type VolumeProjectionApplyConfiguration struct {
     	Secret              *SecretProjectionApplyConfiguration              `json:"secret,omitempty"`
    @@ -28,7 +28,7 @@ type VolumeProjectionApplyConfiguration struct {
     	ClusterTrustBundle  *ClusterTrustBundleProjectionApplyConfiguration  `json:"clusterTrustBundle,omitempty"`
     }
     
    -// VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with
    +// VolumeProjectionApplyConfiguration constructs a declarative configuration of the VolumeProjection type for use with
     // apply.
     func VolumeProjection() *VolumeProjectionApplyConfiguration {
     	return &VolumeProjectionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
    index 89ad1da8b3..ae849f7741 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// VolumeResourceRequirementsApplyConfiguration represents an declarative configuration of the VolumeResourceRequirements type for use
    +// VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use
     // with apply.
     type VolumeResourceRequirementsApplyConfiguration struct {
     	Limits   *v1.ResourceList `json:"limits,omitempty"`
     	Requests *v1.ResourceList `json:"requests,omitempty"`
     }
     
    -// VolumeResourceRequirementsApplyConfiguration constructs an declarative configuration of the VolumeResourceRequirements type for use with
    +// VolumeResourceRequirementsApplyConfiguration constructs a declarative configuration of the VolumeResourceRequirements type for use with
     // apply.
     func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration {
     	return &VolumeResourceRequirementsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go
    index 4a8d316dd5..aeead953cf 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// VolumeSourceApplyConfiguration represents an declarative configuration of the VolumeSource type for use
    +// VolumeSourceApplyConfiguration represents a declarative configuration of the VolumeSource type for use
     // with apply.
     type VolumeSourceApplyConfiguration struct {
     	HostPath              *HostPathVolumeSourceApplyConfiguration              `json:"hostPath,omitempty"`
    @@ -50,9 +50,10 @@ type VolumeSourceApplyConfiguration struct {
     	StorageOS             *StorageOSVolumeSourceApplyConfiguration             `json:"storageos,omitempty"`
     	CSI                   *CSIVolumeSourceApplyConfiguration                   `json:"csi,omitempty"`
     	Ephemeral             *EphemeralVolumeSourceApplyConfiguration             `json:"ephemeral,omitempty"`
    +	Image                 *ImageVolumeSourceApplyConfiguration                 `json:"image,omitempty"`
     }
     
    -// VolumeSourceApplyConfiguration constructs an declarative configuration of the VolumeSource type for use with
    +// VolumeSourceApplyConfiguration constructs a declarative configuration of the VolumeSource type for use with
     // apply.
     func VolumeSource() *VolumeSourceApplyConfiguration {
     	return &VolumeSourceApplyConfiguration{}
    @@ -289,3 +290,11 @@ func (b *VolumeSourceApplyConfiguration) WithEphemeral(value *EphemeralVolumeSou
     	b.Ephemeral = value
     	return b
     }
    +
    +// WithImage sets the Image field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Image field is set to the value of the last call.
    +func (b *VolumeSourceApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeSourceApplyConfiguration {
    +	b.Image = value
    +	return b
    +}
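
    The two hunks above wire the new image volume source into both VolumeApplyConfiguration and VolumeSourceApplyConfiguration. A minimal sketch of how a caller might use the new WithImage setter, assuming the generated ImageVolumeSource builder exposes WithReference and WithPullPolicy setters per the usual codegen conventions (the image reference below is illustrative only):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    )

    func main() {
    	// Declare a volume backed by the new image volume source via the generated
    	// builders. WithReference/WithPullPolicy are assumed setters for the
    	// ImageVolumeSource fields; the image reference is made up.
    	vol := corev1ac.Volume().
    		WithName("artifact").
    		WithImage(corev1ac.ImageVolumeSource().
    			WithReference("registry.example.com/config-bundle:v1").
    			WithPullPolicy(corev1.PullIfNotPresent))

    	out, _ := json.MarshalIndent(vol, "", "  ")
    	fmt.Println(string(out))
    }
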
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go
    index ff3e3e27d9..ea8fd8d62e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// VsphereVirtualDiskVolumeSourceApplyConfiguration represents an declarative configuration of the VsphereVirtualDiskVolumeSource type for use
    +// VsphereVirtualDiskVolumeSourceApplyConfiguration represents a declarative configuration of the VsphereVirtualDiskVolumeSource type for use
     // with apply.
     type VsphereVirtualDiskVolumeSourceApplyConfiguration struct {
     	VolumePath        *string `json:"volumePath,omitempty"`
    @@ -27,7 +27,7 @@ type VsphereVirtualDiskVolumeSourceApplyConfiguration struct {
     	StoragePolicyID   *string `json:"storagePolicyID,omitempty"`
     }
     
    -// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the VsphereVirtualDiskVolumeSource type for use with
    +// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the VsphereVirtualDiskVolumeSource type for use with
     // apply.
     func VsphereVirtualDiskVolumeSource() *VsphereVirtualDiskVolumeSourceApplyConfiguration {
     	return &VsphereVirtualDiskVolumeSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go
    index eb99d06ffa..c49ef93eb4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// WeightedPodAffinityTermApplyConfiguration represents an declarative configuration of the WeightedPodAffinityTerm type for use
    +// WeightedPodAffinityTermApplyConfiguration represents a declarative configuration of the WeightedPodAffinityTerm type for use
     // with apply.
     type WeightedPodAffinityTermApplyConfiguration struct {
     	Weight          *int32                             `json:"weight,omitempty"`
     	PodAffinityTerm *PodAffinityTermApplyConfiguration `json:"podAffinityTerm,omitempty"`
     }
     
    -// WeightedPodAffinityTermApplyConfiguration constructs an declarative configuration of the WeightedPodAffinityTerm type for use with
    +// WeightedPodAffinityTermApplyConfiguration constructs a declarative configuration of the WeightedPodAffinityTerm type for use with
     // apply.
     func WeightedPodAffinityTerm() *WeightedPodAffinityTermApplyConfiguration {
     	return &WeightedPodAffinityTermApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
    index 20692e0146..bb37a500b4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// WindowsSecurityContextOptionsApplyConfiguration represents an declarative configuration of the WindowsSecurityContextOptions type for use
    +// WindowsSecurityContextOptionsApplyConfiguration represents a declarative configuration of the WindowsSecurityContextOptions type for use
     // with apply.
     type WindowsSecurityContextOptionsApplyConfiguration struct {
     	GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"`
    @@ -27,7 +27,7 @@ type WindowsSecurityContextOptionsApplyConfiguration struct {
     	HostProcess            *bool   `json:"hostProcess,omitempty"`
     }
     
    -// WindowsSecurityContextOptionsApplyConfiguration constructs an declarative configuration of the WindowsSecurityContextOptions type for use with
    +// WindowsSecurityContextOptionsApplyConfiguration constructs a declarative configuration of the WindowsSecurityContextOptions type for use with
     // apply.
     func WindowsSecurityContextOptions() *WindowsSecurityContextOptionsApplyConfiguration {
     	return &WindowsSecurityContextOptionsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go
    index d8c2359a3b..df45a6fb8a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go
    @@ -22,7 +22,7 @@ import (
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use
    +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use
     // with apply.
     type EndpointApplyConfiguration struct {
     	Addresses          []string                                  `json:"addresses,omitempty"`
    @@ -35,7 +35,7 @@ type EndpointApplyConfiguration struct {
     	Hints              *EndpointHintsApplyConfiguration          `json:"hints,omitempty"`
     }
     
    -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with
    +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with
     // apply.
     func Endpoint() *EndpointApplyConfiguration {
     	return &EndpointApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go
    index 68c25dd57c..20f0b97124 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use
    +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use
     // with apply.
     type EndpointConditionsApplyConfiguration struct {
     	Ready       *bool `json:"ready,omitempty"`
    @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct {
     	Terminating *bool `json:"terminating,omitempty"`
     }
     
    -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with
    +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with
     // apply.
     func EndpointConditions() *EndpointConditionsApplyConfiguration {
     	return &EndpointConditionsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go
    index 6eb9f21a51..d2d0f67769 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use
    +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use
     // with apply.
     type EndpointHintsApplyConfiguration struct {
     	ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"`
     }
     
    -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with
    +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with
     // apply.
     func EndpointHints() *EndpointHintsApplyConfiguration {
     	return &EndpointHintsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
    index c712956009..12908deb61 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use
    +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
     // with apply.
     type EndpointPortApplyConfiguration struct {
     	Name        *string      `json:"name,omitempty"`
    @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct {
     	AppProtocol *string      `json:"appProtocol,omitempty"`
     }
     
    -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with
    +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
     // apply.
     func EndpointPort() *EndpointPortApplyConfiguration {
     	return &EndpointPortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
    index 640613753d..97002d2bbb 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use
    +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use
     // with apply.
     type EndpointSliceApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct {
     	Ports                            []EndpointPortApplyConfiguration `json:"ports,omitempty"`
     }
     
    -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with
    +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with
     // apply.
     func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
     	b := &EndpointSliceApplyConfiguration{}
    @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EndpointSliceApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
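
    The GetName accessor added above (and mirrored on the other apply configurations in this change) lets callers read the object name back without reaching into the embedded ObjectMeta builder. A minimal sketch; the slice name and namespace are made up:

    package main

    import (
    	"fmt"

    	discoveryv1ac "k8s.io/client-go/applyconfigurations/discovery/v1"
    )

    func main() {
    	// The EndpointSlice constructor already records the name in the embedded
    	// ObjectMeta apply configuration, so GetName returns a non-nil pointer here.
    	slice := discoveryv1ac.EndpointSlice("example-slice", "default")
    	if name := slice.GetName(); name != nil {
    		fmt.Println("would apply EndpointSlice:", *name)
    	}
    }
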
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go
    index 192a5ad2e8..505d11ae2f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use
    +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use
     // with apply.
     type ForZoneApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with
    +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with
     // apply.
     func ForZone() *ForZoneApplyConfiguration {
     	return &ForZoneApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go
    index 724c2d007c..5d87dae72e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use
    +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use
     // with apply.
     type EndpointApplyConfiguration struct {
     	Addresses  []string                              `json:"addresses,omitempty"`
    @@ -34,7 +34,7 @@ type EndpointApplyConfiguration struct {
     	Hints      *EndpointHintsApplyConfiguration      `json:"hints,omitempty"`
     }
     
    -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with
    +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with
     // apply.
     func Endpoint() *EndpointApplyConfiguration {
     	return &EndpointApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go
    index bc0438f90b..13f5fa5575 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use
    +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use
     // with apply.
     type EndpointConditionsApplyConfiguration struct {
     	Ready       *bool `json:"ready,omitempty"`
    @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct {
     	Terminating *bool `json:"terminating,omitempty"`
     }
     
    -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with
    +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with
     // apply.
     func EndpointConditions() *EndpointConditionsApplyConfiguration {
     	return &EndpointConditionsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go
    index 41d80206b3..99f69027a8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use
    +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use
     // with apply.
     type EndpointHintsApplyConfiguration struct {
     	ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"`
     }
     
    -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with
    +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with
     // apply.
     func EndpointHints() *EndpointHintsApplyConfiguration {
     	return &EndpointHintsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go
    index 9a3a31b965..07cfc684b2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use
    +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
     // with apply.
     type EndpointPortApplyConfiguration struct {
     	Name        *string      `json:"name,omitempty"`
    @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct {
     	AppProtocol *string      `json:"appProtocol,omitempty"`
     }
     
    -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with
    +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
     // apply.
     func EndpointPort() *EndpointPortApplyConfiguration {
     	return &EndpointPortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
    index 74a24773cc..888319bc0f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use
    +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use
     // with apply.
     type EndpointSliceApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct {
     	Ports                            []EndpointPortApplyConfiguration `json:"ports,omitempty"`
     }
     
    -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with
    +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with
     // apply.
     func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
     	b := &EndpointSliceApplyConfiguration{}
    @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EndpointSliceApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go
    index 4d1455ed38..4af09cc49b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use
    +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use
     // with apply.
     type ForZoneApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with
    +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with
     // apply.
     func ForZone() *ForZoneApplyConfiguration {
     	return &ForZoneApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/doc.go b/vendor/k8s.io/client-go/applyconfigurations/doc.go
    new file mode 100644
    index 0000000000..ac426c6075
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/doc.go
    @@ -0,0 +1,151 @@
    +/*
    +Copyright 2021 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +/*
    +Package applyconfigurations provides typesafe go representations of the apply
    +configurations that are used to construct Server-side Apply requests.
    +
    +# Basics
    +
    +The Apply functions in the typed client (see the k8s.io/client-go/kubernetes/typed packages) offer
    +a direct and typesafe way of calling Server-side Apply. Each Apply function takes an "apply
    +configuration" type as an argument, which is a structured representation of an Apply request. For
    +example:
    +
    +	import (
    +	     ...
    +	     v1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1"
    +	)
    +	hpaApplyConfig := v1ac.HorizontalPodAutoscaler(autoscalerName, ns).
    +	     WithSpec(v1ac.HorizontalPodAutoscalerSpec().
    +	              WithMinReplicas(0)
    +	     )
    +	return hpav1client.Apply(ctx, hpaApplyConfig, metav1.ApplyOptions{FieldManager: "mycontroller", Force: true})
    +
    +Note in this example that HorizontalPodAutoscaler is imported from an "applyconfigurations"
    +package. Each "apply configuration" type represents the same Kubernetes object kind as the
    +corresponding go struct, but where all fields are pointers to make them optional, allowing apply
    +requests to be accurately represented. For example, when the apply configuration in the above
    +example is marshalled to YAML, it produces:
    +
    +	apiVersion: autoscaling/v1
    +	kind: HorizontalPodAutoscaler
    +	metadata:
    +	    name: myHPA
    +	    namespace: myNamespace
    +	spec:
    +	    minReplicas: 0
    +
    +To understand why this is needed, consider that the above YAML cannot be produced by the
    +v1.HorizontalPodAutoscaler go struct. Take for example:
    +
    +	hpa := v1.HorizontalPodAutoscaler{
    +	     TypeMeta: metav1.TypeMeta{
    +	              APIVersion: "autoscaling/v1",
    +	              Kind:       "HorizontalPodAutoscaler",
    +	     },
    +	     ObjectMeta: ObjectMeta{
    +	              Namespace: ns,
    +	              Name:      autoscalerName,
    +	     },
    +	     Spec: v1.HorizontalPodAutoscalerSpec{
    +	              MinReplicas: pointer.Int32Ptr(0),
    +	     },
    +	}
    +
    +The above code attempts to declare the same apply configuration as shown in the previous examples,
    +but when marshalled to YAML, produces:
    +
    +	kind: HorizontalPodAutoscaler
    +	apiVersion: autoscaling/v1
    +	metadata:
    +	  name: myHPA
    +	  namespace: myNamespace
    +	  creationTimestamp: null
    +	spec:
    +	  scaleTargetRef:
    +	    kind: ""
    +	    name: ""
    +	  minReplicas: 0
    +	  maxReplicas: 0
    +
    +Which, among other things, contains spec.maxReplicas set to 0. This is almost certainly not what
    +the caller intended (the intended apply configuration says nothing about the maxReplicas field),
    +and could have serious consequences on a production system: it directs the autoscaler to downscale
    +to zero pods. The problem here originates from the fact that the go structs contain required fields
    +that are zero valued if not set explicitly. The go structs work as intended for create and update
    +operations, but are fundamentally incompatible with apply, which is why we have introduced the
    +generated "apply configuration" types.
    +
    +The "apply configurations" also have convenience With functions that make it easier to
    +build apply requests. This allows developers to set fields without having to deal with the fact that
    +all the fields in the "apply configuration" types are pointers, and are inconvenient to set using
    +go. For example "MinReplicas: &0" is not legal go code, so without the With functions, developers
    +would work around this problem by using a library, e.g., "MinReplicas: pointer.Int32Ptr(0)", but
    +string enumerations like corev1.Protocol are still a problem since they cannot be supported by a
    +general purpose library. In addition to the convenience, the With functions also isolate
    +developers from the underlying representation, which makes it safer for the underlying
    +representation to be changed to support additional features in the future.
    +
    +# Controller Support
    +
    +The new client-go support makes it much easier to use Server-side Apply in controllers, by either of
    +two mechanisms.
    +
    +Mechanism 1:
    +
    +When authoring new controllers to use Server-side Apply, a good approach is to have the controller
    +recreate the apply configuration for an object each time it reconciles that object.  This ensures
    +that the controller fully reconciles all the fields that it is responsible for. Controllers
    +typically should unconditionally set all the fields they own by setting "Force: true" in the
    +ApplyOptions. Controllers must also provide a FieldManager name that is unique to the
    +reconciliation loop that apply is called from.
    +
    +When upgrading existing controllers to use Server-side Apply, the same approach often works
    +well: migrate the controllers to recreate the apply configuration each time they reconcile an
    +object. For cases where this does not work well, see Mechanism 2.
    +
    +Mechanism 2:
    +
    +When upgrading existing controllers to use Server-side Apply, the controller might have multiple
    +code paths that update different parts of an object depending on various conditions. Migrating a
    +controller like this to Server-side Apply can be risky because if the controller forgets to include
    +any fields in an apply configuration that is included in a previous apply request, a field can be
    +accidentally deleted. For such cases, an alternative to mechanism 1 is to replace any controller
    +reconciliation code that performs a "read/modify-in-place/update" (or patch) workflow with an
    +"extract/modify-in-place/apply" workflow. Here's an example of the new workflow:
    +
    +	    fieldMgr := "my-field-manager"
    +	    deploymentClient := clientset.AppsV1().Deployments("default")
    +	    // read, could also be read from a shared informer
    +	    deployment, err := deploymentClient.Get(ctx, "example-deployment", metav1.GetOptions{})
    +	    if err != nil {
    +	      // handle error
    +	    }
    +	    // extract
    +	    deploymentApplyConfig, err := appsv1ac.ExtractDeployment(deployment, fieldMgr)
    +	    if err != nil {
    +	      // handle error
    +	    }
    +	    // modify-in-place
    +	    deploymentApplyConfig.Spec.Template.Spec.WithContainers(corev1ac.Container().
    +		WithName("modify-slice").
    +		WithImage("nginx:1.14.2"),
    +	    )
    +	    // apply
    +	    applied, err := deploymentClient.Apply(ctx, deploymentApplyConfig, metav1.ApplyOptions{FieldManager: fieldMgr})
    +*/
    +package applyconfigurations // import "k8s.io/client-go/applyconfigurations"
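
    The package documentation added above describes Mechanism 1 in prose only. A minimal sketch of that pattern, under assumptions: the Deployment, its labels, the image, and the "demo-controller" field manager name are all illustrative, not part of this change.

    package controller

    import (
    	"context"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
    	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // reconcile rebuilds the full apply configuration for the object this
    // controller owns on every pass and applies it with Force set, under a field
    // manager unique to this loop, as described for Mechanism 1.
    func reconcile(ctx context.Context, cs kubernetes.Interface) error {
    	labels := map[string]string{"app": "demo"}
    	desired := appsv1ac.Deployment("demo", "default").
    		WithLabels(labels).
    		WithSpec(appsv1ac.DeploymentSpec().
    			WithReplicas(2).
    			WithSelector(metav1ac.LabelSelector().WithMatchLabels(labels)).
    			WithTemplate(corev1ac.PodTemplateSpec().
    				WithLabels(labels).
    				WithSpec(corev1ac.PodSpec().
    					WithContainers(corev1ac.Container().
    						WithName("demo").
    						WithImage("nginx:1.14.2")))))

    	// Unconditionally own every field this controller sets.
    	_, err := cs.AppsV1().Deployments("default").
    		Apply(ctx, desired, metav1.ApplyOptions{FieldManager: "demo-controller", Force: true})
    	return err
    }
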
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
    index 767e3dfc73..a6e98d1c82 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EventApplyConfiguration represents an declarative configuration of the Event type for use
    +// EventApplyConfiguration represents a declarative configuration of the Event type for use
     // with apply.
     type EventApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -49,7 +49,7 @@ type EventApplyConfiguration struct {
     	DeprecatedCount                  *int32                                    `json:"deprecatedCount,omitempty"`
     }
     
    -// Event constructs an declarative configuration of the Event type for use with
    +// Event constructs a declarative configuration of the Event type for use with
     // apply.
     func Event(name, namespace string) *EventApplyConfiguration {
     	b := &EventApplyConfiguration{}
    @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo
     	b.DeprecatedCount = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EventApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
    index e66fb41271..18069c0d1b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use
    +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
     // with apply.
     type EventSeriesApplyConfiguration struct {
     	Count            *int32        `json:"count,omitempty"`
     	LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"`
     }
     
    -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with
    +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with
     // apply.
     func EventSeries() *EventSeriesApplyConfiguration {
     	return &EventSeriesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
    index cfc4a851f3..890d95748b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EventApplyConfiguration represents an declarative configuration of the Event type for use
    +// EventApplyConfiguration represents a declarative configuration of the Event type for use
     // with apply.
     type EventApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -49,7 +49,7 @@ type EventApplyConfiguration struct {
     	DeprecatedCount                  *int32                                    `json:"deprecatedCount,omitempty"`
     }
     
    -// Event constructs an declarative configuration of the Event type for use with
    +// Event constructs a declarative configuration of the Event type for use with
     // apply.
     func Event(name, namespace string) *EventApplyConfiguration {
     	b := &EventApplyConfiguration{}
    @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo
     	b.DeprecatedCount = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EventApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go
    index 640a265172..75d936e8be 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use
    +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
     // with apply.
     type EventSeriesApplyConfiguration struct {
     	Count            *int32        `json:"count,omitempty"`
     	LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"`
     }
     
    -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with
    +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with
     // apply.
     func EventSeries() *EventSeriesApplyConfiguration {
     	return &EventSeriesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
    index eae399d323..ff778529c9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use
    +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use
     // with apply.
     type DaemonSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct {
     	Status                           *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with
    +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with
     // apply.
     func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
     	b := &DaemonSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DaemonSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
    index bbf718f0f2..9b8057e69d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use
    +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
     // with apply.
     type DaemonSetConditionApplyConfiguration struct {
     	Type               *v1beta1.DaemonSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct {
     	Message            *string                         `json:"message,omitempty"`
     }
     
    -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with
    +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
     // apply.
     func DaemonSetCondition() *DaemonSetConditionApplyConfiguration {
     	return &DaemonSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go
    index b5d7a0c161..d628969187 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use
    +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use
     // with apply.
     type DaemonSetSpecApplyConfiguration struct {
     	Selector             *v1.LabelSelectorApplyConfiguration        `json:"selector,omitempty"`
    @@ -34,7 +34,7 @@ type DaemonSetSpecApplyConfiguration struct {
     	RevisionHistoryLimit *int32                                     `json:"revisionHistoryLimit,omitempty"`
     }
     
    -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with
    +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with
     // apply.
     func DaemonSetSpec() *DaemonSetSpecApplyConfiguration {
     	return &DaemonSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go
    index be6b3b2853..373f9ef97a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use
    +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use
     // with apply.
     type DaemonSetStatusApplyConfiguration struct {
     	CurrentNumberScheduled *int32                                 `json:"currentNumberScheduled,omitempty"`
    @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct {
     	Conditions             []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with
    +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with
     // apply.
     func DaemonSetStatus() *DaemonSetStatusApplyConfiguration {
     	return &DaemonSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
    index 2c827e62d4..e597b15a6a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     )
     
    -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use
    +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
     // with apply.
     type DaemonSetUpdateStrategyApplyConfiguration struct {
     	Type          *v1beta1.DaemonSetUpdateStrategyType      `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with
    +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with
     // apply.
     func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration {
     	return &DaemonSetUpdateStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
    index 878083f821..6badc64d82 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use
    +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
     // with apply.
     type DeploymentApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct {
     	Status                           *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Deployment constructs an declarative configuration of the Deployment type for use with
    +// Deployment constructs a declarative configuration of the Deployment type for use with
     // apply.
     func Deployment(name, namespace string) *DeploymentApplyConfiguration {
     	b := &DeploymentApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DeploymentApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
    index d8a214b7fc..79e109a779 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use
    +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
     // with apply.
     type DeploymentConditionApplyConfiguration struct {
     	Type               *v1beta1.DeploymentConditionType `json:"type,omitempty"`
    @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with
    +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
     // apply.
     func DeploymentCondition() *DeploymentConditionApplyConfiguration {
     	return &DeploymentConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go
    index 5e18476bdc..5531c756f9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use
    +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
     // with apply.
     type DeploymentSpecApplyConfiguration struct {
     	Replicas                *int32                                    `json:"replicas,omitempty"`
    @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct {
     	ProgressDeadlineSeconds *int32                                    `json:"progressDeadlineSeconds,omitempty"`
     }
     
    -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with
    +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
     // apply.
     func DeploymentSpec() *DeploymentSpecApplyConfiguration {
     	return &DeploymentSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go
    index f8d1cf5d25..adc023a34d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use
    +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
     // with apply.
     type DeploymentStatusApplyConfiguration struct {
     	ObservedGeneration  *int64                                  `json:"observedGeneration,omitempty"`
    @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct {
     	CollisionCount      *int32                                  `json:"collisionCount,omitempty"`
     }
     
    -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with
    +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
     // apply.
     func DeploymentStatus() *DeploymentStatusApplyConfiguration {
     	return &DeploymentStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
    index 7c17b40722..2d88406eb9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
    @@ -22,14 +22,14 @@ import (
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     )
     
    -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use
    +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
     // with apply.
     type DeploymentStrategyApplyConfiguration struct {
     	Type          *v1beta1.DeploymentStrategyType            `json:"type,omitempty"`
     	RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
     }
     
    -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with
    +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with
     // apply.
     func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
     	return &DeploymentStrategyApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
    index 361605d8cd..3826e0dddc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
    @@ -22,7 +22,7 @@ import (
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     )
     
    -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use
    +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
     // with apply.
     type HTTPIngressPathApplyConfiguration struct {
     	Path     *string                           `json:"path,omitempty"`
    @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct {
     	Backend  *IngressBackendApplyConfiguration `json:"backend,omitempty"`
     }
     
    -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with
    +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with
     // apply.
     func HTTPIngressPath() *HTTPIngressPathApplyConfiguration {
     	return &HTTPIngressPathApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go
    index 3137bc5eb0..1245452237 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use
    +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use
     // with apply.
     type HTTPIngressRuleValueApplyConfiguration struct {
     	Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"`
     }
     
    -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with
    +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with
     // apply.
     func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration {
     	return &HTTPIngressRuleValueApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
    index 46c541048d..6738bf07bf 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use
    +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
     // with apply.
     type IngressApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct {
     	Status                           *IngressStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Ingress constructs an declarative configuration of the Ingress type for use with
    +// Ingress constructs a declarative configuration of the Ingress type for use with
     // apply.
     func Ingress(name, namespace string) *IngressApplyConfiguration {
     	b := &IngressApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *IngressApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go
    index f19c2f2ee2..9d386f1608 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use
    +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use
     // with apply.
     type IngressBackendApplyConfiguration struct {
     	ServiceName *string                                         `json:"serviceName,omitempty"`
    @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct {
     	Resource    *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
     }
     
    -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with
    +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with
     // apply.
     func IngressBackend() *IngressBackendApplyConfiguration {
     	return &IngressBackendApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go
    index 20bf637805..12dbc35969 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use
    +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use
     // with apply.
     type IngressLoadBalancerIngressApplyConfiguration struct {
     	IP       *string                               `json:"ip,omitempty"`
    @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct {
     	Ports    []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
     }
     
    -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with
    +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with
     // apply.
     func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration {
     	return &IngressLoadBalancerIngressApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go
    index e16dd23633..e896ab3415 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use
    +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use
     // with apply.
     type IngressLoadBalancerStatusApplyConfiguration struct {
     	Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
     }
     
    -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with
    +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with
     // apply.
     func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration {
     	return &IngressLoadBalancerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go
    index 0836537979..4ee3f01617 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use
    +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use
     // with apply.
     type IngressPortStatusApplyConfiguration struct {
     	Port     *int32       `json:"port,omitempty"`
    @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct {
     	Error    *string      `json:"error,omitempty"`
     }
     
    -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with
    +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with
     // apply.
     func IngressPortStatus() *IngressPortStatusApplyConfiguration {
     	return &IngressPortStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
    index 015541eeb9..dc676f7b60 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use
    +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use
     // with apply.
     type IngressRuleApplyConfiguration struct {
     	Host                               *string `json:"host,omitempty"`
    -	IngressRuleValueApplyConfiguration `json:",omitempty,inline"`
    +	IngressRuleValueApplyConfiguration `json:",inline"`
     }
     
    -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with
    +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with
     // apply.
     func IngressRule() *IngressRuleApplyConfiguration {
     	return &IngressRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go
    index 2d03c7b132..4a64124755 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use
    +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use
     // with apply.
     type IngressRuleValueApplyConfiguration struct {
     	HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"`
     }
     
    -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with
    +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with
     // apply.
     func IngressRuleValue() *IngressRuleValueApplyConfiguration {
     	return &IngressRuleValueApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go
    index 1ab4d8bb73..58fbde8b35 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use
    +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use
     // with apply.
     type IngressSpecApplyConfiguration struct {
     	IngressClassName *string                           `json:"ingressClassName,omitempty"`
    @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct {
     	Rules            []IngressRuleApplyConfiguration   `json:"rules,omitempty"`
     }
     
    -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with
    +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with
     // apply.
     func IngressSpec() *IngressSpecApplyConfiguration {
     	return &IngressSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go
    index faa7e2446f..3aed616889 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use
    +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use
     // with apply.
     type IngressStatusApplyConfiguration struct {
     	LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
     }
     
    -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with
    +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with
     // apply.
     func IngressStatus() *IngressStatusApplyConfiguration {
     	return &IngressStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go
    index 8ca93a0bc2..63648cd464 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use
    +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use
     // with apply.
     type IngressTLSApplyConfiguration struct {
     	Hosts      []string `json:"hosts,omitempty"`
     	SecretName *string  `json:"secretName,omitempty"`
     }
     
    -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with
    +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with
     // apply.
     func IngressTLS() *IngressTLSApplyConfiguration {
     	return &IngressTLSApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go
    index a90d3b2207..4a671130b8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use
    +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use
     // with apply.
     type IPBlockApplyConfiguration struct {
     	CIDR   *string  `json:"cidr,omitempty"`
     	Except []string `json:"except,omitempty"`
     }
     
    -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with
    +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with
     // apply.
     func IPBlock() *IPBlockApplyConfiguration {
     	return &IPBlockApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
    index 27ea5d9dde..fb1f95a6d6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use
    +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use
     // with apply.
     type NetworkPolicyApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct {
     	Spec                             *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with
    +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with
     // apply.
     func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
     	b := &NetworkPolicyApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *NetworkPolicyApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go
    index 6335ec375d..ca3e174f93 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use
    +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use
     // with apply.
     type NetworkPolicyEgressRuleApplyConfiguration struct {
     	Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
     	To    []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"`
     }
     
    -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with
    +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with
     // apply.
     func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration {
     	return &NetworkPolicyEgressRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go
    index 2ecc4c8c65..1607137204 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use
    +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use
     // with apply.
     type NetworkPolicyIngressRuleApplyConfiguration struct {
     	Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
     	From  []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"`
     }
     
    -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with
    +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with
     // apply.
     func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration {
     	return &NetworkPolicyIngressRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go
    index c69b281225..8a0fa57415 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use
    +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use
     // with apply.
     type NetworkPolicyPeerApplyConfiguration struct {
     	PodSelector       *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
    @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct {
     	IPBlock           *IPBlockApplyConfiguration          `json:"ipBlock,omitempty"`
     }
     
    -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with
    +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with
     // apply.
     func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration {
     	return &NetworkPolicyPeerApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go
    index 0140d771bf..6bc1c1977b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go
    @@ -23,7 +23,7 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use
    +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use
     // with apply.
     type NetworkPolicyPortApplyConfiguration struct {
     	Protocol *v1.Protocol        `json:"protocol,omitempty"`
    @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct {
     	EndPort  *int32              `json:"endPort,omitempty"`
     }
     
    -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with
    +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with
     // apply.
     func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration {
     	return &NetworkPolicyPortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go
    index 179e4bd024..4454329c5b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use
    +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use
     // with apply.
     type NetworkPolicySpecApplyConfiguration struct {
     	PodSelector *v1.LabelSelectorApplyConfiguration          `json:"podSelector,omitempty"`
    @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct {
     	PolicyTypes []extensionsv1beta1.PolicyType               `json:"policyTypes,omitempty"`
     }
     
    -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with
    +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with
     // apply.
     func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration {
     	return &NetworkPolicySpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
    index b2afc835d8..24c6b6ad1a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use
    +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use
     // with apply.
     type ReplicaSetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct {
     	Status                           *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with
    +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with
     // apply.
     func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
     	b := &ReplicaSetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ReplicaSetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
    index b717365175..21a25ae81f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use
    +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
     // with apply.
     type ReplicaSetConditionApplyConfiguration struct {
     	Type               *v1beta1.ReplicaSetConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with
    +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
     // apply.
     func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration {
     	return &ReplicaSetConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go
    index 5d0c570149..27653dd1af 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use
    +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use
     // with apply.
     type ReplicaSetSpecApplyConfiguration struct {
     	Replicas        *int32                                    `json:"replicas,omitempty"`
    @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct {
     	Template        *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
     }
     
    -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with
    +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with
     // apply.
     func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration {
     	return &ReplicaSetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go
    index 45dc4bf319..9a5b468a3f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use
    +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use
     // with apply.
     type ReplicaSetStatusApplyConfiguration struct {
     	Replicas             *int32                                  `json:"replicas,omitempty"`
    @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct {
     	Conditions           []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with
    +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with
     // apply.
     func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration {
     	return &ReplicaSetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go
    index 131e57a39d..775f82eef8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use
    +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use
     // with apply.
     type RollbackConfigApplyConfiguration struct {
     	Revision *int64 `json:"revision,omitempty"`
     }
     
    -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with
    +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with
     // apply.
     func RollbackConfig() *RollbackConfigApplyConfiguration {
     	return &RollbackConfigApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go
    index 3aa5e2f891..4352f7fac7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use
    +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use
     // with apply.
     type RollingUpdateDaemonSetApplyConfiguration struct {
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     	MaxSurge       *intstr.IntOrString `json:"maxSurge,omitempty"`
     }
     
    -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with
    +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with
     // apply.
     func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration {
     	return &RollingUpdateDaemonSetApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go
    index dde5f064b0..244701a5e0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go
    @@ -22,14 +22,14 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use
    +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
     // with apply.
     type RollingUpdateDeploymentApplyConfiguration struct {
     	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
     	MaxSurge       *intstr.IntOrString `json:"maxSurge,omitempty"`
     }
     
    -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with
    +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
     // apply.
     func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration {
     	return &RollingUpdateDeploymentApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
    index 60a1a8430c..101aa055b0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
    @@ -25,7 +25,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use
    +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use
     // with apply.
     type ScaleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -34,7 +34,7 @@ type ScaleApplyConfiguration struct {
     	Status                           *v1beta1.ScaleStatus `json:"status,omitempty"`
     }
     
    -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with
    +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
     // apply.
     func Scale() *ScaleApplyConfiguration {
     	b := &ScaleApplyConfiguration{}
    @@ -216,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleAp
     	b.Status = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ScaleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go
    index cd21214f5a..4e5805f394 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
    +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
     // with apply.
     type ExemptPriorityLevelConfigurationApplyConfiguration struct {
     	NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
     	LendablePercent          *int32 `json:"lendablePercent,omitempty"`
     }
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
    +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
     // apply.
     func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
     	return &ExemptPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
    index d9c8a79cc8..0f3b61af97 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/api/flowcontrol/v1"
     )
     
    -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use
    +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
     // with apply.
     type FlowDistinguisherMethodApplyConfiguration struct {
     	Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"`
     }
     
    -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with
    +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
     // apply.
     func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
     	return &FlowDistinguisherMethodApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
    index 8809fafbae..9e3978af5b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use
    +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
     // with apply.
     type FlowSchemaApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct {
     	Status                           *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with
    +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
     // apply.
     func FlowSchema(name string) *FlowSchemaApplyConfiguration {
     	b := &FlowSchemaApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *FlowSchemaApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
    index 808ab09a55..5f26a66d2f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use
    +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
     // with apply.
     type FlowSchemaConditionApplyConfiguration struct {
     	Type               *v1.FlowSchemaConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct {
     	Message            *string                     `json:"message,omitempty"`
     }
     
    -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with
    +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
     // apply.
     func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
     	return &FlowSchemaConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go
    index 2785f5baf3..4efd5d2875 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use
    +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
     // with apply.
     type FlowSchemaSpecApplyConfiguration struct {
     	PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
    @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct {
     	Rules                      []PolicyRulesWithSubjectsApplyConfiguration            `json:"rules,omitempty"`
     }
     
    -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with
    +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
     // apply.
     func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration {
     	return &FlowSchemaSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go
    index 7c61360a53..6f951967e8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use
    +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
     // with apply.
     type FlowSchemaStatusApplyConfiguration struct {
     	Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with
    +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with
     // apply.
     func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration {
     	return &FlowSchemaStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go
    index 92a03d8628..0be9eddfd6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use
    +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
     // with apply.
     type GroupSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with
    +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with
     // apply.
     func GroupSubject() *GroupSubjectApplyConfiguration {
     	return &GroupSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go
    index c19f097035..8e27642985 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use
    +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
     // with apply.
     type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	NominalConcurrencyShares *int32                           `json:"nominalConcurrencyShares,omitempty"`
    @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	BorrowingLimitPercent    *int32                           `json:"borrowingLimitPercent,omitempty"`
     }
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with
    +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
     // apply.
     func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration {
     	return &LimitedPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
    index 03ff6d9103..454ed8beb4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/api/flowcontrol/v1"
     )
     
    -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use
    +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
     // with apply.
     type LimitResponseApplyConfiguration struct {
     	Type    *v1.LimitResponseType                   `json:"type,omitempty"`
     	Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
     }
     
    -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with
    +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with
     // apply.
     func LimitResponse() *LimitResponseApplyConfiguration {
     	return &LimitResponseApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go
    index d9f8c2eccf..29c26b3406 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use
    +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
     // with apply.
     type NonResourcePolicyRuleApplyConfiguration struct {
     	Verbs           []string `json:"verbs,omitempty"`
     	NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
     }
     
    -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with
    +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with
     // apply.
     func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration {
     	return &NonResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go
    index b193efa8bf..088afdc584 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use
    +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
     // with apply.
     type PolicyRulesWithSubjectsApplyConfiguration struct {
     	Subjects         []SubjectApplyConfiguration               `json:"subjects,omitempty"`
    @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct {
     	NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
     }
     
    -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with
    +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with
     // apply.
     func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration {
     	return &PolicyRulesWithSubjectsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
    index e8a1b97c9f..bcce2679c6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use
    +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
     // with apply.
     type PriorityLevelConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct {
     	Status                           *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with
    +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
     // apply.
     func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration {
     	b := &PriorityLevelConfigurationApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
    index 6ce588c8d9..42ccbfbf9d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use
    +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
     // with apply.
     type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Type               *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Message            *string                                     `json:"message,omitempty"`
     }
     
    -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with
    +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
     // apply.
     func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration {
     	return &PriorityLevelConfigurationConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go
    index 0638aee8b8..f445713f0c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use
    +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
     // with apply.
     type PriorityLevelConfigurationReferenceApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with
    +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with
     // apply.
     func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration {
     	return &PriorityLevelConfigurationReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
    index 5d88749593..2262dedca9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/flowcontrol/v1"
     )
     
    -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use
    +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
     // with apply.
     type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Type    *v1.PriorityLevelEnablement                          `json:"type,omitempty"`
    @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Exempt  *ExemptPriorityLevelConfigurationApplyConfiguration  `json:"exempt,omitempty"`
     }
     
    -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
    +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
     // apply.
     func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration {
     	return &PriorityLevelConfigurationSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go
    index 322871edc6..ff650bc3d5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use
    +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
     // with apply.
     type PriorityLevelConfigurationStatusApplyConfiguration struct {
     	Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with
    +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with
     // apply.
     func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration {
     	return &PriorityLevelConfigurationStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go
    index 69fd2c23cc..7488f9bbe2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use
    +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
     // with apply.
     type QueuingConfigurationApplyConfiguration struct {
     	Queues           *int32 `json:"queues,omitempty"`
    @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct {
     	QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
     }
     
    -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with
    +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with
     // apply.
     func QueuingConfiguration() *QueuingConfigurationApplyConfiguration {
     	return &QueuingConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go
    index 0991ce9445..7428582a82 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use
    +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
     // with apply.
     type ResourcePolicyRuleApplyConfiguration struct {
     	Verbs        []string `json:"verbs,omitempty"`
    @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct {
     	Namespaces   []string `json:"namespaces,omitempty"`
     }
     
    -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with
    +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
     // apply.
     func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration {
     	return &ResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go
    index 55787ca767..58ad10764b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use
    +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
     // with apply.
     type ServiceAccountSubjectApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     	Name      *string `json:"name,omitempty"`
     }
     
    -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with
    +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
     // apply.
     func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration {
     	return &ServiceAccountSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
    index f02b03bdc7..1ec77ae89b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/flowcontrol/v1"
     )
     
    -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use
    +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
     // with apply.
     type SubjectApplyConfiguration struct {
     	Kind           *v1.SubjectKind                          `json:"kind,omitempty"`
    @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct {
     	ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
     }
     
    -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with
    +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
     // apply.
     func Subject() *SubjectApplyConfiguration {
     	return &SubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go
    index 2d17c111c6..fd90067d4d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use
    +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
     // with apply.
     type UserSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with
    +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with
     // apply.
     func UserSubject() *UserSubjectApplyConfiguration {
     	return &UserSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
    index 0710480900..45ccc5cb75 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
    +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
     // with apply.
     type ExemptPriorityLevelConfigurationApplyConfiguration struct {
     	NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
     	LendablePercent          *int32 `json:"lendablePercent,omitempty"`
     }
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
    +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
     // apply.
     func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
     	return &ExemptPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
    index 6dc1bb4d68..29a8999b8d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
    @@ -22,13 +22,13 @@ import (
     	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
     )
     
    -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use
    +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
     // with apply.
     type FlowDistinguisherMethodApplyConfiguration struct {
     	Type *v1beta1.FlowDistinguisherMethodType `json:"type,omitempty"`
     }
     
    -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with
    +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
     // apply.
     func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
     	return &FlowDistinguisherMethodApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
    index f44313f54e..09bd258905 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use
    +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
     // with apply.
     type FlowSchemaApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct {
     	Status                           *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with
    +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
     // apply.
     func FlowSchema(name string) *FlowSchemaApplyConfiguration {
     	b := &FlowSchemaApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *FlowSchemaApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
    index b62e9a22ff..d1c3dbec6f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use
    +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
     // with apply.
     type FlowSchemaConditionApplyConfiguration struct {
     	Type               *v1beta1.FlowSchemaConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with
    +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
     // apply.
     func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
     	return &FlowSchemaConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go
    index 8d72c2d0d7..1d6e8fc58e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use
    +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
     // with apply.
     type FlowSchemaSpecApplyConfiguration struct {
     	PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
    @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct {
     	Rules                      []PolicyRulesWithSubjectsApplyConfiguration            `json:"rules,omitempty"`
     }
     
    -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with
    +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
     // apply.
     func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration {
     	return &FlowSchemaSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go
    index 6bc6d0543a..5ad8a432b2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use
    +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
     // with apply.
     type FlowSchemaStatusApplyConfiguration struct {
     	Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with
    +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with
     // apply.
     func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration {
     	return &FlowSchemaStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go
    index 95b416e426..cc274fe2f3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use
    +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
     // with apply.
     type GroupSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with
    +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with
     // apply.
     func GroupSubject() *GroupSubjectApplyConfiguration {
     	return &GroupSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go
    index 6f57169e1f..0fe5feca12 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use
    +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
     // with apply.
     type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	AssuredConcurrencyShares *int32                           `json:"assuredConcurrencyShares,omitempty"`
    @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	BorrowingLimitPercent    *int32                           `json:"borrowingLimitPercent,omitempty"`
     }
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with
    +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
     // apply.
     func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration {
     	return &LimitedPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
    index 86e1bef6b9..66f3276010 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
    @@ -22,14 +22,14 @@ import (
     	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
     )
     
    -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use
    +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
     // with apply.
     type LimitResponseApplyConfiguration struct {
     	Type    *v1beta1.LimitResponseType              `json:"type,omitempty"`
     	Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
     }
     
    -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with
    +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with
     // apply.
     func LimitResponse() *LimitResponseApplyConfiguration {
     	return &LimitResponseApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go
    index 594ebc9912..3c571ccb06 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use
    +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
     // with apply.
     type NonResourcePolicyRuleApplyConfiguration struct {
     	Verbs           []string `json:"verbs,omitempty"`
     	NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
     }
     
    -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with
    +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with
     // apply.
     func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration {
     	return &NonResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go
    index ea5b266b4c..32a082dc76 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use
    +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
     // with apply.
     type PolicyRulesWithSubjectsApplyConfiguration struct {
     	Subjects         []SubjectApplyConfiguration               `json:"subjects,omitempty"`
    @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct {
     	NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
     }
     
    -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with
    +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with
     // apply.
     func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration {
     	return &PolicyRulesWithSubjectsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
    index 84324dbfdc..c4243f874c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use
    +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
     // with apply.
     type PriorityLevelConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct {
     	Status                           *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with
    +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
     // apply.
     func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration {
     	b := &PriorityLevelConfigurationApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
    index 59bc610510..1ad4a554b7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use
    +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
     // with apply.
     type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Type               *v1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Message            *string                                          `json:"message,omitempty"`
     }
     
    -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with
    +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
     // apply.
     func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration {
     	return &PriorityLevelConfigurationConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go
    index c44bcc08b4..b5e773e82a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use
    +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
     // with apply.
     type PriorityLevelConfigurationReferenceApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with
    +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with
     // apply.
     func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration {
     	return &PriorityLevelConfigurationReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
    index 19146d9f66..b013845f43 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
    @@ -22,7 +22,7 @@ import (
     	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
     )
     
    -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use
    +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
     // with apply.
     type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Type    *v1beta1.PriorityLevelEnablement                     `json:"type,omitempty"`
    @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Exempt  *ExemptPriorityLevelConfigurationApplyConfiguration  `json:"exempt,omitempty"`
     }
     
    -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
    +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
     // apply.
     func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration {
     	return &PriorityLevelConfigurationSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go
    index 3c27e6aa62..875b01efec 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use
    +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
     // with apply.
     type PriorityLevelConfigurationStatusApplyConfiguration struct {
     	Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with
    +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with
     // apply.
     func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration {
     	return &PriorityLevelConfigurationStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go
    index 5e6e6e7b01..85a8b88630 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use
    +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
     // with apply.
     type QueuingConfigurationApplyConfiguration struct {
     	Queues           *int32 `json:"queues,omitempty"`
    @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct {
     	QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
     }
     
    -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with
    +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with
     // apply.
     func QueuingConfiguration() *QueuingConfigurationApplyConfiguration {
     	return &QueuingConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go
    index 2e12ee1cc0..5c67dad759 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use
    +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
     // with apply.
     type ResourcePolicyRuleApplyConfiguration struct {
     	Verbs        []string `json:"verbs,omitempty"`
    @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct {
     	Namespaces   []string `json:"namespaces,omitempty"`
     }
     
    -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with
    +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
     // apply.
     func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration {
     	return &ResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go
    index f5a146a9b1..439e5ff753 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use
    +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
     // with apply.
     type ServiceAccountSubjectApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     	Name      *string `json:"name,omitempty"`
     }
     
    -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with
    +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
     // apply.
     func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration {
     	return &ServiceAccountSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
    index af571029fc..b5c231f6d2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
    @@ -22,7 +22,7 @@ import (
     	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
     )
     
    -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use
    +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
     // with apply.
     type SubjectApplyConfiguration struct {
     	Kind           *v1beta1.SubjectKind                     `json:"kind,omitempty"`
    @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct {
     	ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
     }
     
    -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with
    +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
     // apply.
     func Subject() *SubjectApplyConfiguration {
     	return &SubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go
    index 35bf27a593..bc2deae4c2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use
    +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
     // with apply.
     type UserSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with
    +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with
     // apply.
     func UserSubject() *UserSubjectApplyConfiguration {
     	return &UserSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
    index d6bc330fe7..0c02d9b389 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta2
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
    +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
     // with apply.
     type ExemptPriorityLevelConfigurationApplyConfiguration struct {
     	NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
     	LendablePercent          *int32 `json:"lendablePercent,omitempty"`
     }
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
    +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
     // apply.
     func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
     	return &ExemptPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
    index 924f966d48..e3c4b97a7b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
    @@ -22,13 +22,13 @@ import (
     	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
     )
     
    -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use
    +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
     // with apply.
     type FlowDistinguisherMethodApplyConfiguration struct {
     	Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"`
     }
     
    -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with
    +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
     // apply.
     func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
     	return &FlowDistinguisherMethodApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
    index 63a5f0aa30..ffc3af950a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use
    +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
     // with apply.
     type FlowSchemaApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct {
     	Status                           *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with
    +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
     // apply.
     func FlowSchema(name string) *FlowSchemaApplyConfiguration {
     	b := &FlowSchemaApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *FlowSchemaApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
    index 04dfcbf11a..44571d263d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use
    +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
     // with apply.
     type FlowSchemaConditionApplyConfiguration struct {
     	Type               *v1beta2.FlowSchemaConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with
    +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
     // apply.
     func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
     	return &FlowSchemaConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go
    index a5477e2768..6eab63bfa9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use
    +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
     // with apply.
     type FlowSchemaSpecApplyConfiguration struct {
     	PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
    @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct {
     	Rules                      []PolicyRulesWithSubjectsApplyConfiguration            `json:"rules,omitempty"`
     }
     
    -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with
    +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
     // apply.
     func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration {
     	return &FlowSchemaSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go
    index 67c5be2cbe..70ac997e45 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta2
     
    -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use
    +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
     // with apply.
     type FlowSchemaStatusApplyConfiguration struct {
     	Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with
    +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with
     // apply.
     func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration {
     	return &FlowSchemaStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go
    index b670f2cfd9..25207d7c1a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta2
     
    -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use
    +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
     // with apply.
     type GroupSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with
    +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with
     // apply.
     func GroupSubject() *GroupSubjectApplyConfiguration {
     	return &GroupSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go
    index 563b185c74..298dd46370 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use
    +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
     // with apply.
     type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	AssuredConcurrencyShares *int32                           `json:"assuredConcurrencyShares,omitempty"`
    @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	BorrowingLimitPercent    *int32                           `json:"borrowingLimitPercent,omitempty"`
     }
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with
    +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
     // apply.
     func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration {
     	return &LimitedPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
    index a9b7661fb2..38a513d306 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
    @@ -22,14 +22,14 @@ import (
     	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
     )
     
    -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use
    +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
     // with apply.
     type LimitResponseApplyConfiguration struct {
     	Type    *v1beta2.LimitResponseType              `json:"type,omitempty"`
     	Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
     }
     
    -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with
    +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with
     // apply.
     func LimitResponse() *LimitResponseApplyConfiguration {
     	return &LimitResponseApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go
    index cb8ba0afd6..5032ee4898 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta2
     
    -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use
    +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
     // with apply.
     type NonResourcePolicyRuleApplyConfiguration struct {
     	Verbs           []string `json:"verbs,omitempty"`
     	NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
     }
     
    -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with
    +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with
     // apply.
     func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration {
     	return &NonResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go
    index 179c3979db..2bb8c87182 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use
    +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
     // with apply.
     type PolicyRulesWithSubjectsApplyConfiguration struct {
     	Subjects         []SubjectApplyConfiguration               `json:"subjects,omitempty"`
    @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct {
     	NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
     }
     
    -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with
    +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with
     // apply.
     func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration {
     	return &PolicyRulesWithSubjectsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
    index 3256b36300..7d52ca2c2a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use
    +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
     // with apply.
     type PriorityLevelConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct {
     	Status                           *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with
    +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
     // apply.
     func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration {
     	b := &PriorityLevelConfigurationApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
    index f742adeff0..ddb17e9843 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use
    +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
     // with apply.
     type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Type               *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Message            *string                                          `json:"message,omitempty"`
     }
     
    -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with
    +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
     // apply.
     func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration {
     	return &PriorityLevelConfigurationConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go
    index 581b451ffd..bbf718b60f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta2
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use
    +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
     // with apply.
     type PriorityLevelConfigurationReferenceApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with
    +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with
     // apply.
     func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration {
     	return &PriorityLevelConfigurationReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
    index 994a8a16a2..c083ad0ba6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
    @@ -22,7 +22,7 @@ import (
     	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
     )
     
    -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use
    +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
     // with apply.
     type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Type    *v1beta2.PriorityLevelEnablement                     `json:"type,omitempty"`
    @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Exempt  *ExemptPriorityLevelConfigurationApplyConfiguration  `json:"exempt,omitempty"`
     }
     
    -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
    +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
     // apply.
     func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration {
     	return &PriorityLevelConfigurationSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go
    index b55e32be00..7a1f8790b9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta2
     
    -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use
    +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
     // with apply.
     type PriorityLevelConfigurationStatusApplyConfiguration struct {
     	Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with
    +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with
     // apply.
     func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration {
     	return &PriorityLevelConfigurationStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go
    index 06246fb27e..19c34c5f83 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use
    +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
     // with apply.
     type QueuingConfigurationApplyConfiguration struct {
     	Queues           *int32 `json:"queues,omitempty"`
    @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct {
     	QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
     }
     
    -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with
    +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with
     // apply.
     func QueuingConfiguration() *QueuingConfigurationApplyConfiguration {
     	return &QueuingConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go
    index b67ea1c7f9..070d2ed465 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta2
     
    -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use
    +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
     // with apply.
     type ResourcePolicyRuleApplyConfiguration struct {
     	Verbs        []string `json:"verbs,omitempty"`
    @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct {
     	Namespaces   []string `json:"namespaces,omitempty"`
     }
     
    -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with
    +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
     // apply.
     func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration {
     	return &ResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go
    index b6cfdcad3b..c0d44721cc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta2
     
    -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use
    +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
     // with apply.
     type ServiceAccountSubjectApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     	Name      *string `json:"name,omitempty"`
     }
     
    -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with
    +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
     // apply.
     func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration {
     	return &ServiceAccountSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
    index 7030785b8c..2cfaab43d8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
    @@ -22,7 +22,7 @@ import (
     	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
     )
     
    -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use
    +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
     // with apply.
     type SubjectApplyConfiguration struct {
     	Kind           *v1beta2.SubjectKind                     `json:"kind,omitempty"`
    @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct {
     	ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
     }
     
    -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with
    +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
     // apply.
     func Subject() *SubjectApplyConfiguration {
     	return &SubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go
    index 8c77b3e8a2..c249f042da 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta2
     
    -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use
    +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
     // with apply.
     type UserSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with
    +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with
     // apply.
     func UserSubject() *UserSubjectApplyConfiguration {
     	return &UserSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
    index b03c11d0d9..b9bf6993af 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta3
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
    +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
     // with apply.
     type ExemptPriorityLevelConfigurationApplyConfiguration struct {
     	NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
     	LendablePercent          *int32 `json:"lendablePercent,omitempty"`
     }
     
    -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
    +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
     // apply.
     func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
     	return &ExemptPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
    index cd45725932..49d84bd866 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
    @@ -22,13 +22,13 @@ import (
     	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
     )
     
    -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use
    +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
     // with apply.
     type FlowDistinguisherMethodApplyConfiguration struct {
     	Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"`
     }
     
    -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with
    +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
     // apply.
     func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
     	return &FlowDistinguisherMethodApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
    index c95635360e..1f69c43b23 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use
    +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
     // with apply.
     type FlowSchemaApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct {
     	Status                           *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with
    +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
     // apply.
     func FlowSchema(name string) *FlowSchemaApplyConfiguration {
     	b := &FlowSchemaApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *FlowSchemaApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
    index 0ef3a2c921..41d623aeb8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use
    +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
     // with apply.
     type FlowSchemaConditionApplyConfiguration struct {
     	Type               *v1beta3.FlowSchemaConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct {
     	Message            *string                          `json:"message,omitempty"`
     }
     
    -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with
    +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
     // apply.
     func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
     	return &FlowSchemaConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go
    index e077ed3fde..7141f6a6a1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta3
     
    -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use
    +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
     // with apply.
     type FlowSchemaSpecApplyConfiguration struct {
     	PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
    @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct {
     	Rules                      []PolicyRulesWithSubjectsApplyConfiguration            `json:"rules,omitempty"`
     }
     
    -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with
    +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
     // apply.
     func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration {
     	return &FlowSchemaSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go
    index 18db1c9325..294ddc9098 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta3
     
    -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use
    +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
     // with apply.
     type FlowSchemaStatusApplyConfiguration struct {
     	Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with
    +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with
     // apply.
     func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration {
     	return &FlowSchemaStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go
    index b919b711b3..6576e716ef 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta3
     
    -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use
    +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
     // with apply.
     type GroupSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with
    +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with
     // apply.
     func GroupSubject() *GroupSubjectApplyConfiguration {
     	return &GroupSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go
    index 269a48721c..bd98dd683c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta3
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use
    +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
     // with apply.
     type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	NominalConcurrencyShares *int32                           `json:"nominalConcurrencyShares,omitempty"`
    @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct {
     	BorrowingLimitPercent    *int32                           `json:"borrowingLimitPercent,omitempty"`
     }
     
    -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with
    +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
     // apply.
     func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration {
     	return &LimitedPriorityLevelConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
    index b7a64ebfee..8deaabdebd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
    @@ -22,14 +22,14 @@ import (
     	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
     )
     
    -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use
    +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
     // with apply.
     type LimitResponseApplyConfiguration struct {
     	Type    *v1beta3.LimitResponseType              `json:"type,omitempty"`
     	Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
     }
     
    -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with
    +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with
     // apply.
     func LimitResponse() *LimitResponseApplyConfiguration {
     	return &LimitResponseApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go
    index ecb47f52cf..2dd0d2b068 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta3
     
    -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use
    +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
     // with apply.
     type NonResourcePolicyRuleApplyConfiguration struct {
     	Verbs           []string `json:"verbs,omitempty"`
     	NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
     }
     
    -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with
    +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with
     // apply.
     func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration {
     	return &NonResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go
    index e30aace194..cc64dc585b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta3
     
    -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use
    +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
     // with apply.
     type PolicyRulesWithSubjectsApplyConfiguration struct {
     	Subjects         []SubjectApplyConfiguration               `json:"subjects,omitempty"`
    @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct {
     	NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
     }
     
    -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with
    +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with
     // apply.
     func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration {
     	return &PolicyRulesWithSubjectsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
    index 6fbbbea8fe..e7d1a3a5f8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use
    +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
     // with apply.
     type PriorityLevelConfigurationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct {
     	Status                           *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with
    +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
     // apply.
     func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration {
     	b := &PriorityLevelConfigurationApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
    index 6e36b6a079..8e9687bb90 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use
    +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
     // with apply.
     type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Type               *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct {
     	Message            *string                                          `json:"message,omitempty"`
     }
     
    -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with
    +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
     // apply.
     func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration {
     	return &PriorityLevelConfigurationConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go
    index cb827b1e62..566aaa916b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta3
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use
    +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
     // with apply.
     type PriorityLevelConfigurationReferenceApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with
    +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with
     // apply.
     func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration {
     	return &PriorityLevelConfigurationReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
    index 5b0680d912..9fa1112ce6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
    @@ -22,7 +22,7 @@ import (
     	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
     )
     
    -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use
    +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
     // with apply.
     type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Type    *v1beta3.PriorityLevelEnablement                     `json:"type,omitempty"`
    @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct {
     	Exempt  *ExemptPriorityLevelConfigurationApplyConfiguration  `json:"exempt,omitempty"`
     }
     
    -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
    +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
     // apply.
     func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration {
     	return &PriorityLevelConfigurationSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go
    index 0ee9e306eb..be2436457e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta3
     
    -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use
    +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
     // with apply.
     type PriorityLevelConfigurationStatusApplyConfiguration struct {
     	Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with
    +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with
     // apply.
     func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration {
     	return &PriorityLevelConfigurationStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go
    index fc86c44431..f9a3c6d1a6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta3
     
    -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use
    +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
     // with apply.
     type QueuingConfigurationApplyConfiguration struct {
     	Queues           *int32 `json:"queues,omitempty"`
    @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct {
     	QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
     }
     
    -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with
    +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with
     // apply.
     func QueuingConfiguration() *QueuingConfigurationApplyConfiguration {
     	return &QueuingConfigurationApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go
    index 72623ffe49..e38f711db0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta3
     
    -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use
    +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
     // with apply.
     type ResourcePolicyRuleApplyConfiguration struct {
     	Verbs        []string `json:"verbs,omitempty"`
    @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct {
     	Namespaces   []string `json:"namespaces,omitempty"`
     }
     
    -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with
    +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
     // apply.
     func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration {
     	return &ResourcePolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go
    index e2d6b1b213..a5ed40c2ae 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta3
     
    -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use
    +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
     // with apply.
     type ServiceAccountSubjectApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     	Name      *string `json:"name,omitempty"`
     }
     
    -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with
    +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
     // apply.
     func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration {
     	return &ServiceAccountSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
    index f13b8f3ec5..c412b2a7a2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
    @@ -22,7 +22,7 @@ import (
     	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
     )
     
    -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use
    +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
     // with apply.
     type SubjectApplyConfiguration struct {
     	Kind           *v1beta3.SubjectKind                     `json:"kind,omitempty"`
    @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct {
     	ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
     }
     
    -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with
    +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
     // apply.
     func Subject() *SubjectApplyConfiguration {
     	return &SubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go
    index 3db3abbc1a..7b3ec2ba82 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta3
     
    -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use
    +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
     // with apply.
     type UserSubjectApplyConfiguration struct {
     	Name *string `json:"name,omitempty"`
     }
     
    -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with
    +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with
     // apply.
     func UserSubject() *UserSubjectApplyConfiguration {
     	return &UserSubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go
    new file mode 100644
    index 0000000000..91944002d4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go
    @@ -0,0 +1,262 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// ImageReviewApplyConfiguration represents a declarative configuration of the ImageReview type for use
    +// with apply.
    +type ImageReviewApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *ImageReviewSpecApplyConfiguration   `json:"spec,omitempty"`
    +	Status                           *ImageReviewStatusApplyConfiguration `json:"status,omitempty"`
    +}
    +
    +// ImageReview constructs a declarative configuration of the ImageReview type for use with
    +// apply.
    +func ImageReview(name string) *ImageReviewApplyConfiguration {
    +	b := &ImageReviewApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithKind("ImageReview")
    +	b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
    +	return b
    +}
    +
    +// ExtractImageReview extracts the applied configuration owned by fieldManager from
    +// imageReview. If no managedFields are found in imageReview for fieldManager, an
    +// ImageReviewApplyConfiguration is returned with only the Name, Namespace (if applicable),
    +// APIVersion and Kind populated. It is possible that no managed fields were found because other
    +// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    +// the fieldManager never owned any fields.
    +// imageReview must be an unmodified ImageReview API object that was retrieved from the Kubernetes API.
    +// ExtractImageReview provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
    +	return extractImageReview(imageReview, fieldManager, "")
    +}
    +
    +// ExtractImageReviewStatus is the same as ExtractImageReview except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractImageReviewStatus(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
    +	return extractImageReview(imageReview, fieldManager, "status")
    +}
    +
    +func extractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string, subresource string) (*ImageReviewApplyConfiguration, error) {
    +	b := &ImageReviewApplyConfiguration{}
    +	err := managedfields.ExtractInto(imageReview, internal.Parser().Type("io.k8s.api.imagepolicy.v1alpha1.ImageReview"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(imageReview.Name)
    +
    +	b.WithKind("ImageReview")
    +	b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithKind(value string) *ImageReviewApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithAPIVersion(value string) *ImageReviewApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithName(value string) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithGenerateName(value string) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithNamespace(value string) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithUID(value types.UID) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithResourceVersion(value string) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithGeneration(value int64) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Labels field,
    +// overwriting existing map entries in the Labels field with the same key.
    +func (b *ImageReviewApplyConfiguration) WithLabels(entries map[string]string) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Annotations field,
    +// overwriting existing map entries in the Annotations field with the same key.
    +func (b *ImageReviewApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *ImageReviewApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *ImageReviewApplyConfiguration) WithFinalizers(values ...string) *ImageReviewApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *ImageReviewApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithSpec(value *ImageReviewSpecApplyConfiguration) *ImageReviewApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// WithStatus sets the Status field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Status field is set to the value of the last call.
    +func (b *ImageReviewApplyConfiguration) WithStatus(value *ImageReviewStatusApplyConfiguration) *ImageReviewApplyConfiguration {
    +	b.Status = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ImageReviewApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
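
Illustrative sketch (not part of the vendored patch): the generated ImageReview builder above follows the usual apply-configuration pattern, where the constructor pre-populates Name, Kind and APIVersion and each With* setter returns the receiver so calls can be chained. The name, namespace and image reference below are placeholders.

package main

import (
	"fmt"

	imagepolicy "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1"
)

func main() {
	// ImageReview("demo-review") pre-fills Name, Kind and APIVersion; the
	// chained setters build out the rest of the declarative configuration.
	review := imagepolicy.ImageReview("demo-review").
		WithNamespace("default").
		WithSpec(imagepolicy.ImageReviewSpec().
			WithContainers(imagepolicy.ImageReviewContainerSpec().
				WithImage("registry.example.com/app:1.0")))

	fmt.Println(*review.GetName(), *review.Kind, *review.APIVersion)
}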
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go
    new file mode 100644
    index 0000000000..adfdb32584
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// ImageReviewContainerSpecApplyConfiguration represents a declarative configuration of the ImageReviewContainerSpec type for use
    +// with apply.
    +type ImageReviewContainerSpecApplyConfiguration struct {
    +	Image *string `json:"image,omitempty"`
    +}
    +
    +// ImageReviewContainerSpecApplyConfiguration constructs a declarative configuration of the ImageReviewContainerSpec type for use with
    +// apply.
    +func ImageReviewContainerSpec() *ImageReviewContainerSpecApplyConfiguration {
    +	return &ImageReviewContainerSpecApplyConfiguration{}
    +}
    +
    +// WithImage sets the Image field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Image field is set to the value of the last call.
    +func (b *ImageReviewContainerSpecApplyConfiguration) WithImage(value string) *ImageReviewContainerSpecApplyConfiguration {
    +	b.Image = &value
    +	return b
    +}
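
Illustrative only, with placeholder image tags: scalar setters such as WithImage are last-write-wins, so repeating a call simply replaces the previously stored value.

package main

import (
	"fmt"

	imagepolicy "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1"
)

func main() {
	// The second WithImage call overwrites the value set by the first.
	c := imagepolicy.ImageReviewContainerSpec().
		WithImage("registry.example.com/app:1.0").
		WithImage("registry.example.com/app:1.1")

	fmt.Println(*c.Image) // prints registry.example.com/app:1.1
}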
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go
    new file mode 100644
    index 0000000000..7efc36a321
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go
    @@ -0,0 +1,68 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// ImageReviewSpecApplyConfiguration represents a declarative configuration of the ImageReviewSpec type for use
    +// with apply.
    +type ImageReviewSpecApplyConfiguration struct {
    +	Containers  []ImageReviewContainerSpecApplyConfiguration `json:"containers,omitempty"`
    +	Annotations map[string]string                            `json:"annotations,omitempty"`
    +	Namespace   *string                                      `json:"namespace,omitempty"`
    +}
    +
    +// ImageReviewSpecApplyConfiguration constructs a declarative configuration of the ImageReviewSpec type for use with
    +// apply.
    +func ImageReviewSpec() *ImageReviewSpecApplyConfiguration {
    +	return &ImageReviewSpecApplyConfiguration{}
    +}
    +
    +// WithContainers adds the given value to the Containers field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Containers field.
    +func (b *ImageReviewSpecApplyConfiguration) WithContainers(values ...*ImageReviewContainerSpecApplyConfiguration) *ImageReviewSpecApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithContainers")
    +		}
    +		b.Containers = append(b.Containers, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Annotations field,
    +// overwriting existing map entries in the Annotations field with the same key.
    +func (b *ImageReviewSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewSpecApplyConfiguration {
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *ImageReviewSpecApplyConfiguration) WithNamespace(value string) *ImageReviewSpecApplyConfiguration {
    +	b.Namespace = &value
    +	return b
    +}
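
Illustrative sketch with made-up annotation keys: map setters such as WithAnnotations merge rather than replace, so entries accumulate across calls and a repeated key takes the value from the later call.

package main

import (
	"fmt"

	imagepolicy "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1"
)

func main() {
	// Entries from both calls are merged; "env" ends up as "prod".
	spec := imagepolicy.ImageReviewSpec().
		WithAnnotations(map[string]string{"team": "alpha", "env": "dev"}).
		WithAnnotations(map[string]string{"env": "prod"})

	fmt.Println(spec.Annotations) // map[env:prod team:alpha]
}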
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go
    new file mode 100644
    index 0000000000..e26a427e69
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go
    @@ -0,0 +1,63 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// ImageReviewStatusApplyConfiguration represents a declarative configuration of the ImageReviewStatus type for use
    +// with apply.
    +type ImageReviewStatusApplyConfiguration struct {
    +	Allowed          *bool             `json:"allowed,omitempty"`
    +	Reason           *string           `json:"reason,omitempty"`
    +	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty"`
    +}
    +
    +// ImageReviewStatusApplyConfiguration constructs a declarative configuration of the ImageReviewStatus type for use with
    +// apply.
    +func ImageReviewStatus() *ImageReviewStatusApplyConfiguration {
    +	return &ImageReviewStatusApplyConfiguration{}
    +}
    +
    +// WithAllowed sets the Allowed field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Allowed field is set to the value of the last call.
    +func (b *ImageReviewStatusApplyConfiguration) WithAllowed(value bool) *ImageReviewStatusApplyConfiguration {
    +	b.Allowed = &value
    +	return b
    +}
    +
    +// WithReason sets the Reason field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Reason field is set to the value of the last call.
    +func (b *ImageReviewStatusApplyConfiguration) WithReason(value string) *ImageReviewStatusApplyConfiguration {
    +	b.Reason = &value
    +	return b
    +}
    +
    +// WithAuditAnnotations puts the entries into the AuditAnnotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the AuditAnnotations field,
    +// overwriting existing map entries in the AuditAnnotations field with the same key.
    +func (b *ImageReviewStatusApplyConfiguration) WithAuditAnnotations(entries map[string]string) *ImageReviewStatusApplyConfiguration {
    +	if b.AuditAnnotations == nil && len(entries) > 0 {
    +		b.AuditAnnotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.AuditAnnotations[k] = v
    +	}
    +	return b
    +}
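
Illustrative sketch, with hypothetical reason and audit-annotation values: the status builder composes with the top-level ImageReview configuration through WithStatus.

package main

import (
	"fmt"

	imagepolicy "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1"
)

func main() {
	// A denied review result, attached to an ImageReview apply configuration.
	status := imagepolicy.ImageReviewStatus().
		WithAllowed(false).
		WithReason("image not signed").
		WithAuditAnnotations(map[string]string{"policy": "require-signature"})

	review := imagepolicy.ImageReview("demo-review").WithStatus(status)

	fmt.Println(*review.Status.Allowed, *review.Status.Reason)
}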
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
    index 47bfb44e0c..43c9ae05a1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
    @@ -4356,6 +4356,54 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: leaseTransitions
           type:
             scalar: numeric
    +    - name: preferredHolder
    +      type:
    +        scalar: string
    +    - name: renewTime
    +      type:
    +        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime
    +    - name: strategy
    +      type:
    +        scalar: string
    +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidate
    +  map:
    +    fields:
    +    - name: apiVersion
    +      type:
    +        scalar: string
    +    - name: kind
    +      type:
    +        scalar: string
    +    - name: metadata
    +      type:
    +        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    +      default: {}
    +    - name: spec
    +      type:
    +        namedType: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec
    +      default: {}
    +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec
    +  map:
    +    fields:
    +    - name: binaryVersion
    +      type:
    +        scalar: string
    +    - name: emulationVersion
    +      type:
    +        scalar: string
    +    - name: leaseName
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: pingTime
    +      type:
    +        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime
    +    - name: preferredStrategies
    +      type:
    +        list:
    +          elementType:
    +            scalar: string
    +          elementRelationship: atomic
         - name: renewTime
           type:
             namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime
    @@ -4391,9 +4439,15 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: leaseTransitions
           type:
             scalar: numeric
    +    - name: preferredHolder
    +      type:
    +        scalar: string
         - name: renewTime
           type:
             namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime
    +    - name: strategy
    +      type:
    +        scalar: string
     - name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource
       map:
         fields:
    @@ -4454,6 +4508,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: cachingMode
           type:
             scalar: string
    +      default: ReadWrite
         - name: diskName
           type:
             scalar: string
    @@ -4465,12 +4520,15 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: fsType
           type:
             scalar: string
    +      default: ext4
         - name: kind
           type:
             scalar: string
    +      default: Shared
         - name: readOnly
           type:
             scalar: boolean
    +      default: false
     - name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource
       map:
         fields:
    @@ -4655,15 +4713,6 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             scalar: string
           default: ""
    -- name: io.k8s.api.core.v1.ClaimSource
    -  map:
    -    fields:
    -    - name: resourceClaimName
    -      type:
    -        scalar: string
    -    - name: resourceClaimTemplateName
    -      type:
    -        scalar: string
     - name: io.k8s.api.core.v1.ClientIPConfig
       map:
         fields:
    @@ -5047,6 +5096,14 @@ var schemaYAML = typed.YAMLObject(`types:
             map:
               elementType:
                 namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
    +    - name: allocatedResourcesStatus
    +      type:
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.core.v1.ResourceStatus
    +          elementRelationship: associative
    +          keys:
    +          - name
         - name: containerID
           type:
             scalar: string
    @@ -5084,6 +5141,9 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             namedType: io.k8s.api.core.v1.ContainerState
           default: {}
    +    - name: user
    +      type:
    +        namedType: io.k8s.api.core.v1.ContainerUser
         - name: volumeMounts
           type:
             list:
    @@ -5092,6 +5152,12 @@ var schemaYAML = typed.YAMLObject(`types:
               elementRelationship: associative
               keys:
               - mountPath
    +- name: io.k8s.api.core.v1.ContainerUser
    +  map:
    +    fields:
    +    - name: linux
    +      type:
    +        namedType: io.k8s.api.core.v1.LinuxContainerUser
     - name: io.k8s.api.core.v1.DaemonEndpoint
       map:
         fields:
    @@ -5661,6 +5727,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: ip
           type:
             scalar: string
    +      default: ""
     - name: io.k8s.api.core.v1.HostPathVolumeSource
       map:
         fields:
    @@ -5693,6 +5760,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: iscsiInterface
           type:
             scalar: string
    +      default: default
         - name: lun
           type:
             scalar: numeric
    @@ -5735,6 +5803,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: iscsiInterface
           type:
             scalar: string
    +      default: default
         - name: lun
           type:
             scalar: numeric
    @@ -5755,6 +5824,15 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             scalar: string
           default: ""
    +- name: io.k8s.api.core.v1.ImageVolumeSource
    +  map:
    +    fields:
    +    - name: pullPolicy
    +      type:
    +        scalar: string
    +    - name: reference
    +      type:
    +        scalar: string
     - name: io.k8s.api.core.v1.KeyToPath
       map:
         fields:
    @@ -5851,6 +5929,23 @@ var schemaYAML = typed.YAMLObject(`types:
               elementType:
                 namedType: io.k8s.api.core.v1.LimitRangeItem
               elementRelationship: atomic
    +- name: io.k8s.api.core.v1.LinuxContainerUser
    +  map:
    +    fields:
    +    - name: gid
    +      type:
    +        scalar: numeric
    +      default: 0
    +    - name: supplementalGroups
    +      type:
    +        list:
    +          elementType:
    +            scalar: numeric
    +          elementRelationship: atomic
    +    - name: uid
    +      type:
    +        scalar: numeric
    +      default: 0
     - name: io.k8s.api.core.v1.LoadBalancerIngress
       map:
         fields:
    @@ -6079,6 +6174,12 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             namedType: io.k8s.api.core.v1.DaemonEndpoint
           default: {}
    +- name: io.k8s.api.core.v1.NodeFeatures
    +  map:
    +    fields:
    +    - name: supplementalGroupsPolicy
    +      type:
    +        scalar: boolean
     - name: io.k8s.api.core.v1.NodeRuntimeHandler
       map:
         fields:
    @@ -6095,6 +6196,9 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: recursiveReadOnlyMounts
           type:
             scalar: boolean
    +    - name: userNamespaces
    +      type:
    +        scalar: boolean
     - name: io.k8s.api.core.v1.NodeSelector
       map:
         fields:
    @@ -6204,6 +6308,9 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             namedType: io.k8s.api.core.v1.NodeDaemonEndpoints
           default: {}
    +    - name: features
    +      type:
    +        namedType: io.k8s.api.core.v1.NodeFeatures
         - name: images
           type:
             list:
    @@ -6747,6 +6854,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: ip
           type:
             scalar: string
    +      default: ""
     - name: io.k8s.api.core.v1.PodOS
       map:
         fields:
    @@ -6768,10 +6876,12 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             scalar: string
           default: ""
    -    - name: source
    +    - name: resourceClaimName
           type:
    -        namedType: io.k8s.api.core.v1.ClaimSource
    -      default: {}
    +        scalar: string
    +    - name: resourceClaimTemplateName
    +      type:
    +        scalar: string
     - name: io.k8s.api.core.v1.PodResourceClaimStatus
       map:
         fields:
    @@ -6822,6 +6932,9 @@ var schemaYAML = typed.YAMLObject(`types:
               elementType:
                 scalar: numeric
               elementRelationship: atomic
    +    - name: supplementalGroupsPolicy
    +      type:
    +        scalar: string
         - name: sysctls
           type:
             list:
    @@ -7233,6 +7346,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: keyring
           type:
             scalar: string
    +      default: /etc/ceph/keyring
         - name: monitors
           type:
             list:
    @@ -7242,6 +7356,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: pool
           type:
             scalar: string
    +      default: rbd
         - name: readOnly
           type:
             scalar: boolean
    @@ -7251,6 +7366,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: user
           type:
             scalar: string
    +      default: admin
     - name: io.k8s.api.core.v1.RBDVolumeSource
       map:
         fields:
    @@ -7264,6 +7380,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: keyring
           type:
             scalar: string
    +      default: /etc/ceph/keyring
         - name: monitors
           type:
             list:
    @@ -7273,6 +7390,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: pool
           type:
             scalar: string
    +      default: rbd
         - name: readOnly
           type:
             scalar: boolean
    @@ -7282,6 +7400,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: user
           type:
             scalar: string
    +      default: admin
     - name: io.k8s.api.core.v1.ReplicationController
       map:
         fields:
    @@ -7375,6 +7494,9 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             scalar: string
           default: ""
    +    - name: request
    +      type:
    +        scalar: string
     - name: io.k8s.api.core.v1.ResourceFieldSelector
       map:
         fields:
    @@ -7389,6 +7511,16 @@ var schemaYAML = typed.YAMLObject(`types:
             scalar: string
           default: ""
         elementRelationship: atomic
    +- name: io.k8s.api.core.v1.ResourceHealth
    +  map:
    +    fields:
    +    - name: health
    +      type:
    +        scalar: string
    +    - name: resourceID
    +      type:
    +        scalar: string
    +      default: ""
     - name: io.k8s.api.core.v1.ResourceQuota
       map:
         fields:
    @@ -7461,6 +7593,21 @@ var schemaYAML = typed.YAMLObject(`types:
             map:
               elementType:
                 namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
    +- name: io.k8s.api.core.v1.ResourceStatus
    +  map:
    +    fields:
    +    - name: name
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: resources
    +      type:
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.core.v1.ResourceHealth
    +          elementRelationship: associative
    +          keys:
    +          - resourceID
     - name: io.k8s.api.core.v1.SELinuxOptions
       map:
         fields:
    @@ -7482,6 +7629,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: fsType
           type:
             scalar: string
    +      default: xfs
         - name: gateway
           type:
             scalar: string
    @@ -7501,6 +7649,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: storageMode
           type:
             scalar: string
    +      default: ThinProvisioned
         - name: storagePool
           type:
             scalar: string
    @@ -7517,6 +7666,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: fsType
           type:
             scalar: string
    +      default: xfs
         - name: gateway
           type:
             scalar: string
    @@ -7536,6 +7686,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: storageMode
           type:
             scalar: string
    +      default: ThinProvisioned
         - name: storagePool
           type:
             scalar: string
    @@ -8157,6 +8308,9 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: hostPath
           type:
             namedType: io.k8s.api.core.v1.HostPathVolumeSource
    +    - name: image
    +      type:
    +        namedType: io.k8s.api.core.v1.ImageVolumeSource
         - name: iscsi
           type:
             namedType: io.k8s.api.core.v1.ISCSIVolumeSource
    @@ -10939,6 +11093,7 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: number
           type:
             scalar: numeric
    +    elementRelationship: atomic
     - name: io.k8s.api.networking.v1alpha1.IPAddress
       map:
         fields:
    @@ -11040,6 +11195,29 @@ var schemaYAML = typed.YAMLObject(`types:
               elementType:
                 namedType: io.k8s.api.networking.v1beta1.HTTPIngressPath
               elementRelationship: atomic
    +- name: io.k8s.api.networking.v1beta1.IPAddress
    +  map:
    +    fields:
    +    - name: apiVersion
    +      type:
    +        scalar: string
    +    - name: kind
    +      type:
    +        scalar: string
    +    - name: metadata
    +      type:
    +        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    +      default: {}
    +    - name: spec
    +      type:
    +        namedType: io.k8s.api.networking.v1beta1.IPAddressSpec
    +      default: {}
    +- name: io.k8s.api.networking.v1beta1.IPAddressSpec
    +  map:
    +    fields:
    +    - name: parentRef
    +      type:
    +        namedType: io.k8s.api.networking.v1beta1.ParentReference
     - name: io.k8s.api.networking.v1beta1.Ingress
       map:
         fields:
    @@ -11206,6 +11384,62 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: secretName
           type:
             scalar: string
    +- name: io.k8s.api.networking.v1beta1.ParentReference
    +  map:
    +    fields:
    +    - name: group
    +      type:
    +        scalar: string
    +    - name: name
    +      type:
    +        scalar: string
    +    - name: namespace
    +      type:
    +        scalar: string
    +    - name: resource
    +      type:
    +        scalar: string
    +- name: io.k8s.api.networking.v1beta1.ServiceCIDR
    +  map:
    +    fields:
    +    - name: apiVersion
    +      type:
    +        scalar: string
    +    - name: kind
    +      type:
    +        scalar: string
    +    - name: metadata
    +      type:
    +        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    +      default: {}
    +    - name: spec
    +      type:
    +        namedType: io.k8s.api.networking.v1beta1.ServiceCIDRSpec
    +      default: {}
    +    - name: status
    +      type:
    +        namedType: io.k8s.api.networking.v1beta1.ServiceCIDRStatus
    +      default: {}
    +- name: io.k8s.api.networking.v1beta1.ServiceCIDRSpec
    +  map:
    +    fields:
    +    - name: cidrs
    +      type:
    +        list:
    +          elementType:
    +            scalar: string
    +          elementRelationship: atomic
    +- name: io.k8s.api.networking.v1beta1.ServiceCIDRStatus
    +  map:
    +    fields:
    +    - name: conditions
    +      type:
    +        list:
    +          elementType:
    +            namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
    +          elementRelationship: associative
    +          keys:
    +          - type
     - name: io.k8s.api.node.v1.Overhead
       map:
         fields:
    @@ -12010,53 +12244,81 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: namespace
           type:
             scalar: string
    -- name: io.k8s.api.resource.v1alpha2.AllocationResult
    +- name: io.k8s.api.resource.v1alpha3.AllocationResult
       map:
         fields:
    -    - name: availableOnNodes
    +    - name: controller
    +      type:
    +        scalar: string
    +    - name: devices
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult
    +      default: {}
    +    - name: nodeSelector
           type:
             namedType: io.k8s.api.core.v1.NodeSelector
    -    - name: resourceHandles
    +- name: io.k8s.api.resource.v1alpha3.BasicDevice
    +  map:
    +    fields:
    +    - name: attributes
           type:
    -        list:
    +        map:
               elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.ResourceHandle
    -          elementRelationship: atomic
    -    - name: shareable
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceAttribute
    +    - name: capacity
           type:
    -        scalar: boolean
    -- name: io.k8s.api.resource.v1alpha2.DriverAllocationResult
    +        map:
    +          elementType:
    +            namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
    +- name: io.k8s.api.resource.v1alpha3.CELDeviceSelector
       map:
         fields:
    -    - name: namedResources
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult
    -    - name: vendorRequestParameters
    +    - name: expression
           type:
    -        namedType: __untyped_atomic_
    -- name: io.k8s.api.resource.v1alpha2.DriverRequests
    +        scalar: string
    +      default: ""
    +- name: io.k8s.api.resource.v1alpha3.Device
       map:
         fields:
    -    - name: driverName
    +    - name: basic
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.BasicDevice
    +    - name: name
           type:
             scalar: string
    +      default: ""
    +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration
    +  map:
    +    fields:
    +    - name: opaque
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration
         - name: requests
           type:
             list:
               elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.ResourceRequest
    +            scalar: string
               elementRelationship: atomic
    -    - name: vendorParameters
    +    - name: source
           type:
    -        namedType: __untyped_atomic_
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult
    +        scalar: string
    +      default: ""
    +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationResult
       map:
         fields:
    -    - name: name
    +    - name: config
           type:
    -        scalar: string
    -      default: ""
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration
    +          elementRelationship: atomic
    +    - name: results
    +      type:
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult
    +          elementRelationship: atomic
    +- name: io.k8s.api.resource.v1alpha3.DeviceAttribute
       map:
         fields:
         - name: bool
    @@ -12065,80 +12327,161 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: int
           type:
             scalar: numeric
    -    - name: intSlice
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice
    -    - name: name
    -      type:
    -        scalar: string
    -      default: ""
    -    - name: quantity
    -      type:
    -        namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
         - name: string
           type:
             scalar: string
    -    - name: stringSlice
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice
         - name: version
           type:
             scalar: string
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesFilter
    +- name: io.k8s.api.resource.v1alpha3.DeviceClaim
       map:
         fields:
    -    - name: selector
    +    - name: config
           type:
    -        scalar: string
    -      default: ""
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesInstance
    -  map:
    -    fields:
    -    - name: attributes
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration
    +          elementRelationship: atomic
    +    - name: constraints
           type:
             list:
               elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceConstraint
               elementRelationship: atomic
    -    - name: name
    +    - name: requests
           type:
    -        scalar: string
    -      default: ""
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceRequest
    +          elementRelationship: atomic
    +- name: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration
       map:
         fields:
    -    - name: ints
    +    - name: opaque
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration
    +    - name: requests
           type:
             list:
               elementType:
    -            scalar: numeric
    +            scalar: string
               elementRelationship: atomic
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesRequest
    +- name: io.k8s.api.resource.v1alpha3.DeviceClass
       map:
         fields:
    -    - name: selector
    +    - name: apiVersion
           type:
             scalar: string
    -      default: ""
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesResources
    +    - name: kind
    +      type:
    +        scalar: string
    +    - name: metadata
    +      type:
    +        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    +      default: {}
    +    - name: spec
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.DeviceClassSpec
    +      default: {}
    +- name: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration
    +  map:
    +    fields:
    +    - name: opaque
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration
    +- name: io.k8s.api.resource.v1alpha3.DeviceClassSpec
       map:
         fields:
    -    - name: instances
    +    - name: config
    +      type:
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration
    +          elementRelationship: atomic
    +    - name: selectors
           type:
             list:
               elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.NamedResourcesInstance
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceSelector
               elementRelationship: atomic
    -- name: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice
    +    - name: suitableNodes
    +      type:
    +        namedType: io.k8s.api.core.v1.NodeSelector
    +- name: io.k8s.api.resource.v1alpha3.DeviceConstraint
       map:
         fields:
    -    - name: strings
    +    - name: matchAttribute
    +      type:
    +        scalar: string
    +    - name: requests
           type:
             list:
               elementType:
                 scalar: string
               elementRelationship: atomic
    -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext
    +- name: io.k8s.api.resource.v1alpha3.DeviceRequest
    +  map:
    +    fields:
    +    - name: adminAccess
    +      type:
    +        scalar: boolean
    +      default: false
    +    - name: allocationMode
    +      type:
    +        scalar: string
    +    - name: count
    +      type:
    +        scalar: numeric
    +    - name: deviceClassName
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: name
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: selectors
    +      type:
    +        list:
    +          elementType:
    +            namedType: io.k8s.api.resource.v1alpha3.DeviceSelector
    +          elementRelationship: atomic
    +- name: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult
    +  map:
    +    fields:
    +    - name: device
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: driver
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: pool
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: request
    +      type:
    +        scalar: string
    +      default: ""
    +- name: io.k8s.api.resource.v1alpha3.DeviceSelector
    +  map:
    +    fields:
    +    - name: cel
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector
    +- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration
    +  map:
    +    fields:
    +    - name: driver
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: parameters
    +      type:
    +        namedType: __untyped_atomic_
    +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContext
       map:
         fields:
         - name: apiVersion
    @@ -12153,13 +12496,13 @@ var schemaYAML = typed.YAMLObject(`types:
           default: {}
         - name: spec
           type:
    -        namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec
    +        namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec
           default: {}
         - name: status
           type:
    -        namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus
    +        namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus
           default: {}
    -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec
    +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec
       map:
         fields:
         - name: potentialNodes
    @@ -12171,18 +12514,18 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: selectedNode
           type:
             scalar: string
    -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus
    +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus
       map:
         fields:
         - name: resourceClaims
           type:
             list:
               elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus
    +            namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus
               elementRelationship: associative
               keys:
               - name
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaim
    +- name: io.k8s.api.resource.v1alpha3.ResourceClaim
       map:
         fields:
         - name: apiVersion
    @@ -12197,13 +12540,13 @@ var schemaYAML = typed.YAMLObject(`types:
           default: {}
         - name: spec
           type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec
    +        namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec
           default: {}
         - name: status
           type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus
    +        namedType: io.k8s.api.resource.v1alpha3.ResourceClaimStatus
           default: {}
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference
    +- name: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference
       map:
         fields:
         - name: apiGroup
    @@ -12221,91 +12564,47 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             scalar: string
           default: ""
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimParameters
    +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus
       map:
         fields:
    -    - name: apiVersion
    -      type:
    -        scalar: string
    -    - name: driverRequests
    -      type:
    -        list:
    -          elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.DriverRequests
    -          elementRelationship: atomic
    -    - name: generatedFrom
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference
    -    - name: kind
    -      type:
    -        scalar: string
    -    - name: metadata
    -      type:
    -        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    -      default: {}
    -    - name: shareable
    -      type:
    -        scalar: boolean
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference
    -  map:
    -    fields:
    -    - name: apiGroup
    -      type:
    -        scalar: string
    -    - name: kind
    -      type:
    -        scalar: string
    -      default: ""
         - name: name
           type:
             scalar: string
           default: ""
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus
    -  map:
    -    fields:
    -    - name: name
    -      type:
    -        scalar: string
         - name: unsuitableNodes
           type:
             list:
               elementType:
                 scalar: string
               elementRelationship: atomic
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec
    +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec
       map:
         fields:
    -    - name: allocationMode
    +    - name: controller
           type:
             scalar: string
    -    - name: parametersRef
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference
    -    - name: resourceClassName
    +    - name: devices
           type:
    -        scalar: string
    -      default: ""
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus
    +        namedType: io.k8s.api.resource.v1alpha3.DeviceClaim
    +      default: {}
    +- name: io.k8s.api.resource.v1alpha3.ResourceClaimStatus
       map:
         fields:
         - name: allocation
           type:
    -        namedType: io.k8s.api.resource.v1alpha2.AllocationResult
    +        namedType: io.k8s.api.resource.v1alpha3.AllocationResult
         - name: deallocationRequested
           type:
             scalar: boolean
    -    - name: driverName
    -      type:
    -        scalar: string
         - name: reservedFor
           type:
             list:
               elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference
    +            namedType: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference
               elementRelationship: associative
               keys:
               - uid
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate
    +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplate
       map:
         fields:
         - name: apiVersion
    @@ -12320,9 +12619,9 @@ var schemaYAML = typed.YAMLObject(`types:
           default: {}
         - name: spec
           type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec
    +        namedType: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec
           default: {}
    -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec
    +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec
       map:
         fields:
         - name: metadata
    @@ -12331,119 +12630,29 @@ var schemaYAML = typed.YAMLObject(`types:
           default: {}
         - name: spec
           type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec
    -      default: {}
    -- name: io.k8s.api.resource.v1alpha2.ResourceClass
    -  map:
    -    fields:
    -    - name: apiVersion
    -      type:
    -        scalar: string
    -    - name: driverName
    -      type:
    -        scalar: string
    -      default: ""
    -    - name: kind
    -      type:
    -        scalar: string
    -    - name: metadata
    -      type:
    -        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    -      default: {}
    -    - name: parametersRef
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference
    -    - name: structuredParameters
    -      type:
    -        scalar: boolean
    -    - name: suitableNodes
    -      type:
    -        namedType: io.k8s.api.core.v1.NodeSelector
    -- name: io.k8s.api.resource.v1alpha2.ResourceClassParameters
    -  map:
    -    fields:
    -    - name: apiVersion
    -      type:
    -        scalar: string
    -    - name: filters
    -      type:
    -        list:
    -          elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.ResourceFilter
    -          elementRelationship: atomic
    -    - name: generatedFrom
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference
    -    - name: kind
    -      type:
    -        scalar: string
    -    - name: metadata
    -      type:
    -        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    +        namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec
           default: {}
    -    - name: vendorParameters
    -      type:
    -        list:
    -          elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.VendorParameters
    -          elementRelationship: atomic
    -- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference
    +- name: io.k8s.api.resource.v1alpha3.ResourcePool
       map:
         fields:
    -    - name: apiGroup
    -      type:
    -        scalar: string
    -    - name: kind
    +    - name: generation
           type:
    -        scalar: string
    -      default: ""
    +        scalar: numeric
    +      default: 0
         - name: name
           type:
             scalar: string
           default: ""
    -    - name: namespace
    -      type:
    -        scalar: string
    -- name: io.k8s.api.resource.v1alpha2.ResourceFilter
    -  map:
    -    fields:
    -    - name: driverName
    +    - name: resourceSliceCount
           type:
    -        scalar: string
    -    - name: namedResources
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.NamedResourcesFilter
    -- name: io.k8s.api.resource.v1alpha2.ResourceHandle
    -  map:
    -    fields:
    -    - name: data
    -      type:
    -        scalar: string
    -    - name: driverName
    -      type:
    -        scalar: string
    -    - name: structuredData
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.StructuredResourceHandle
    -- name: io.k8s.api.resource.v1alpha2.ResourceRequest
    -  map:
    -    fields:
    -    - name: namedResources
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.NamedResourcesRequest
    -    - name: vendorParameters
    -      type:
    -        namedType: __untyped_atomic_
    -- name: io.k8s.api.resource.v1alpha2.ResourceSlice
    +        scalar: numeric
    +      default: 0
    +- name: io.k8s.api.resource.v1alpha3.ResourceSlice
       map:
         fields:
         - name: apiVersion
           type:
             scalar: string
    -    - name: driverName
    -      type:
    -        scalar: string
    -      default: ""
         - name: kind
           type:
             scalar: string
    @@ -12451,39 +12660,36 @@ var schemaYAML = typed.YAMLObject(`types:
           type:
             namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
           default: {}
    -    - name: namedResources
    -      type:
    -        namedType: io.k8s.api.resource.v1alpha2.NamedResourcesResources
    -    - name: nodeName
    +    - name: spec
           type:
    -        scalar: string
    -- name: io.k8s.api.resource.v1alpha2.StructuredResourceHandle
    +        namedType: io.k8s.api.resource.v1alpha3.ResourceSliceSpec
    +      default: {}
    +- name: io.k8s.api.resource.v1alpha3.ResourceSliceSpec
       map:
         fields:
    -    - name: nodeName
    +    - name: allNodes
           type:
    -        scalar: string
    -    - name: results
    +        scalar: boolean
    +    - name: devices
           type:
             list:
               elementType:
    -            namedType: io.k8s.api.resource.v1alpha2.DriverAllocationResult
    +            namedType: io.k8s.api.resource.v1alpha3.Device
               elementRelationship: atomic
    -    - name: vendorClaimParameters
    -      type:
    -        namedType: __untyped_atomic_
    -    - name: vendorClassParameters
    +    - name: driver
           type:
    -        namedType: __untyped_atomic_
    -- name: io.k8s.api.resource.v1alpha2.VendorParameters
    -  map:
    -    fields:
    -    - name: driverName
    +        scalar: string
    +      default: ""
    +    - name: nodeName
           type:
             scalar: string
    -    - name: parameters
    +    - name: nodeSelector
           type:
    -        namedType: __untyped_atomic_
    +        namedType: io.k8s.api.core.v1.NodeSelector
    +    - name: pool
    +      type:
    +        namedType: io.k8s.api.resource.v1alpha3.ResourcePool
    +      default: {}
     - name: io.k8s.api.scheduling.v1.PriorityClass
       map:
         fields:
    @@ -13177,6 +13383,28 @@ var schemaYAML = typed.YAMLObject(`types:
         - name: detachError
           type:
             namedType: io.k8s.api.storage.v1beta1.VolumeError
    +- name: io.k8s.api.storage.v1beta1.VolumeAttributesClass
    +  map:
    +    fields:
    +    - name: apiVersion
    +      type:
    +        scalar: string
    +    - name: driverName
    +      type:
    +        scalar: string
    +      default: ""
    +    - name: kind
    +      type:
    +        scalar: string
    +    - name: metadata
    +      type:
    +        namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
    +      default: {}
    +    - name: parameters
    +      type:
    +        map:
    +          elementType:
    +            scalar: string
     - name: io.k8s.api.storage.v1beta1.VolumeError
       map:
         fields:
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
    index c84102cdde..466aaebb61 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ConditionApplyConfiguration represents an declarative configuration of the Condition type for use
    +// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use
     // with apply.
     type ConditionApplyConfiguration struct {
     	Type               *string             `json:"type,omitempty"`
    @@ -33,7 +33,7 @@ type ConditionApplyConfiguration struct {
     	Message            *string             `json:"message,omitempty"`
     }
     
    -// ConditionApplyConfiguration constructs an declarative configuration of the Condition type for use with
    +// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with
     // apply.
     func Condition() *ConditionApplyConfiguration {
     	return &ConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
    index 7a1d23114d..313bb9784d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
    @@ -22,7 +22,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// DeleteOptionsApplyConfiguration represents an declarative configuration of the DeleteOptions type for use
    +// DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use
     // with apply.
     type DeleteOptionsApplyConfiguration struct {
     	TypeMetaApplyConfiguration `json:",inline"`
    @@ -33,7 +33,7 @@ type DeleteOptionsApplyConfiguration struct {
     	DryRun                     []string                         `json:"dryRun,omitempty"`
     }
     
    -// DeleteOptionsApplyConfiguration constructs an declarative configuration of the DeleteOptions type for use with
    +// DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with
     // apply.
     func DeleteOptions() *DeleteOptionsApplyConfiguration {
     	b := &DeleteOptionsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go
    index 6d24bc363b..1f33c94e0c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// LabelSelectorApplyConfiguration represents an declarative configuration of the LabelSelector type for use
    +// LabelSelectorApplyConfiguration represents a declarative configuration of the LabelSelector type for use
     // with apply.
     type LabelSelectorApplyConfiguration struct {
     	MatchLabels      map[string]string                            `json:"matchLabels,omitempty"`
     	MatchExpressions []LabelSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"`
     }
     
    -// LabelSelectorApplyConfiguration constructs an declarative configuration of the LabelSelector type for use with
    +// LabelSelectorApplyConfiguration constructs a declarative configuration of the LabelSelector type for use with
     // apply.
     func LabelSelector() *LabelSelectorApplyConfiguration {
     	return &LabelSelectorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
    index ff70f365e6..bd9db9659b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// LabelSelectorRequirementApplyConfiguration represents an declarative configuration of the LabelSelectorRequirement type for use
    +// LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use
     // with apply.
     type LabelSelectorRequirementApplyConfiguration struct {
     	Key      *string                   `json:"key,omitempty"`
    @@ -30,7 +30,7 @@ type LabelSelectorRequirementApplyConfiguration struct {
     	Values   []string                  `json:"values,omitempty"`
     }
     
    -// LabelSelectorRequirementApplyConfiguration constructs an declarative configuration of the LabelSelectorRequirement type for use with
    +// LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with
     // apply.
     func LabelSelectorRequirement() *LabelSelectorRequirementApplyConfiguration {
     	return &LabelSelectorRequirementApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
    index f4d7e26812..6913df8226 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// ManagedFieldsEntryApplyConfiguration represents an declarative configuration of the ManagedFieldsEntry type for use
    +// ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use
     // with apply.
     type ManagedFieldsEntryApplyConfiguration struct {
     	Manager     *string                        `json:"manager,omitempty"`
    @@ -34,7 +34,7 @@ type ManagedFieldsEntryApplyConfiguration struct {
     	Subresource *string                        `json:"subresource,omitempty"`
     }
     
    -// ManagedFieldsEntryApplyConfiguration constructs an declarative configuration of the ManagedFieldsEntry type for use with
    +// ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with
     // apply.
     func ManagedFieldsEntry() *ManagedFieldsEntryApplyConfiguration {
     	return &ManagedFieldsEntryApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
    index 9b290e9680..a9419975ef 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
    @@ -23,7 +23,7 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     )
     
    -// ObjectMetaApplyConfiguration represents an declarative configuration of the ObjectMeta type for use
    +// ObjectMetaApplyConfiguration represents a declarative configuration of the ObjectMeta type for use
     // with apply.
     type ObjectMetaApplyConfiguration struct {
     	Name                       *string                            `json:"name,omitempty"`
    @@ -41,7 +41,7 @@ type ObjectMetaApplyConfiguration struct {
     	Finalizers                 []string                           `json:"finalizers,omitempty"`
     }
     
    -// ObjectMetaApplyConfiguration constructs an declarative configuration of the ObjectMeta type for use with
    +// ObjectMetaApplyConfiguration constructs a declarative configuration of the ObjectMeta type for use with
     // apply.
     func ObjectMeta() *ObjectMetaApplyConfiguration {
     	return &ObjectMetaApplyConfiguration{}
    @@ -169,3 +169,8 @@ func (b *ObjectMetaApplyConfiguration) WithFinalizers(values ...string) *ObjectM
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ObjectMetaApplyConfiguration) GetName() *string {
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go
    index b3117d6a4b..2776152322 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go
    @@ -22,7 +22,7 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     )
     
    -// OwnerReferenceApplyConfiguration represents an declarative configuration of the OwnerReference type for use
    +// OwnerReferenceApplyConfiguration represents a declarative configuration of the OwnerReference type for use
     // with apply.
     type OwnerReferenceApplyConfiguration struct {
     	APIVersion         *string    `json:"apiVersion,omitempty"`
    @@ -33,7 +33,7 @@ type OwnerReferenceApplyConfiguration struct {
     	BlockOwnerDeletion *bool      `json:"blockOwnerDeletion,omitempty"`
     }
     
    -// OwnerReferenceApplyConfiguration constructs an declarative configuration of the OwnerReference type for use with
    +// OwnerReferenceApplyConfiguration constructs a declarative configuration of the OwnerReference type for use with
     // apply.
     func OwnerReference() *OwnerReferenceApplyConfiguration {
     	return &OwnerReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go
    index f627733f1e..8f8b6c6b3a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go
    @@ -22,14 +22,14 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     )
     
    -// PreconditionsApplyConfiguration represents an declarative configuration of the Preconditions type for use
    +// PreconditionsApplyConfiguration represents a declarative configuration of the Preconditions type for use
     // with apply.
     type PreconditionsApplyConfiguration struct {
     	UID             *types.UID `json:"uid,omitempty"`
     	ResourceVersion *string    `json:"resourceVersion,omitempty"`
     }
     
    -// PreconditionsApplyConfiguration constructs an declarative configuration of the Preconditions type for use with
    +// PreconditionsApplyConfiguration constructs a declarative configuration of the Preconditions type for use with
     // apply.
     func Preconditions() *PreconditionsApplyConfiguration {
     	return &PreconditionsApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go
    index 877b0890e8..979044384c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// TypeMetaApplyConfiguration represents an declarative configuration of the TypeMeta type for use
    +// TypeMetaApplyConfiguration represents a declarative configuration of the TypeMeta type for use
     // with apply.
     type TypeMetaApplyConfiguration struct {
     	Kind       *string `json:"kind,omitempty"`
     	APIVersion *string `json:"apiVersion,omitempty"`
     }
     
    -// TypeMetaApplyConfiguration constructs an declarative configuration of the TypeMeta type for use with
    +// TypeMetaApplyConfiguration constructs a declarative configuration of the TypeMeta type for use with
     // apply.
     func TypeMeta() *TypeMetaApplyConfiguration {
     	return &TypeMetaApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
    index 07b6a67f6a..e39670f295 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/networking/v1"
     )
     
    -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use
    +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
     // with apply.
     type HTTPIngressPathApplyConfiguration struct {
     	Path     *string                           `json:"path,omitempty"`
    @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct {
     	Backend  *IngressBackendApplyConfiguration `json:"backend,omitempty"`
     }
     
    -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with
    +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with
     // apply.
     func HTTPIngressPath() *HTTPIngressPathApplyConfiguration {
     	return &HTTPIngressPathApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go
    index fef529d696..ad9a7a6771 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use
    +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use
     // with apply.
     type HTTPIngressRuleValueApplyConfiguration struct {
     	Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"`
     }
     
    -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with
    +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with
     // apply.
     func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration {
     	return &HTTPIngressRuleValueApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
    index b5146902d4..607c26e943 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use
    +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
     // with apply.
     type IngressApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct {
     	Status                           *IngressStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Ingress constructs an declarative configuration of the Ingress type for use with
    +// Ingress constructs a declarative configuration of the Ingress type for use with
     // apply.
     func Ingress(name, namespace string) *IngressApplyConfiguration {
     	b := &IngressApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *IngressApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go
    index 5757135991..b014b7beef 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go
    @@ -22,14 +22,14 @@ import (
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use
    +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use
     // with apply.
     type IngressBackendApplyConfiguration struct {
     	Service  *IngressServiceBackendApplyConfiguration            `json:"service,omitempty"`
     	Resource *corev1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
     }
     
    -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with
    +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with
     // apply.
     func IngressBackend() *IngressBackendApplyConfiguration {
     	return &IngressBackendApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
    index e33d0b2d9f..14acc7dbd8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use
    +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use
     // with apply.
     type IngressClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct {
     	Spec                             *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// IngressClass constructs an declarative configuration of the IngressClass type for use with
    +// IngressClass constructs a declarative configuration of the IngressClass type for use with
     // apply.
     func IngressClass(name string) *IngressClassApplyConfiguration {
     	b := &IngressClassApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *IngressClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go
    index a020d3a8df..0dba1ebc5d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use
    +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use
     // with apply.
     type IngressClassParametersReferenceApplyConfiguration struct {
     	APIGroup  *string `json:"apiGroup,omitempty"`
    @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     }
     
    -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with
    +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with
     // apply.
     func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration {
     	return &IngressClassParametersReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go
    index ec0423e708..23e8484344 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use
    +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use
     // with apply.
     type IngressClassSpecApplyConfiguration struct {
     	Controller *string                                            `json:"controller,omitempty"`
     	Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"`
     }
     
    -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with
    +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with
     // apply.
     func IngressClassSpec() *IngressClassSpecApplyConfiguration {
     	return &IngressClassSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go
    index 444275a127..d0feb44da4 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use
    +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use
     // with apply.
     type IngressLoadBalancerIngressApplyConfiguration struct {
     	IP       *string                               `json:"ip,omitempty"`
    @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct {
     	Ports    []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
     }
     
    -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with
    +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with
     // apply.
     func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration {
     	return &IngressLoadBalancerIngressApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go
    index 8e01a301ac..08c841f06b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use
    +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use
     // with apply.
     type IngressLoadBalancerStatusApplyConfiguration struct {
     	Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
     }
     
    -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with
    +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with
     // apply.
     func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration {
     	return &IngressLoadBalancerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
    index 82b5babd9c..b6411199fc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use
    +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use
     // with apply.
     type IngressPortStatusApplyConfiguration struct {
     	Port     *int32       `json:"port,omitempty"`
    @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct {
     	Error    *string      `json:"error,omitempty"`
     }
     
    -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with
    +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with
     // apply.
     func IngressPortStatus() *IngressPortStatusApplyConfiguration {
     	return &IngressPortStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
    index 8153e88fe2..4ef871f077 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use
    +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use
     // with apply.
     type IngressRuleApplyConfiguration struct {
     	Host                               *string `json:"host,omitempty"`
    -	IngressRuleValueApplyConfiguration `json:",omitempty,inline"`
    +	IngressRuleValueApplyConfiguration `json:",inline"`
     }
     
    -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with
    +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with
     // apply.
     func IngressRule() *IngressRuleApplyConfiguration {
     	return &IngressRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go
    index d0e094387c..1e13e378be 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use
    +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use
     // with apply.
     type IngressRuleValueApplyConfiguration struct {
     	HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"`
     }
     
    -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with
    +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with
     // apply.
     func IngressRuleValue() *IngressRuleValueApplyConfiguration {
     	return &IngressRuleValueApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go
    index 399739631b..07876afd17 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// IngressServiceBackendApplyConfiguration represents an declarative configuration of the IngressServiceBackend type for use
    +// IngressServiceBackendApplyConfiguration represents a declarative configuration of the IngressServiceBackend type for use
     // with apply.
     type IngressServiceBackendApplyConfiguration struct {
     	Name *string                               `json:"name,omitempty"`
     	Port *ServiceBackendPortApplyConfiguration `json:"port,omitempty"`
     }
     
    -// IngressServiceBackendApplyConfiguration constructs an declarative configuration of the IngressServiceBackend type for use with
    +// IngressServiceBackendApplyConfiguration constructs a declarative configuration of the IngressServiceBackend type for use with
     // apply.
     func IngressServiceBackend() *IngressServiceBackendApplyConfiguration {
     	return &IngressServiceBackendApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go
    index 635514ecf7..0572153aa1 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use
    +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use
     // with apply.
     type IngressSpecApplyConfiguration struct {
     	IngressClassName *string                           `json:"ingressClassName,omitempty"`
    @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct {
     	Rules            []IngressRuleApplyConfiguration   `json:"rules,omitempty"`
     }
     
    -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with
    +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with
     // apply.
     func IngressSpec() *IngressSpecApplyConfiguration {
     	return &IngressSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go
    index 7131bf8d0d..bd1327c93f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use
    +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use
     // with apply.
     type IngressStatusApplyConfiguration struct {
     	LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
     }
     
    -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with
    +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with
     // apply.
     func IngressStatus() *IngressStatusApplyConfiguration {
     	return &IngressStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go
    index 4d8d369f7c..44092503f9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use
    +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use
     // with apply.
     type IngressTLSApplyConfiguration struct {
     	Hosts      []string `json:"hosts,omitempty"`
     	SecretName *string  `json:"secretName,omitempty"`
     }
     
    -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with
    +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with
     // apply.
     func IngressTLS() *IngressTLSApplyConfiguration {
     	return &IngressTLSApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go
    index 1efd6edfdc..f3447a8f10 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use
    +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use
     // with apply.
     type IPBlockApplyConfiguration struct {
     	CIDR   *string  `json:"cidr,omitempty"`
     	Except []string `json:"except,omitempty"`
     }
     
    -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with
    +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with
     // apply.
     func IPBlock() *IPBlockApplyConfiguration {
     	return &IPBlockApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
    index 409507310b..3f8c8a5351 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use
    +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use
     // with apply.
     type NetworkPolicyApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct {
     	Spec                             *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with
    +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with
     // apply.
     func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
     	b := &NetworkPolicyApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *NetworkPolicyApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go
    index e5751c4413..46e2706ece 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use
    +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use
     // with apply.
     type NetworkPolicyEgressRuleApplyConfiguration struct {
     	Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
     	To    []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"`
     }
     
    -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with
    +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with
     // apply.
     func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration {
     	return &NetworkPolicyEgressRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go
    index 630fe1fabe..6e98759786 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use
    +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use
     // with apply.
     type NetworkPolicyIngressRuleApplyConfiguration struct {
     	Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
     	From  []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"`
     }
     
    -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with
    +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with
     // apply.
     func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration {
     	return &NetworkPolicyIngressRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
    index 909b651c04..046de3e237 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use
    +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use
     // with apply.
     type NetworkPolicyPeerApplyConfiguration struct {
     	PodSelector       *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
    @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct {
     	IPBlock           *IPBlockApplyConfiguration          `json:"ipBlock,omitempty"`
     }
     
    -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with
    +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with
     // apply.
     func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration {
     	return &NetworkPolicyPeerApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
    index 73dbed1d89..581ef1c348 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
    @@ -23,7 +23,7 @@ import (
     	intstr "k8s.io/apimachinery/pkg/util/intstr"
     )
     
    -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use
    +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use
     // with apply.
     type NetworkPolicyPortApplyConfiguration struct {
     	Protocol *v1.Protocol        `json:"protocol,omitempty"`
    @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct {
     	EndPort  *int32              `json:"endPort,omitempty"`
     }
     
    -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with
    +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with
     // apply.
     func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration {
     	return &NetworkPolicyPortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
    index 882d8233a9..da5ed5d358 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use
    +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use
     // with apply.
     type NetworkPolicySpecApplyConfiguration struct {
     	PodSelector *v1.LabelSelectorApplyConfiguration          `json:"podSelector,omitempty"`
    @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct {
     	PolicyTypes []apinetworkingv1.PolicyType                 `json:"policyTypes,omitempty"`
     }
     
    -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with
    +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with
     // apply.
     func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration {
     	return &NetworkPolicySpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go
    index ec278960ca..517f974838 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// ServiceBackendPortApplyConfiguration represents an declarative configuration of the ServiceBackendPort type for use
    +// ServiceBackendPortApplyConfiguration represents a declarative configuration of the ServiceBackendPort type for use
     // with apply.
     type ServiceBackendPortApplyConfiguration struct {
     	Name   *string `json:"name,omitempty"`
     	Number *int32  `json:"number,omitempty"`
     }
     
    -// ServiceBackendPortApplyConfiguration constructs an declarative configuration of the ServiceBackendPort type for use with
    +// ServiceBackendPortApplyConfiguration constructs a declarative configuration of the ServiceBackendPort type for use with
     // apply.
     func ServiceBackendPort() *ServiceBackendPortApplyConfiguration {
     	return &ServiceBackendPortApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go
    index da6822111d..999c23fa14 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use
    +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use
     // with apply.
     type IPAddressApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type IPAddressApplyConfiguration struct {
     	Spec                             *IPAddressSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// IPAddress constructs an declarative configuration of the IPAddress type for use with
    +// IPAddress constructs a declarative configuration of the IPAddress type for use with
     // apply.
     func IPAddress(name string) *IPAddressApplyConfiguration {
     	b := &IPAddressApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *IPAddressApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go
    index 064963d691..bf025a8c1a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1alpha1
     
    -// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use
    +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use
     // with apply.
     type IPAddressSpecApplyConfiguration struct {
     	ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"`
     }
     
    -// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with
    +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with
     // apply.
     func IPAddressSpec() *IPAddressSpecApplyConfiguration {
     	return &IPAddressSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go
    index ce1049709a..d5a52d503d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use
    +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use
     // with apply.
     type ParentReferenceApplyConfiguration struct {
     	Group     *string `json:"group,omitempty"`
    @@ -27,7 +27,7 @@ type ParentReferenceApplyConfiguration struct {
     	Name      *string `json:"name,omitempty"`
     }
     
    -// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with
    +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with
     // apply.
     func ParentReference() *ParentReferenceApplyConfiguration {
     	return &ParentReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go
    index f6d0a91e00..984e049f28 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use
    +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use
     // with apply.
     type ServiceCIDRApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ServiceCIDRApplyConfiguration struct {
     	Status                           *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with
    +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with
     // apply.
     func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration {
     	b := &ServiceCIDRApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ServiceCIDRApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go
    index 302d69194c..7875ff403b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1alpha1
     
    -// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use
    +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use
     // with apply.
     type ServiceCIDRSpecApplyConfiguration struct {
     	CIDRs []string `json:"cidrs,omitempty"`
     }
     
    -// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with
    +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with
     // apply.
     func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration {
     	return &ServiceCIDRSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go
    index 5afc549a65..34715e3a49 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use
    +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use
     // with apply.
     type ServiceCIDRStatusApplyConfiguration struct {
     	Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with
    +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with
     // apply.
     func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration {
     	return &ServiceCIDRStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
    index b12907e81c..61b458f7ee 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
    @@ -22,7 +22,7 @@ import (
     	v1beta1 "k8s.io/api/networking/v1beta1"
     )
     
    -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use
    +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
     // with apply.
     type HTTPIngressPathApplyConfiguration struct {
     	Path     *string                           `json:"path,omitempty"`
    @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct {
     	Backend  *IngressBackendApplyConfiguration `json:"backend,omitempty"`
     }
     
    -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with
    +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with
     // apply.
     func HTTPIngressPath() *HTTPIngressPathApplyConfiguration {
     	return &HTTPIngressPathApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go
    index 3137bc5eb0..1245452237 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use
    +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use
     // with apply.
     type HTTPIngressRuleValueApplyConfiguration struct {
     	Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"`
     }
     
    -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with
    +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with
     // apply.
     func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration {
     	return &HTTPIngressRuleValueApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
    index 56f65c30a9..0df53ea652 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use
    +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
     // with apply.
     type IngressApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct {
     	Status                           *IngressStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// Ingress constructs an declarative configuration of the Ingress type for use with
    +// Ingress constructs a declarative configuration of the Ingress type for use with
     // apply.
     func Ingress(name, namespace string) *IngressApplyConfiguration {
     	b := &IngressApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *IngressApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
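
The hunks above add a `GetName` accessor to the generated apply configurations so callers can read the configured object name back without reaching into the embedded `ObjectMetaApplyConfiguration`. A minimal sketch of how the accessor pairs with the existing builder, using only the `Ingress` constructor and `GetName` from this vendored package (the surrounding program is illustrative):

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func main() {
	// Ingress(name, namespace) pre-populates Name, Namespace, Kind and APIVersion.
	ing := networkingv1beta1.Ingress("demo-ingress", "default")

	// GetName, added in this hunk, lazily ensures ObjectMeta exists and returns a *string.
	if name := ing.GetName(); name != nil {
		fmt.Println("apply configuration is for:", *name)
	}
}
```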
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go
    index f19c2f2ee2..9d386f1608 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go
    @@ -23,7 +23,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use
    +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use
     // with apply.
     type IngressBackendApplyConfiguration struct {
     	ServiceName *string                                         `json:"serviceName,omitempty"`
    @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct {
     	Resource    *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
     }
     
    -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with
    +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with
     // apply.
     func IngressBackend() *IngressBackendApplyConfiguration {
     	return &IngressBackendApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
    index b65d4b3073..b0e877b57a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use
    +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use
     // with apply.
     type IngressClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct {
     	Spec                             *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// IngressClass constructs an declarative configuration of the IngressClass type for use with
    +// IngressClass constructs a declarative configuration of the IngressClass type for use with
     // apply.
     func IngressClass(name string) *IngressClassApplyConfiguration {
     	b := &IngressClassApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *IngressClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go
    index e6ca805e47..2a307a6760 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use
    +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use
     // with apply.
     type IngressClassParametersReferenceApplyConfiguration struct {
     	APIGroup  *string `json:"apiGroup,omitempty"`
    @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     }
     
    -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with
    +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with
     // apply.
     func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration {
     	return &IngressClassParametersReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go
    index 51040462ca..eefbf62b87 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use
    +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use
     // with apply.
     type IngressClassSpecApplyConfiguration struct {
     	Controller *string                                            `json:"controller,omitempty"`
     	Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"`
     }
     
    -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with
    +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with
     // apply.
     func IngressClassSpec() *IngressClassSpecApplyConfiguration {
     	return &IngressClassSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go
    index 20bf637805..12dbc35969 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use
    +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use
     // with apply.
     type IngressLoadBalancerIngressApplyConfiguration struct {
     	IP       *string                               `json:"ip,omitempty"`
    @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct {
     	Ports    []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
     }
     
    -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with
    +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with
     // apply.
     func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration {
     	return &IngressLoadBalancerIngressApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go
    index e16dd23633..e896ab3415 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use
    +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use
     // with apply.
     type IngressLoadBalancerStatusApplyConfiguration struct {
     	Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
     }
     
    -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with
    +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with
     // apply.
     func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration {
     	return &IngressLoadBalancerStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go
    index 0836537979..4ee3f01617 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use
    +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use
     // with apply.
     type IngressPortStatusApplyConfiguration struct {
     	Port     *int32       `json:"port,omitempty"`
    @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct {
     	Error    *string      `json:"error,omitempty"`
     }
     
    -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with
    +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with
     // apply.
     func IngressPortStatus() *IngressPortStatusApplyConfiguration {
     	return &IngressPortStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
    index 015541eeb9..dc676f7b60 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use
    +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use
     // with apply.
     type IngressRuleApplyConfiguration struct {
     	Host                               *string `json:"host,omitempty"`
    -	IngressRuleValueApplyConfiguration `json:",omitempty,inline"`
    +	IngressRuleValueApplyConfiguration `json:",inline"`
     }
     
    -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with
    +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with
     // apply.
     func IngressRule() *IngressRuleApplyConfiguration {
     	return &IngressRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go
    index 2d03c7b132..4a64124755 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use
    +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use
     // with apply.
     type IngressRuleValueApplyConfiguration struct {
     	HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"`
     }
     
    -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with
    +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with
     // apply.
     func IngressRuleValue() *IngressRuleValueApplyConfiguration {
     	return &IngressRuleValueApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go
    index 1ab4d8bb73..58fbde8b35 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use
    +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use
     // with apply.
     type IngressSpecApplyConfiguration struct {
     	IngressClassName *string                           `json:"ingressClassName,omitempty"`
    @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct {
     	Rules            []IngressRuleApplyConfiguration   `json:"rules,omitempty"`
     }
     
    -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with
    +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with
     // apply.
     func IngressSpec() *IngressSpecApplyConfiguration {
     	return &IngressSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go
    index faa7e2446f..3aed616889 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use
    +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use
     // with apply.
     type IngressStatusApplyConfiguration struct {
     	LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
     }
     
    -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with
    +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with
     // apply.
     func IngressStatus() *IngressStatusApplyConfiguration {
     	return &IngressStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go
    index 8ca93a0bc2..63648cd464 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use
    +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use
     // with apply.
     type IngressTLSApplyConfiguration struct {
     	Hosts      []string `json:"hosts,omitempty"`
     	SecretName *string  `json:"secretName,omitempty"`
     }
     
    -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with
    +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with
     // apply.
     func IngressTLS() *IngressTLSApplyConfiguration {
     	return &IngressTLSApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
    new file mode 100644
    index 0000000000..3047d79b95
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
    @@ -0,0 +1,253 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use
    +// with apply.
    +type IPAddressApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *IPAddressSpecApplyConfiguration `json:"spec,omitempty"`
    +}
    +
    +// IPAddress constructs a declarative configuration of the IPAddress type for use with
    +// apply.
    +func IPAddress(name string) *IPAddressApplyConfiguration {
    +	b := &IPAddressApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithKind("IPAddress")
    +	b.WithAPIVersion("networking.k8s.io/v1beta1")
    +	return b
    +}
    +
    +// ExtractIPAddress extracts the applied configuration owned by fieldManager from
+// iPAddress. If no managedFields are found in iPAddress for fieldManager, an
+// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// iPAddress must be an unmodified IPAddress API object that was retrieved from the Kubernetes API.
+// ExtractIPAddress provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
    +	return extractIPAddress(iPAddress, fieldManager, "")
    +}
    +
    +// ExtractIPAddressStatus is the same as ExtractIPAddress except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractIPAddressStatus(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
    +	return extractIPAddress(iPAddress, fieldManager, "status")
    +}
    +
    +func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) {
    +	b := &IPAddressApplyConfiguration{}
    +	err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1beta1.IPAddress"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(iPAddress.Name)
    +
    +	b.WithKind("IPAddress")
    +	b.WithAPIVersion("networking.k8s.io/v1beta1")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting any existing map entries in the Labels field with the same key.
    +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting any existing map entries in the Annotations field with the same key.
    +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *IPAddressApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go
    new file mode 100644
    index 0000000000..76b02137d2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use
    +// with apply.
    +type IPAddressSpecApplyConfiguration struct {
    +	ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"`
    +}
    +
    +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with
    +// apply.
    +func IPAddressSpec() *IPAddressSpecApplyConfiguration {
    +	return &IPAddressSpecApplyConfiguration{}
    +}
    +
    +// WithParentRef sets the ParentRef field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ParentRef field is set to the value of the last call.
    +func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration {
    +	b.ParentRef = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go
    new file mode 100644
    index 0000000000..1863938f16
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use
    +// with apply.
    +type ParentReferenceApplyConfiguration struct {
    +	Group     *string `json:"group,omitempty"`
    +	Resource  *string `json:"resource,omitempty"`
    +	Namespace *string `json:"namespace,omitempty"`
    +	Name      *string `json:"name,omitempty"`
    +}
    +
    +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with
    +// apply.
    +func ParentReference() *ParentReferenceApplyConfiguration {
    +	return &ParentReferenceApplyConfiguration{}
    +}
    +
    +// WithGroup sets the Group field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Group field is set to the value of the last call.
    +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration {
    +	b.Group = &value
    +	return b
    +}
    +
    +// WithResource sets the Resource field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Resource field is set to the value of the last call.
    +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration {
    +	b.Resource = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration {
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration {
    +	b.Name = &value
    +	return b
    +}
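
The new v1beta1 `IPAddress`, `IPAddressSpec`, and `ParentReference` apply configurations compose through the same `With*` builder chain as the rest of client-go's applyconfigurations. A minimal sketch of how they fit together, using only constructors and setters introduced in the files above; the address and the Service it points at are made-up values, and the final hand-off to a typed client's `Apply` call is intentionally left out:

```go
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func main() {
	// Declare intent for an IPAddress owned by a (hypothetical) Service "web" in "default".
	// IPAddress objects are named after the address itself.
	addr := networkingv1beta1.IPAddress("192.168.1.5").
		WithSpec(networkingv1beta1.IPAddressSpec().
			WithParentRef(networkingv1beta1.ParentReference().
				WithGroup(""). // core API group
				WithResource("services").
				WithNamespace("default").
				WithName("web")))

	// Only the fields that were explicitly set are serialized, which is what
	// server-side apply relies on to track field ownership.
	fmt.Println(*addr.GetName(), *addr.Spec.ParentRef.Resource)
}
```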
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
    new file mode 100644
    index 0000000000..4ef8e9ecac
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
    @@ -0,0 +1,262 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use
    +// with apply.
    +type ServiceCIDRApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *ServiceCIDRSpecApplyConfiguration   `json:"spec,omitempty"`
    +	Status                           *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"`
    +}
    +
    +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with
    +// apply.
    +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration {
    +	b := &ServiceCIDRApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithKind("ServiceCIDR")
    +	b.WithAPIVersion("networking.k8s.io/v1beta1")
    +	return b
    +}
    +
    +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from
    +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a
    +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// serviceCIDR must be an unmodified ServiceCIDR API object that was retrieved from the Kubernetes API.
+// ExtractServiceCIDR provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
    +	return extractServiceCIDR(serviceCIDR, fieldManager, "")
    +}
    +
    +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractServiceCIDRStatus(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
    +	return extractServiceCIDR(serviceCIDR, fieldManager, "status")
    +}
    +
    +func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) {
    +	b := &ServiceCIDRApplyConfiguration{}
    +	err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1beta1.ServiceCIDR"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(serviceCIDR.Name)
    +
    +	b.WithKind("ServiceCIDR")
    +	b.WithAPIVersion("networking.k8s.io/v1beta1")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Labels field,
    +// overwriting any existing entries in the Labels field with the same key.
    +func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Annotations field,
    +// overwriting any existing entries in the Annotations field with the same key.
    +func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// WithStatus sets the Status field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Status field is set to the value of the last call.
    +func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration {
    +	b.Status = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ServiceCIDRApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
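
The extract/modify-in-place/apply workflow that the generated comments above describe, sketched end to end. Everything outside the generated package is an assumption for illustration: the "example-manager" field manager and the object name are made up, and only ExtractServiceCIDR, the With* builders, and the standard generated typed client are relied on.

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	networkingv1beta1ac "k8s.io/client-go/applyconfigurations/networking/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// relabelOwnedServiceCIDR re-labels only the fields that "example-manager"
// already owns on a ServiceCIDR, following the extract/modify-in-place/apply
// pattern described in the generated comments.
func relabelOwnedServiceCIDR(ctx context.Context, cs kubernetes.Interface, name string) error {
	// Fetch the live, unmodified object from the API server.
	live, err := cs.NetworkingV1beta1().ServiceCIDRs().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract only the fields currently owned by "example-manager".
	cfg, err := networkingv1beta1ac.ExtractServiceCIDR(live, "example-manager")
	if err != nil {
		return err
	}
	// Modify in place, then re-apply under the same field manager.
	cfg.WithLabels(map[string]string{"example.com/zone": "zone-a"})
	_, err = cs.NetworkingV1beta1().ServiceCIDRs().Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}
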
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go
    new file mode 100644
    index 0000000000..1f283532d3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go
    @@ -0,0 +1,41 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use
    +// with apply.
    +type ServiceCIDRSpecApplyConfiguration struct {
    +	CIDRs []string `json:"cidrs,omitempty"`
    +}
    +
    +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with
    +// apply.
    +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration {
    +	return &ServiceCIDRSpecApplyConfiguration{}
    +}
    +
    +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the CIDRs field.
    +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration {
    +	for i := range values {
    +		b.CIDRs = append(b.CIDRs, values[i])
    +	}
    +	return b
    +}
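
A companion sketch for the spec builder above: declaring a ServiceCIDR via server-side apply. The object name, CIDR values, and field manager are illustrative only.

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	networkingv1beta1ac "k8s.io/client-go/applyconfigurations/networking/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyExampleServiceCIDR declares the desired spec and lets the API server
// merge it; repeated calls are idempotent for the fields set here.
func applyExampleServiceCIDR(ctx context.Context, cs kubernetes.Interface) error {
	cfg := networkingv1beta1ac.ServiceCIDR("example-range").
		WithSpec(networkingv1beta1ac.ServiceCIDRSpec().
			WithCIDRs("10.96.0.0/16", "fd00:10:96::/112"))

	_, err := cs.NetworkingV1beta1().ServiceCIDRs().Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
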
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go
    new file mode 100644
    index 0000000000..f2dd92404d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use
    +// with apply.
    +type ServiceCIDRStatusApplyConfiguration struct {
    +	Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
    +}
    +
    +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with
    +// apply.
    +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration {
    +	return &ServiceCIDRStatusApplyConfiguration{}
    +}
    +
    +// WithConditions adds the given value to the Conditions field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Conditions field.
    +func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithConditions")
    +		}
    +		b.Conditions = append(b.Conditions, *values[i])
    +	}
    +	return b
    +}
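
And for the status side, a hedged sketch of a controller publishing a condition through the status subresource. The condition type, reason, and field manager are invented, and the ApplyStatus call assumes the usual generated typed-client pattern rather than anything shown in this diff.

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	networkingv1beta1ac "k8s.io/client-go/applyconfigurations/networking/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// markServiceCIDRReady sets a Ready condition via the status subresource only;
// spec fields owned by other managers are left untouched.
func markServiceCIDRReady(ctx context.Context, cs kubernetes.Interface, name string) error {
	cfg := networkingv1beta1ac.ServiceCIDR(name).
		WithStatus(networkingv1beta1ac.ServiceCIDRStatus().
			WithConditions(metav1ac.Condition().
				WithType("Ready").
				WithStatus(metav1.ConditionTrue).
				WithReason("CIDRAllocated").
				WithMessage("service CIDR is usable").
				WithLastTransitionTime(metav1.Now())))

	_, err := cs.NetworkingV1beta1().ServiceCIDRs().ApplyStatus(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "example-status-manager", Force: true})
	return err
}
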
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
    index 9eec002671..6694538fc3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use
    +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use
     // with apply.
     type OverheadApplyConfiguration struct {
     	PodFixed *v1.ResourceList `json:"podFixed,omitempty"`
     }
     
    -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with
    +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with
     // apply.
     func Overhead() *OverheadApplyConfiguration {
     	return &OverheadApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
    index 3c9d1fc467..6ce01a319c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use
    +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use
     // with apply.
     type RuntimeClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct {
     	Scheduling                       *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
     }
     
    -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with
    +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with
     // apply.
     func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
     	b := &RuntimeClassApplyConfiguration{}
    @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo
     	b.Scheduling = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RuntimeClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
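
Beyond the "an/a" comment fixes, this client-go bump adds a GetName accessor to every top-level apply-configuration type (RuntimeClass above, plus the policy and RBAC types further down). One thing that enables is handling builders of different kinds behind a small local interface; the interface and helpers below are illustrative, not part of client-go.

import (
	nodev1ac "k8s.io/client-go/applyconfigurations/node/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
)

// named is satisfied by any apply configuration that now exposes GetName.
type named interface {
	GetName() *string
}

// namesOf collects the names set on a batch of apply configurations.
func namesOf(cfgs ...named) []string {
	out := make([]string, 0, len(cfgs))
	for _, c := range cfgs {
		if n := c.GetName(); n != nil {
			out = append(out, *n)
		}
	}
	return out
}

// Example usage with two of the types touched in this diff; the names are
// made up. Returns []string{"gvisor", "viewer"}.
func exampleNames() []string {
	return namesOf(
		nodev1ac.RuntimeClass("gvisor"),
		rbacv1ac.ClusterRole("viewer"),
	)
}
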
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
    index e01db85d7b..2d084e0f59 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use
    +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use
     // with apply.
     type SchedulingApplyConfiguration struct {
     	NodeSelector map[string]string                 `json:"nodeSelector,omitempty"`
     	Tolerations  []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
     }
     
    -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with
    +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with
     // apply.
     func Scheduling() *SchedulingApplyConfiguration {
     	return &SchedulingApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go
    index 1ddaa64acc..84770a0920 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use
    +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use
     // with apply.
     type OverheadApplyConfiguration struct {
     	PodFixed *v1.ResourceList `json:"podFixed,omitempty"`
     }
     
    -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with
    +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with
     // apply.
     func Overhead() *OverheadApplyConfiguration {
     	return &OverheadApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
    index e680e12deb..9f139ee1b6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use
    +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use
     // with apply.
     type RuntimeClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type RuntimeClassApplyConfiguration struct {
     	Spec                             *RuntimeClassSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with
    +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with
     // apply.
     func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
     	b := &RuntimeClassApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *RuntimeClassApplyConfiguration) WithSpec(value *RuntimeClassSpecApplyCo
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RuntimeClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go
    index 86e8585ad3..1aa43eb132 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// RuntimeClassSpecApplyConfiguration represents an declarative configuration of the RuntimeClassSpec type for use
    +// RuntimeClassSpecApplyConfiguration represents a declarative configuration of the RuntimeClassSpec type for use
     // with apply.
     type RuntimeClassSpecApplyConfiguration struct {
     	RuntimeHandler *string                       `json:"runtimeHandler,omitempty"`
    @@ -26,7 +26,7 @@ type RuntimeClassSpecApplyConfiguration struct {
     	Scheduling     *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
     }
     
    -// RuntimeClassSpecApplyConfiguration constructs an declarative configuration of the RuntimeClassSpec type for use with
    +// RuntimeClassSpecApplyConfiguration constructs a declarative configuration of the RuntimeClassSpec type for use with
     // apply.
     func RuntimeClassSpec() *RuntimeClassSpecApplyConfiguration {
     	return &RuntimeClassSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go
    index d4117d6bc7..6ce49ad866 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use
    +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use
     // with apply.
     type SchedulingApplyConfiguration struct {
     	NodeSelector map[string]string                 `json:"nodeSelector,omitempty"`
     	Tolerations  []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
     }
     
    -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with
    +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with
     // apply.
     func Scheduling() *SchedulingApplyConfiguration {
     	return &SchedulingApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go
    index e8c4895505..cf767e702e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/api/core/v1"
     )
     
    -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use
    +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use
     // with apply.
     type OverheadApplyConfiguration struct {
     	PodFixed *v1.ResourceList `json:"podFixed,omitempty"`
     }
     
    -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with
    +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with
     // apply.
     func Overhead() *OverheadApplyConfiguration {
     	return &OverheadApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
    index f5487665c3..fa6c9f45bf 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use
    +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use
     // with apply.
     type RuntimeClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct {
     	Scheduling                       *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
     }
     
    -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with
    +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with
     // apply.
     func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
     	b := &RuntimeClassApplyConfiguration{}
    @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo
     	b.Scheduling = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RuntimeClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go
    index 10831d0ff5..23d0b97527 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use
    +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use
     // with apply.
     type SchedulingApplyConfiguration struct {
     	NodeSelector map[string]string                 `json:"nodeSelector,omitempty"`
     	Tolerations  []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
     }
     
    -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with
    +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with
     // apply.
     func Scheduling() *SchedulingApplyConfiguration {
     	return &SchedulingApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
    index 76a9533a6f..3a051619f8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use
    +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use
     // with apply.
     type EvictionApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct {
     	DeleteOptions                    *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
     }
     
    -// Eviction constructs an declarative configuration of the Eviction type for use with
    +// Eviction constructs a declarative configuration of the Eviction type for use with
     // apply.
     func Eviction(name, namespace string) *EvictionApplyConfiguration {
     	b := &EvictionApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp
     	b.DeleteOptions = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EvictionApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
    index 6b547c2695..a765a7b623 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use
    +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use
     // with apply.
     type PodDisruptionBudgetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct {
     	Status                           *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with
    +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with
     // apply.
     func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration {
     	b := &PodDisruptionBudgetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
    index 67d9ba6bba..2917145451 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
    @@ -24,7 +24,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use
    +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use
     // with apply.
     type PodDisruptionBudgetSpecApplyConfiguration struct {
     	MinAvailable               *intstr.IntOrString                      `json:"minAvailable,omitempty"`
    @@ -33,7 +33,7 @@ type PodDisruptionBudgetSpecApplyConfiguration struct {
     	UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"`
     }
     
    -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with
    +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with
     // apply.
     func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration {
     	return &PodDisruptionBudgetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
    index 2dd427b9e1..d0f9baf41c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use
    +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use
     // with apply.
     type PodDisruptionBudgetStatusApplyConfiguration struct {
     	ObservedGeneration *int64                               `json:"observedGeneration,omitempty"`
    @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct {
     	Conditions         []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with
    +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with
     // apply.
     func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration {
     	return &PodDisruptionBudgetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
    index d2a361d1b5..d4121af206 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use
    +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use
     // with apply.
     type EvictionApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct {
     	DeleteOptions                    *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
     }
     
    -// Eviction constructs an declarative configuration of the Eviction type for use with
    +// Eviction constructs a declarative configuration of the Eviction type for use with
     // apply.
     func Eviction(name, namespace string) *EvictionApplyConfiguration {
     	b := &EvictionApplyConfiguration{}
    @@ -247,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp
     	b.DeleteOptions = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *EvictionApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
    index cef51a279c..813b57bae7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use
    +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use
     // with apply.
     type PodDisruptionBudgetApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct {
     	Status                           *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with
    +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with
     // apply.
     func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration {
     	b := &PodDisruptionBudgetApplyConfiguration{}
    @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
    index 0ba3ea1c2e..405f1148b0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
    @@ -24,7 +24,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use
    +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use
     // with apply.
     type PodDisruptionBudgetSpecApplyConfiguration struct {
     	MinAvailable               *intstr.IntOrString                     `json:"minAvailable,omitempty"`
    @@ -33,7 +33,7 @@ type PodDisruptionBudgetSpecApplyConfiguration struct {
     	UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"`
     }
     
    -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with
    +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with
     // apply.
     func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration {
     	return &PodDisruptionBudgetSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go
    index d0813590e1..e66a7fb386 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go
    @@ -23,7 +23,7 @@ import (
     	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use
    +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use
     // with apply.
     type PodDisruptionBudgetStatusApplyConfiguration struct {
     	ObservedGeneration *int64                               `json:"observedGeneration,omitempty"`
    @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct {
     	Conditions         []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
     }
     
    -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with
    +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with
     // apply.
     func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration {
     	return &PodDisruptionBudgetStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
    index fda9205c21..5ae4dc37ff 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use
    +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use
     // with apply.
     type AggregationRuleApplyConfiguration struct {
     	ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
     }
     
    -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with
    +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with
     // apply.
     func AggregationRule() *AggregationRuleApplyConfiguration {
     	return &AggregationRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
    index 3a5660fe19..c5b0075ec0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use
    +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
     // with apply.
     type ClusterRoleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct {
     	AggregationRule                  *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
     }
     
    -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with
    +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
     // apply.
     func ClusterRole(name string) *ClusterRoleApplyConfiguration {
     	b := &ClusterRoleApplyConfiguration{}
    @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu
     	b.AggregationRule = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ClusterRoleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
    index 625ad72c44..91a9d5df31 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use
    +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
     // with apply.
     type ClusterRoleBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct {
     	RoleRef                          *RoleRefApplyConfiguration  `json:"roleRef,omitempty"`
     }
     
    -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with
    +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
     // apply.
     func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
     	b := &ClusterRoleBindingApplyConfiguration{}
    @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo
     	b.RoleRef = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go
    index 65ee1d4fe5..a2e66d1096 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use
    +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use
     // with apply.
     type PolicyRuleApplyConfiguration struct {
     	Verbs           []string `json:"verbs,omitempty"`
    @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct {
     	NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
     }
     
    -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with
    +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with
     // apply.
     func PolicyRule() *PolicyRuleApplyConfiguration {
     	return &PolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
    index 97df25fb65..b51f904267 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RoleApplyConfiguration represents an declarative configuration of the Role type for use
    +// RoleApplyConfiguration represents a declarative configuration of the Role type for use
     // with apply.
     type RoleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct {
     	Rules                            []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
     }
     
    -// Role constructs an declarative configuration of the Role type for use with
    +// Role constructs a declarative configuration of the Role type for use with
     // apply.
     func Role(name, namespace string) *RoleApplyConfiguration {
     	b := &RoleApplyConfiguration{}
    @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RoleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
    index 7270f07e49..e59c8e6d30 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use
    +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use
     // with apply.
     type RoleBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct {
     	RoleRef                          *RoleRefApplyConfiguration  `json:"roleRef,omitempty"`
     }
     
    -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with
    +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with
     // apply.
     func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
     	b := &RoleBindingApplyConfiguration{}
    @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura
     	b.RoleRef = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RoleBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go
    index ef03a48827..646a3bb194 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use
    +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use
     // with apply.
     type RoleRefApplyConfiguration struct {
     	APIGroup *string `json:"apiGroup,omitempty"`
    @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct {
     	Name     *string `json:"name,omitempty"`
     }
     
    -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with
    +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with
     // apply.
     func RoleRef() *RoleRefApplyConfiguration {
     	return &RoleRefApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go
    index ebc87fdc45..e1d9c5cfb8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use
    +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
     // with apply.
     type SubjectApplyConfiguration struct {
     	Kind      *string `json:"kind,omitempty"`
    @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     }
     
    -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with
    +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
     // apply.
     func Subject() *SubjectApplyConfiguration {
     	return &SubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go
    index 63cdc3fcca..ff4aeb59e5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use
    +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use
     // with apply.
     type AggregationRuleApplyConfiguration struct {
     	ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
     }
     
    -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with
    +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with
     // apply.
     func AggregationRule() *AggregationRuleApplyConfiguration {
     	return &AggregationRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
    index 19b1180fad..dc0e34e53b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use
    +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
     // with apply.
     type ClusterRoleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct {
     	AggregationRule                  *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
     }
     
    -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with
    +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
     // apply.
     func ClusterRole(name string) *ClusterRoleApplyConfiguration {
     	b := &ClusterRoleApplyConfiguration{}
    @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu
     	b.AggregationRule = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ClusterRoleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
    index a1723efc35..d3c12ec508 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use
    +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
     // with apply.
     type ClusterRoleBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct {
     	RoleRef                          *RoleRefApplyConfiguration  `json:"roleRef,omitempty"`
     }
     
    -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with
    +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
     // apply.
     func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
     	b := &ClusterRoleBindingApplyConfiguration{}
    @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo
     	b.RoleRef = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go
    index 12143af130..89d7a2914f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use
    +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use
     // with apply.
     type PolicyRuleApplyConfiguration struct {
     	Verbs           []string `json:"verbs,omitempty"`
    @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct {
     	NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
     }
     
    -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with
    +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with
     // apply.
     func PolicyRule() *PolicyRuleApplyConfiguration {
     	return &PolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
    index cd256397a2..db0a4f7169 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RoleApplyConfiguration represents an declarative configuration of the Role type for use
    +// RoleApplyConfiguration represents a declarative configuration of the Role type for use
     // with apply.
     type RoleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct {
     	Rules                            []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
     }
     
    -// Role constructs an declarative configuration of the Role type for use with
    +// Role constructs a declarative configuration of the Role type for use with
     // apply.
     func Role(name, namespace string) *RoleApplyConfiguration {
     	b := &RoleApplyConfiguration{}
    @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RoleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
    index a0ec20d0b1..8efcddd69d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use
    +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use
     // with apply.
     type RoleBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct {
     	RoleRef                          *RoleRefApplyConfiguration  `json:"roleRef,omitempty"`
     }
     
    -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with
    +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with
     // apply.
     func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
     	b := &RoleBindingApplyConfiguration{}
    @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura
     	b.RoleRef = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RoleBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go
    index 40dbc33073..4b2553117d 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use
    +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use
     // with apply.
     type RoleRefApplyConfiguration struct {
     	APIGroup *string `json:"apiGroup,omitempty"`
    @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct {
     	Name     *string `json:"name,omitempty"`
     }
     
    -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with
    +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with
     // apply.
     func RoleRef() *RoleRefApplyConfiguration {
     	return &RoleRefApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go
    index 46640dbbe9..665b42af50 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use
    +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
     // with apply.
     type SubjectApplyConfiguration struct {
     	Kind       *string `json:"kind,omitempty"`
    @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct {
     	Namespace  *string `json:"namespace,omitempty"`
     }
     
    -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with
    +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
     // apply.
     func Subject() *SubjectApplyConfiguration {
     	return &SubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go
    index d52ac3db9b..e9bb68dcb6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go
    @@ -22,13 +22,13 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use
    +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use
     // with apply.
     type AggregationRuleApplyConfiguration struct {
     	ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
     }
     
    -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with
    +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with
     // apply.
     func AggregationRule() *AggregationRuleApplyConfiguration {
     	return &AggregationRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
    index cf714ecc27..5e9c238540 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use
    +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
     // with apply.
     type ClusterRoleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct {
     	AggregationRule                  *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
     }
     
    -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with
    +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
     // apply.
     func ClusterRole(name string) *ClusterRoleApplyConfiguration {
     	b := &ClusterRoleApplyConfiguration{}
    @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu
     	b.AggregationRule = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ClusterRoleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
    index b97cbcba2f..2f088b93e5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use
    +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
     // with apply.
     type ClusterRoleBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct {
     	RoleRef                          *RoleRefApplyConfiguration  `json:"roleRef,omitempty"`
     }
     
    -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with
    +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
     // apply.
     func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
     	b := &ClusterRoleBindingApplyConfiguration{}
    @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo
     	b.RoleRef = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go
    index c63dc68c6b..dc630df206 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use
    +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use
     // with apply.
     type PolicyRuleApplyConfiguration struct {
     	Verbs           []string `json:"verbs,omitempty"`
    @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct {
     	NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
     }
     
    -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with
    +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with
     // apply.
     func PolicyRule() *PolicyRuleApplyConfiguration {
     	return &PolicyRuleApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
    index 53a751eb34..4b1b6112bd 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RoleApplyConfiguration represents an declarative configuration of the Role type for use
    +// RoleApplyConfiguration represents a declarative configuration of the Role type for use
     // with apply.
     type RoleApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct {
     	Rules                            []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
     }
     
    -// Role constructs an declarative configuration of the Role type for use with
    +// Role constructs a declarative configuration of the Role type for use with
     // apply.
     func Role(name, namespace string) *RoleApplyConfiguration {
     	b := &RoleApplyConfiguration{}
    @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RoleApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
    index ecccdf91b1..246928553f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use
    +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use
     // with apply.
     type RoleBindingApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct {
     	RoleRef                          *RoleRefApplyConfiguration  `json:"roleRef,omitempty"`
     }
     
    -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with
    +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with
     // apply.
     func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
     	b := &RoleBindingApplyConfiguration{}
    @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura
     	b.RoleRef = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *RoleBindingApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go
    index e6a02dc602..19d0420a81 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use
    +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use
     // with apply.
     type RoleRefApplyConfiguration struct {
     	APIGroup *string `json:"apiGroup,omitempty"`
    @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct {
     	Name     *string `json:"name,omitempty"`
     }
     
    -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with
    +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with
     // apply.
     func RoleRef() *RoleRefApplyConfiguration {
     	return &RoleRefApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go
    index b616da8b13..f7c1a21a9c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use
    +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
     // with apply.
     type SubjectApplyConfiguration struct {
     	Kind      *string `json:"kind,omitempty"`
    @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct {
     	Namespace *string `json:"namespace,omitempty"`
     }
     
    -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with
    +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
     // apply.
     func Subject() *SubjectApplyConfiguration {
     	return &SubjectApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go
    deleted file mode 100644
    index bc6078aa94..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go
    +++ /dev/null
    @@ -1,66 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	v1 "k8s.io/client-go/applyconfigurations/core/v1"
    -)
    -
    -// AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use
    -// with apply.
    -type AllocationResultApplyConfiguration struct {
    -	ResourceHandles  []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"`
    -	AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"`
    -	Shareable        *bool                              `json:"shareable,omitempty"`
    -}
    -
    -// AllocationResultApplyConfiguration constructs an declarative configuration of the AllocationResult type for use with
    -// apply.
    -func AllocationResult() *AllocationResultApplyConfiguration {
    -	return &AllocationResultApplyConfiguration{}
    -}
    -
    -// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the ResourceHandles field.
    -func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithResourceHandles")
    -		}
    -		b.ResourceHandles = append(b.ResourceHandles, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithAvailableOnNodes sets the AvailableOnNodes field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the AvailableOnNodes field is set to the value of the last call.
    -func (b *AllocationResultApplyConfiguration) WithAvailableOnNodes(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration {
    -	b.AvailableOnNodes = value
    -	return b
    -}
    -
    -// WithShareable sets the Shareable field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Shareable field is set to the value of the last call.
    -func (b *AllocationResultApplyConfiguration) WithShareable(value bool) *AllocationResultApplyConfiguration {
    -	b.Shareable = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go
    deleted file mode 100644
    index 0c8be0e6aa..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// AllocationResultModelApplyConfiguration represents an declarative configuration of the AllocationResultModel type for use
    -// with apply.
    -type AllocationResultModelApplyConfiguration struct {
    -	NamedResources *NamedResourcesAllocationResultApplyConfiguration `json:"namedResources,omitempty"`
    -}
    -
    -// AllocationResultModelApplyConfiguration constructs an declarative configuration of the AllocationResultModel type for use with
    -// apply.
    -func AllocationResultModel() *AllocationResultModelApplyConfiguration {
    -	return &AllocationResultModelApplyConfiguration{}
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *AllocationResultModelApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *AllocationResultModelApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go
    deleted file mode 100644
    index a1f082fad7..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go
    +++ /dev/null
    @@ -1,52 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	runtime "k8s.io/apimachinery/pkg/runtime"
    -)
    -
    -// DriverAllocationResultApplyConfiguration represents an declarative configuration of the DriverAllocationResult type for use
    -// with apply.
    -type DriverAllocationResultApplyConfiguration struct {
    -	VendorRequestParameters                 *runtime.RawExtension `json:"vendorRequestParameters,omitempty"`
    -	AllocationResultModelApplyConfiguration `json:",inline"`
    -}
    -
    -// DriverAllocationResultApplyConfiguration constructs an declarative configuration of the DriverAllocationResult type for use with
    -// apply.
    -func DriverAllocationResult() *DriverAllocationResultApplyConfiguration {
    -	return &DriverAllocationResultApplyConfiguration{}
    -}
    -
    -// WithVendorRequestParameters sets the VendorRequestParameters field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the VendorRequestParameters field is set to the value of the last call.
    -func (b *DriverAllocationResultApplyConfiguration) WithVendorRequestParameters(value runtime.RawExtension) *DriverAllocationResultApplyConfiguration {
    -	b.VendorRequestParameters = &value
    -	return b
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *DriverAllocationResultApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *DriverAllocationResultApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go
    deleted file mode 100644
    index 8052915784..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go
    +++ /dev/null
    @@ -1,66 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	runtime "k8s.io/apimachinery/pkg/runtime"
    -)
    -
    -// DriverRequestsApplyConfiguration represents an declarative configuration of the DriverRequests type for use
    -// with apply.
    -type DriverRequestsApplyConfiguration struct {
    -	DriverName       *string                             `json:"driverName,omitempty"`
    -	VendorParameters *runtime.RawExtension               `json:"vendorParameters,omitempty"`
    -	Requests         []ResourceRequestApplyConfiguration `json:"requests,omitempty"`
    -}
    -
    -// DriverRequestsApplyConfiguration constructs an declarative configuration of the DriverRequests type for use with
    -// apply.
    -func DriverRequests() *DriverRequestsApplyConfiguration {
    -	return &DriverRequestsApplyConfiguration{}
    -}
    -
    -// WithDriverName sets the DriverName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DriverName field is set to the value of the last call.
    -func (b *DriverRequestsApplyConfiguration) WithDriverName(value string) *DriverRequestsApplyConfiguration {
    -	b.DriverName = &value
    -	return b
    -}
    -
    -// WithVendorParameters sets the VendorParameters field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the VendorParameters field is set to the value of the last call.
    -func (b *DriverRequestsApplyConfiguration) WithVendorParameters(value runtime.RawExtension) *DriverRequestsApplyConfiguration {
    -	b.VendorParameters = &value
    -	return b
    -}
    -
    -// WithRequests adds the given value to the Requests field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Requests field.
    -func (b *DriverRequestsApplyConfiguration) WithRequests(values ...*ResourceRequestApplyConfiguration) *DriverRequestsApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithRequests")
    -		}
    -		b.Requests = append(b.Requests, *values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go
    deleted file mode 100644
    index 311edbac80..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// NamedResourcesAllocationResultApplyConfiguration represents an declarative configuration of the NamedResourcesAllocationResult type for use
    -// with apply.
    -type NamedResourcesAllocationResultApplyConfiguration struct {
    -	Name *string `json:"name,omitempty"`
    -}
    -
    -// NamedResourcesAllocationResultApplyConfiguration constructs an declarative configuration of the NamedResourcesAllocationResult type for use with
    -// apply.
    -func NamedResourcesAllocationResult() *NamedResourcesAllocationResultApplyConfiguration {
    -	return &NamedResourcesAllocationResultApplyConfiguration{}
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *NamedResourcesAllocationResultApplyConfiguration) WithName(value string) *NamedResourcesAllocationResultApplyConfiguration {
    -	b.Name = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go
    deleted file mode 100644
    index d9545d054f..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go
    +++ /dev/null
    @@ -1,100 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resource "k8s.io/apimachinery/pkg/api/resource"
    -)
    -
    -// NamedResourcesAttributeApplyConfiguration represents an declarative configuration of the NamedResourcesAttribute type for use
    -// with apply.
    -type NamedResourcesAttributeApplyConfiguration struct {
    -	Name                                           *string `json:"name,omitempty"`
    -	NamedResourcesAttributeValueApplyConfiguration `json:",inline"`
    -}
    -
    -// NamedResourcesAttributeApplyConfiguration constructs an declarative configuration of the NamedResourcesAttribute type for use with
    -// apply.
    -func NamedResourcesAttribute() *NamedResourcesAttributeApplyConfiguration {
    -	return &NamedResourcesAttributeApplyConfiguration{}
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithName(value string) *NamedResourcesAttributeApplyConfiguration {
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the QuantityValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeApplyConfiguration {
    -	b.QuantityValue = &value
    -	return b
    -}
    -
    -// WithBoolValue sets the BoolValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the BoolValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeApplyConfiguration {
    -	b.BoolValue = &value
    -	return b
    -}
    -
    -// WithIntValue sets the IntValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the IntValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeApplyConfiguration {
    -	b.IntValue = &value
    -	return b
    -}
    -
    -// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the IntSliceValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration {
    -	b.IntSliceValue = value
    -	return b
    -}
    -
    -// WithStringValue sets the StringValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the StringValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeApplyConfiguration {
    -	b.StringValue = &value
    -	return b
    -}
    -
    -// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the StringSliceValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration {
    -	b.StringSliceValue = value
    -	return b
    -}
    -
    -// WithVersionValue sets the VersionValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the VersionValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeApplyConfiguration {
    -	b.VersionValue = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go
    deleted file mode 100644
    index e0b19650a9..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go
    +++ /dev/null
    @@ -1,97 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resource "k8s.io/apimachinery/pkg/api/resource"
    -)
    -
    -// NamedResourcesAttributeValueApplyConfiguration represents an declarative configuration of the NamedResourcesAttributeValue type for use
    -// with apply.
    -type NamedResourcesAttributeValueApplyConfiguration struct {
    -	QuantityValue    *resource.Quantity                           `json:"quantity,omitempty"`
    -	BoolValue        *bool                                        `json:"bool,omitempty"`
    -	IntValue         *int64                                       `json:"int,omitempty"`
    -	IntSliceValue    *NamedResourcesIntSliceApplyConfiguration    `json:"intSlice,omitempty"`
    -	StringValue      *string                                      `json:"string,omitempty"`
    -	StringSliceValue *NamedResourcesStringSliceApplyConfiguration `json:"stringSlice,omitempty"`
    -	VersionValue     *string                                      `json:"version,omitempty"`
    -}
    -
    -// NamedResourcesAttributeValueApplyConfiguration constructs an declarative configuration of the NamedResourcesAttributeValue type for use with
    -// apply.
    -func NamedResourcesAttributeValue() *NamedResourcesAttributeValueApplyConfiguration {
    -	return &NamedResourcesAttributeValueApplyConfiguration{}
    -}
    -
    -// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the QuantityValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeValueApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeValueApplyConfiguration {
    -	b.QuantityValue = &value
    -	return b
    -}
    -
    -// WithBoolValue sets the BoolValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the BoolValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeValueApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeValueApplyConfiguration {
    -	b.BoolValue = &value
    -	return b
    -}
    -
    -// WithIntValue sets the IntValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the IntValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeValueApplyConfiguration {
    -	b.IntValue = &value
    -	return b
    -}
    -
    -// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the IntSliceValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration {
    -	b.IntSliceValue = value
    -	return b
    -}
    -
    -// WithStringValue sets the StringValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the StringValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeValueApplyConfiguration {
    -	b.StringValue = &value
    -	return b
    -}
    -
    -// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the StringSliceValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration {
    -	b.StringSliceValue = value
    -	return b
    -}
    -
    -// WithVersionValue sets the VersionValue field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the VersionValue field is set to the value of the last call.
    -func (b *NamedResourcesAttributeValueApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeValueApplyConfiguration {
    -	b.VersionValue = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go
    deleted file mode 100644
    index e483d8622f..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// NamedResourcesFilterApplyConfiguration represents an declarative configuration of the NamedResourcesFilter type for use
    -// with apply.
    -type NamedResourcesFilterApplyConfiguration struct {
    -	Selector *string `json:"selector,omitempty"`
    -}
    -
    -// NamedResourcesFilterApplyConfiguration constructs an declarative configuration of the NamedResourcesFilter type for use with
    -// apply.
    -func NamedResourcesFilter() *NamedResourcesFilterApplyConfiguration {
    -	return &NamedResourcesFilterApplyConfiguration{}
    -}
    -
    -// WithSelector sets the Selector field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Selector field is set to the value of the last call.
    -func (b *NamedResourcesFilterApplyConfiguration) WithSelector(value string) *NamedResourcesFilterApplyConfiguration {
    -	b.Selector = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go
    deleted file mode 100644
    index 4f01372e4c..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go
    +++ /dev/null
    @@ -1,53 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// NamedResourcesInstanceApplyConfiguration represents an declarative configuration of the NamedResourcesInstance type for use
    -// with apply.
    -type NamedResourcesInstanceApplyConfiguration struct {
    -	Name       *string                                     `json:"name,omitempty"`
    -	Attributes []NamedResourcesAttributeApplyConfiguration `json:"attributes,omitempty"`
    -}
    -
    -// NamedResourcesInstanceApplyConfiguration constructs an declarative configuration of the NamedResourcesInstance type for use with
    -// apply.
    -func NamedResourcesInstance() *NamedResourcesInstanceApplyConfiguration {
    -	return &NamedResourcesInstanceApplyConfiguration{}
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *NamedResourcesInstanceApplyConfiguration) WithName(value string) *NamedResourcesInstanceApplyConfiguration {
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithAttributes adds the given value to the Attributes field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Attributes field.
    -func (b *NamedResourcesInstanceApplyConfiguration) WithAttributes(values ...*NamedResourcesAttributeApplyConfiguration) *NamedResourcesInstanceApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithAttributes")
    -		}
    -		b.Attributes = append(b.Attributes, *values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go
    deleted file mode 100644
    index ea00bffe51..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go
    +++ /dev/null
    @@ -1,41 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// NamedResourcesIntSliceApplyConfiguration represents an declarative configuration of the NamedResourcesIntSlice type for use
    -// with apply.
    -type NamedResourcesIntSliceApplyConfiguration struct {
    -	Ints []int64 `json:"ints,omitempty"`
    -}
    -
    -// NamedResourcesIntSliceApplyConfiguration constructs an declarative configuration of the NamedResourcesIntSlice type for use with
    -// apply.
    -func NamedResourcesIntSlice() *NamedResourcesIntSliceApplyConfiguration {
    -	return &NamedResourcesIntSliceApplyConfiguration{}
    -}
    -
    -// WithInts adds the given value to the Ints field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Ints field.
    -func (b *NamedResourcesIntSliceApplyConfiguration) WithInts(values ...int64) *NamedResourcesIntSliceApplyConfiguration {
    -	for i := range values {
    -		b.Ints = append(b.Ints, values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go
    deleted file mode 100644
    index 5adfd84ee5..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// NamedResourcesRequestApplyConfiguration represents an declarative configuration of the NamedResourcesRequest type for use
    -// with apply.
    -type NamedResourcesRequestApplyConfiguration struct {
    -	Selector *string `json:"selector,omitempty"`
    -}
    -
    -// NamedResourcesRequestApplyConfiguration constructs an declarative configuration of the NamedResourcesRequest type for use with
    -// apply.
    -func NamedResourcesRequest() *NamedResourcesRequestApplyConfiguration {
    -	return &NamedResourcesRequestApplyConfiguration{}
    -}
    -
    -// WithSelector sets the Selector field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Selector field is set to the value of the last call.
    -func (b *NamedResourcesRequestApplyConfiguration) WithSelector(value string) *NamedResourcesRequestApplyConfiguration {
    -	b.Selector = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go
    deleted file mode 100644
    index f01ff8699a..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go
    +++ /dev/null
    @@ -1,44 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// NamedResourcesResourcesApplyConfiguration represents an declarative configuration of the NamedResourcesResources type for use
    -// with apply.
    -type NamedResourcesResourcesApplyConfiguration struct {
    -	Instances []NamedResourcesInstanceApplyConfiguration `json:"instances,omitempty"`
    -}
    -
    -// NamedResourcesResourcesApplyConfiguration constructs an declarative configuration of the NamedResourcesResources type for use with
    -// apply.
    -func NamedResourcesResources() *NamedResourcesResourcesApplyConfiguration {
    -	return &NamedResourcesResourcesApplyConfiguration{}
    -}
    -
    -// WithInstances adds the given value to the Instances field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Instances field.
    -func (b *NamedResourcesResourcesApplyConfiguration) WithInstances(values ...*NamedResourcesInstanceApplyConfiguration) *NamedResourcesResourcesApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithInstances")
    -		}
    -		b.Instances = append(b.Instances, *values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go
    deleted file mode 100644
    index 1e93873546..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go
    +++ /dev/null
    @@ -1,41 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// NamedResourcesStringSliceApplyConfiguration represents an declarative configuration of the NamedResourcesStringSlice type for use
    -// with apply.
    -type NamedResourcesStringSliceApplyConfiguration struct {
    -	Strings []string `json:"strings,omitempty"`
    -}
    -
    -// NamedResourcesStringSliceApplyConfiguration constructs an declarative configuration of the NamedResourcesStringSlice type for use with
    -// apply.
    -func NamedResourcesStringSlice() *NamedResourcesStringSliceApplyConfiguration {
    -	return &NamedResourcesStringSliceApplyConfiguration{}
    -}
    -
    -// WithStrings adds the given value to the Strings field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Strings field.
    -func (b *NamedResourcesStringSliceApplyConfiguration) WithStrings(values ...string) *NamedResourcesStringSliceApplyConfiguration {
    -	for i := range values {
    -		b.Strings = append(b.Strings, values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go
    deleted file mode 100644
    index 1dfb6ff97b..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go
    +++ /dev/null
    @@ -1,258 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    -	internal "k8s.io/client-go/applyconfigurations/internal"
    -	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    -)
    -
    -// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use
    -// with apply.
    -type PodSchedulingContextApplyConfiguration struct {
    -	v1.TypeMetaApplyConfiguration    `json:",inline"`
    -	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    -	Spec                             *PodSchedulingContextSpecApplyConfiguration   `json:"spec,omitempty"`
    -	Status                           *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"`
    -}
    -
    -// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with
    -// apply.
    -func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration {
    -	b := &PodSchedulingContextApplyConfiguration{}
    -	b.WithName(name)
    -	b.WithNamespace(namespace)
    -	b.WithKind("PodSchedulingContext")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b
    -}
    -
    -// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from
    -// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a
    -// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable),
    -// APIVersion and Kind populated. It is possible that no managed fields were found for because other
    -// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    -// the fieldManager never owned fields any fields.
    -// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API.
    -// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow.
    -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    -// applied if another fieldManager has updated or force applied any of the previously applied fields.
    -// Experimental!
    -func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
    -	return extractPodSchedulingContext(podSchedulingContext, fieldManager, "")
    -}
    -
    -// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except
    -// that it extracts the status subresource applied configuration.
    -// Experimental!
    -func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
    -	return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status")
    -}
    -
    -func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) {
    -	b := &PodSchedulingContextApplyConfiguration{}
    -	err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource)
    -	if err != nil {
    -		return nil, err
    -	}
    -	b.WithName(podSchedulingContext.Name)
    -	b.WithNamespace(podSchedulingContext.Namespace)
    -
    -	b.WithKind("PodSchedulingContext")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b, nil
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIVersion field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration {
    -	b.APIVersion = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GenerateName field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.GenerateName = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Namespace = &value
    -	return b
    -}
    -
    -// WithUID sets the UID field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the UID field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.UID = &value
    -	return b
    -}
    -
    -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceVersion field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.ResourceVersion = &value
    -	return b
    -}
    -
    -// WithGeneration sets the Generation field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Generation field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Generation = &value
    -	return b
    -}
    -
    -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.CreationTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionGracePeriodSeconds = &value
    -	return b
    -}
    -
    -// WithLabels puts the entries into the Labels field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Labels field,
    -// overwriting an existing map entries in Labels field with the same key.
    -func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Labels == nil && len(entries) > 0 {
    -		b.Labels = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Labels[k] = v
    -	}
    -	return b
    -}
    -
    -// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Annotations field,
    -// overwriting an existing map entries in Annotations field with the same key.
    -func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Annotations == nil && len(entries) > 0 {
    -		b.Annotations = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Annotations[k] = v
    -	}
    -	return b
    -}
    -
    -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    -func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithOwnerReferences")
    -		}
    -		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Finalizers field.
    -func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		b.Finalizers = append(b.Finalizers, values[i])
    -	}
    -	return b
    -}
    -
    -func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    -	if b.ObjectMetaApplyConfiguration == nil {
    -		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    -	}
    -}
    -
    -// WithSpec sets the Spec field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Spec field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration {
    -	b.Spec = value
    -	return b
    -}
    -
    -// WithStatus sets the Status field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Status field is set to the value of the last call.
    -func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration {
    -	b.Status = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go
    deleted file mode 100644
    index 6c219f837b..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go
    +++ /dev/null
    @@ -1,258 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    -	internal "k8s.io/client-go/applyconfigurations/internal"
    -	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    -)
    -
    -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use
    -// with apply.
    -type ResourceClaimApplyConfiguration struct {
    -	v1.TypeMetaApplyConfiguration    `json:",inline"`
    -	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    -	Spec                             *ResourceClaimSpecApplyConfiguration   `json:"spec,omitempty"`
    -	Status                           *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
    -}
    -
    -// ResourceClaim constructs an declarative configuration of the ResourceClaim type for use with
    -// apply.
    -func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
    -	b := &ResourceClaimApplyConfiguration{}
    -	b.WithName(name)
    -	b.WithNamespace(namespace)
    -	b.WithKind("ResourceClaim")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b
    -}
    -
    -// ExtractResourceClaim extracts the applied configuration owned by fieldManager from
    -// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a
    -// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
    -// APIVersion and Kind populated. It is possible that no managed fields were found for because other
    -// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    -// the fieldManager never owned fields any fields.
    -// resourceClaim must be a unmodified ResourceClaim API object that was retrieved from the Kubernetes API.
    -// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow.
    -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    -// applied if another fieldManager has updated or force applied any of the previously applied fields.
    -// Experimental!
    -func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
    -	return extractResourceClaim(resourceClaim, fieldManager, "")
    -}
    -
    -// ExtractResourceClaimStatus is the same as ExtractResourceClaim except
    -// that it extracts the status subresource applied configuration.
    -// Experimental!
    -func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
    -	return extractResourceClaim(resourceClaim, fieldManager, "status")
    -}
    -
    -func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
    -	b := &ResourceClaimApplyConfiguration{}
    -	err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource)
    -	if err != nil {
    -		return nil, err
    -	}
    -	b.WithName(resourceClaim.Name)
    -	b.WithNamespace(resourceClaim.Namespace)
    -
    -	b.WithKind("ResourceClaim")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b, nil
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIVersion field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration {
    -	b.APIVersion = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GenerateName field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.GenerateName = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Namespace = &value
    -	return b
    -}
    -
    -// WithUID sets the UID field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the UID field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.UID = &value
    -	return b
    -}
    -
    -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceVersion field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.ResourceVersion = &value
    -	return b
    -}
    -
    -// WithGeneration sets the Generation field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Generation field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Generation = &value
    -	return b
    -}
    -
    -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.CreationTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionGracePeriodSeconds = &value
    -	return b
    -}
    -
    -// WithLabels puts the entries into the Labels field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Labels field,
    -// overwriting an existing map entries in Labels field with the same key.
    -func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Labels == nil && len(entries) > 0 {
    -		b.Labels = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Labels[k] = v
    -	}
    -	return b
    -}
    -
    -// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Annotations field,
    -// overwriting an existing map entries in Annotations field with the same key.
    -func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Annotations == nil && len(entries) > 0 {
    -		b.Annotations = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Annotations[k] = v
    -	}
    -	return b
    -}
    -
    -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    -func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithOwnerReferences")
    -		}
    -		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Finalizers field.
    -func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		b.Finalizers = append(b.Finalizers, values[i])
    -	}
    -	return b
    -}
    -
    -func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    -	if b.ObjectMetaApplyConfiguration == nil {
    -		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    -	}
    -}
    -
    -// WithSpec sets the Spec field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Spec field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration {
    -	b.Spec = value
    -	return b
    -}
    -
    -// WithStatus sets the Status field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Status field is set to the value of the last call.
    -func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration {
    -	b.Status = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go
    deleted file mode 100644
    index ea13570e33..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go
    +++ /dev/null
    @@ -1,272 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    -	internal "k8s.io/client-go/applyconfigurations/internal"
    -	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    -)
    -
    -// ResourceClaimParametersApplyConfiguration represents an declarative configuration of the ResourceClaimParameters type for use
    -// with apply.
    -type ResourceClaimParametersApplyConfiguration struct {
    -	v1.TypeMetaApplyConfiguration    `json:",inline"`
    -	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    -	GeneratedFrom                    *ResourceClaimParametersReferenceApplyConfiguration `json:"generatedFrom,omitempty"`
    -	Shareable                        *bool                                               `json:"shareable,omitempty"`
    -	DriverRequests                   []DriverRequestsApplyConfiguration                  `json:"driverRequests,omitempty"`
    -}
    -
    -// ResourceClaimParameters constructs an declarative configuration of the ResourceClaimParameters type for use with
    -// apply.
    -func ResourceClaimParameters(name, namespace string) *ResourceClaimParametersApplyConfiguration {
    -	b := &ResourceClaimParametersApplyConfiguration{}
    -	b.WithName(name)
    -	b.WithNamespace(namespace)
    -	b.WithKind("ResourceClaimParameters")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b
    -}
    -
    -// ExtractResourceClaimParameters extracts the applied configuration owned by fieldManager from
    -// resourceClaimParameters. If no managedFields are found in resourceClaimParameters for fieldManager, a
    -// ResourceClaimParametersApplyConfiguration is returned with only the Name, Namespace (if applicable),
    -// APIVersion and Kind populated. It is possible that no managed fields were found for because other
    -// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    -// the fieldManager never owned fields any fields.
    -// resourceClaimParameters must be a unmodified ResourceClaimParameters API object that was retrieved from the Kubernetes API.
    -// ExtractResourceClaimParameters provides a way to perform a extract/modify-in-place/apply workflow.
    -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    -// applied if another fieldManager has updated or force applied any of the previously applied fields.
    -// Experimental!
    -func ExtractResourceClaimParameters(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string) (*ResourceClaimParametersApplyConfiguration, error) {
    -	return extractResourceClaimParameters(resourceClaimParameters, fieldManager, "")
    -}
    -
    -// ExtractResourceClaimParametersStatus is the same as ExtractResourceClaimParameters except
    -// that it extracts the status subresource applied configuration.
    -// Experimental!
    -func ExtractResourceClaimParametersStatus(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string) (*ResourceClaimParametersApplyConfiguration, error) {
    -	return extractResourceClaimParameters(resourceClaimParameters, fieldManager, "status")
    -}
    -
    -func extractResourceClaimParameters(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string, subresource string) (*ResourceClaimParametersApplyConfiguration, error) {
    -	b := &ResourceClaimParametersApplyConfiguration{}
    -	err := managedfields.ExtractInto(resourceClaimParameters, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimParameters"), fieldManager, b, subresource)
    -	if err != nil {
    -		return nil, err
    -	}
    -	b.WithName(resourceClaimParameters.Name)
    -	b.WithNamespace(resourceClaimParameters.Namespace)
    -
    -	b.WithKind("ResourceClaimParameters")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b, nil
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithKind(value string) *ResourceClaimParametersApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIVersion field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithAPIVersion(value string) *ResourceClaimParametersApplyConfiguration {
    -	b.APIVersion = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithName(value string) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GenerateName field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithGenerateName(value string) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.GenerateName = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithNamespace(value string) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Namespace = &value
    -	return b
    -}
    -
    -// WithUID sets the UID field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the UID field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithUID(value types.UID) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.UID = &value
    -	return b
    -}
    -
    -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceVersion field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithResourceVersion(value string) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.ResourceVersion = &value
    -	return b
    -}
    -
    -// WithGeneration sets the Generation field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Generation field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithGeneration(value int64) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Generation = &value
    -	return b
    -}
    -
    -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.CreationTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionGracePeriodSeconds = &value
    -	return b
    -}
    -
    -// WithLabels puts the entries into the Labels field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Labels field,
    -// overwriting an existing map entries in Labels field with the same key.
    -func (b *ResourceClaimParametersApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Labels == nil && len(entries) > 0 {
    -		b.Labels = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Labels[k] = v
    -	}
    -	return b
    -}
    -
    -// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Annotations field,
    -// overwriting an existing map entries in Annotations field with the same key.
    -func (b *ResourceClaimParametersApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Annotations == nil && len(entries) > 0 {
    -		b.Annotations = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Annotations[k] = v
    -	}
    -	return b
    -}
    -
    -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    -func (b *ResourceClaimParametersApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithOwnerReferences")
    -		}
    -		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Finalizers field.
    -func (b *ResourceClaimParametersApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		b.Finalizers = append(b.Finalizers, values[i])
    -	}
    -	return b
    -}
    -
    -func (b *ResourceClaimParametersApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    -	if b.ObjectMetaApplyConfiguration == nil {
    -		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    -	}
    -}
    -
    -// WithGeneratedFrom sets the GeneratedFrom field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GeneratedFrom field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithGeneratedFrom(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimParametersApplyConfiguration {
    -	b.GeneratedFrom = value
    -	return b
    -}
    -
    -// WithShareable sets the Shareable field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Shareable field is set to the value of the last call.
    -func (b *ResourceClaimParametersApplyConfiguration) WithShareable(value bool) *ResourceClaimParametersApplyConfiguration {
    -	b.Shareable = &value
    -	return b
    -}
    -
    -// WithDriverRequests adds the given value to the DriverRequests field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the DriverRequests field.
    -func (b *ResourceClaimParametersApplyConfiguration) WithDriverRequests(values ...*DriverRequestsApplyConfiguration) *ResourceClaimParametersApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithDriverRequests")
    -		}
    -		b.DriverRequests = append(b.DriverRequests, *values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go
    deleted file mode 100644
    index 27820ede60..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go
    +++ /dev/null
    @@ -1,57 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use
    -// with apply.
    -type ResourceClaimParametersReferenceApplyConfiguration struct {
    -	APIGroup *string `json:"apiGroup,omitempty"`
    -	Kind     *string `json:"kind,omitempty"`
    -	Name     *string `json:"name,omitempty"`
    -}
    -
    -// ResourceClaimParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimParametersReference type for use with
    -// apply.
    -func ResourceClaimParametersReference() *ResourceClaimParametersReferenceApplyConfiguration {
    -	return &ResourceClaimParametersReferenceApplyConfiguration{}
    -}
    -
    -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIGroup field is set to the value of the last call.
    -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimParametersReferenceApplyConfiguration {
    -	b.APIGroup = &value
    -	return b
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClaimParametersReferenceApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithName(value string) *ResourceClaimParametersReferenceApplyConfiguration {
    -	b.Name = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go
    deleted file mode 100644
    index 0c73e64e9e..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go
    +++ /dev/null
    @@ -1,61 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -)
    -
    -// ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use
    -// with apply.
    -type ResourceClaimSpecApplyConfiguration struct {
    -	ResourceClassName *string                                             `json:"resourceClassName,omitempty"`
    -	ParametersRef     *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"`
    -	AllocationMode    *resourcev1alpha2.AllocationMode                    `json:"allocationMode,omitempty"`
    -}
    -
    -// ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with
    -// apply.
    -func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration {
    -	return &ResourceClaimSpecApplyConfiguration{}
    -}
    -
    -// WithResourceClassName sets the ResourceClassName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceClassName field is set to the value of the last call.
    -func (b *ResourceClaimSpecApplyConfiguration) WithResourceClassName(value string) *ResourceClaimSpecApplyConfiguration {
    -	b.ResourceClassName = &value
    -	return b
    -}
    -
    -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ParametersRef field is set to the value of the last call.
    -func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimSpecApplyConfiguration {
    -	b.ParametersRef = value
    -	return b
    -}
    -
    -// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the AllocationMode field is set to the value of the last call.
    -func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration {
    -	b.AllocationMode = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go
    deleted file mode 100644
    index fc2209b8f0..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go
    +++ /dev/null
    @@ -1,249 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    -	internal "k8s.io/client-go/applyconfigurations/internal"
    -	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    -)
    -
    -// ResourceClaimTemplateApplyConfiguration represents an declarative configuration of the ResourceClaimTemplate type for use
    -// with apply.
    -type ResourceClaimTemplateApplyConfiguration struct {
    -	v1.TypeMetaApplyConfiguration    `json:",inline"`
    -	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    -	Spec                             *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
    -}
    -
    -// ResourceClaimTemplate constructs an declarative configuration of the ResourceClaimTemplate type for use with
    -// apply.
    -func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration {
    -	b := &ResourceClaimTemplateApplyConfiguration{}
    -	b.WithName(name)
    -	b.WithNamespace(namespace)
    -	b.WithKind("ResourceClaimTemplate")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b
    -}
    -
    -// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
    -// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
    -// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
    -// APIVersion and Kind populated. It is possible that no managed fields were found for because other
    -// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    -// the fieldManager never owned fields any fields.
    -// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
    -// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow.
    -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    -// applied if another fieldManager has updated or force applied any of the previously applied fields.
    -// Experimental!
    -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
    -	return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "")
    -}
    -
    -// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except
    -// that it extracts the status subresource applied configuration.
    -// Experimental!
    -func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
    -	return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status")
    -}
    -
    -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
    -	b := &ResourceClaimTemplateApplyConfiguration{}
    -	err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource)
    -	if err != nil {
    -		return nil, err
    -	}
    -	b.WithName(resourceClaimTemplate.Name)
    -	b.WithNamespace(resourceClaimTemplate.Namespace)
    -
    -	b.WithKind("ResourceClaimTemplate")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b, nil
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIVersion field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration {
    -	b.APIVersion = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GenerateName field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.GenerateName = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Namespace = &value
    -	return b
    -}
    -
    -// WithUID sets the UID field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the UID field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.UID = &value
    -	return b
    -}
    -
    -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceVersion field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.ResourceVersion = &value
    -	return b
    -}
    -
    -// WithGeneration sets the Generation field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Generation field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Generation = &value
    -	return b
    -}
    -
    -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.CreationTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionGracePeriodSeconds = &value
    -	return b
    -}
    -
    -// WithLabels puts the entries into the Labels field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Labels field,
    -// overwriting an existing map entries in Labels field with the same key.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Labels == nil && len(entries) > 0 {
    -		b.Labels = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Labels[k] = v
    -	}
    -	return b
    -}
    -
    -// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Annotations field,
    -// overwriting an existing map entries in Annotations field with the same key.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Annotations == nil && len(entries) > 0 {
    -		b.Annotations = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Annotations[k] = v
    -	}
    -	return b
    -}
    -
    -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithOwnerReferences")
    -		}
    -		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Finalizers field.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		b.Finalizers = append(b.Finalizers, values[i])
    -	}
    -	return b
    -}
    -
    -func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    -	if b.ObjectMetaApplyConfiguration == nil {
    -		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    -	}
    -}
    -
    -// WithSpec sets the Spec field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Spec field is set to the value of the last call.
    -func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration {
    -	b.Spec = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go
    deleted file mode 100644
    index 364fda9d00..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go
    +++ /dev/null
    @@ -1,275 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    -	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    -	internal "k8s.io/client-go/applyconfigurations/internal"
    -	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    -)
    -
    -// ResourceClassApplyConfiguration represents an declarative configuration of the ResourceClass type for use
    -// with apply.
    -type ResourceClassApplyConfiguration struct {
    -	v1.TypeMetaApplyConfiguration    `json:",inline"`
    -	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    -	DriverName                       *string                                             `json:"driverName,omitempty"`
    -	ParametersRef                    *ResourceClassParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"`
    -	SuitableNodes                    *corev1.NodeSelectorApplyConfiguration              `json:"suitableNodes,omitempty"`
    -	StructuredParameters             *bool                                               `json:"structuredParameters,omitempty"`
    -}
    -
    -// ResourceClass constructs an declarative configuration of the ResourceClass type for use with
    -// apply.
    -func ResourceClass(name string) *ResourceClassApplyConfiguration {
    -	b := &ResourceClassApplyConfiguration{}
    -	b.WithName(name)
    -	b.WithKind("ResourceClass")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b
    -}
    -
    -// ExtractResourceClass extracts the applied configuration owned by fieldManager from
    -// resourceClass. If no managedFields are found in resourceClass for fieldManager, a
    -// ResourceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
    -// APIVersion and Kind populated. It is possible that no managed fields were found for because other
    -// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    -// the fieldManager never owned fields any fields.
    -// resourceClass must be a unmodified ResourceClass API object that was retrieved from the Kubernetes API.
    -// ExtractResourceClass provides a way to perform a extract/modify-in-place/apply workflow.
    -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    -// applied if another fieldManager has updated or force applied any of the previously applied fields.
    -// Experimental!
    -func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) {
    -	return extractResourceClass(resourceClass, fieldManager, "")
    -}
    -
    -// ExtractResourceClassStatus is the same as ExtractResourceClass except
    -// that it extracts the status subresource applied configuration.
    -// Experimental!
    -func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) {
    -	return extractResourceClass(resourceClass, fieldManager, "status")
    -}
    -
    -func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) {
    -	b := &ResourceClassApplyConfiguration{}
    -	err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource)
    -	if err != nil {
    -		return nil, err
    -	}
    -	b.WithName(resourceClass.Name)
    -
    -	b.WithKind("ResourceClass")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b, nil
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithKind(value string) *ResourceClassApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIVersion field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithAPIVersion(value string) *ResourceClassApplyConfiguration {
    -	b.APIVersion = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithName(value string) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GenerateName field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithGenerateName(value string) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.GenerateName = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithNamespace(value string) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Namespace = &value
    -	return b
    -}
    -
    -// WithUID sets the UID field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the UID field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithUID(value types.UID) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.UID = &value
    -	return b
    -}
    -
    -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceVersion field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithResourceVersion(value string) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.ResourceVersion = &value
    -	return b
    -}
    -
    -// WithGeneration sets the Generation field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Generation field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithGeneration(value int64) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Generation = &value
    -	return b
    -}
    -
    -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.CreationTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionGracePeriodSeconds = &value
    -	return b
    -}
    -
    -// WithLabels puts the entries into the Labels field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Labels field,
    -// overwriting an existing map entries in Labels field with the same key.
    -func (b *ResourceClassApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Labels == nil && len(entries) > 0 {
    -		b.Labels = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Labels[k] = v
    -	}
    -	return b
    -}
    -
    -// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Annotations field,
    -// overwriting an existing map entries in Annotations field with the same key.
    -func (b *ResourceClassApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Annotations == nil && len(entries) > 0 {
    -		b.Annotations = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Annotations[k] = v
    -	}
    -	return b
    -}
    -
    -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    -func (b *ResourceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithOwnerReferences")
    -		}
    -		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Finalizers field.
    -func (b *ResourceClassApplyConfiguration) WithFinalizers(values ...string) *ResourceClassApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		b.Finalizers = append(b.Finalizers, values[i])
    -	}
    -	return b
    -}
    -
    -func (b *ResourceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    -	if b.ObjectMetaApplyConfiguration == nil {
    -		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    -	}
    -}
    -
    -// WithDriverName sets the DriverName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DriverName field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithDriverName(value string) *ResourceClassApplyConfiguration {
    -	b.DriverName = &value
    -	return b
    -}
    -
    -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ParametersRef field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithParametersRef(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassApplyConfiguration {
    -	b.ParametersRef = value
    -	return b
    -}
    -
    -// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the SuitableNodes field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithSuitableNodes(value *corev1.NodeSelectorApplyConfiguration) *ResourceClassApplyConfiguration {
    -	b.SuitableNodes = value
    -	return b
    -}
    -
    -// WithStructuredParameters sets the StructuredParameters field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the StructuredParameters field is set to the value of the last call.
    -func (b *ResourceClassApplyConfiguration) WithStructuredParameters(value bool) *ResourceClassApplyConfiguration {
    -	b.StructuredParameters = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go
    deleted file mode 100644
    index 028d0d612d..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go
    +++ /dev/null
    @@ -1,277 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    -	internal "k8s.io/client-go/applyconfigurations/internal"
    -	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    -)
    -
    -// ResourceClassParametersApplyConfiguration represents an declarative configuration of the ResourceClassParameters type for use
    -// with apply.
    -type ResourceClassParametersApplyConfiguration struct {
    -	v1.TypeMetaApplyConfiguration    `json:",inline"`
    -	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    -	GeneratedFrom                    *ResourceClassParametersReferenceApplyConfiguration `json:"generatedFrom,omitempty"`
    -	VendorParameters                 []VendorParametersApplyConfiguration                `json:"vendorParameters,omitempty"`
    -	Filters                          []ResourceFilterApplyConfiguration                  `json:"filters,omitempty"`
    -}
    -
    -// ResourceClassParameters constructs an declarative configuration of the ResourceClassParameters type for use with
    -// apply.
    -func ResourceClassParameters(name, namespace string) *ResourceClassParametersApplyConfiguration {
    -	b := &ResourceClassParametersApplyConfiguration{}
    -	b.WithName(name)
    -	b.WithNamespace(namespace)
    -	b.WithKind("ResourceClassParameters")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b
    -}
    -
    -// ExtractResourceClassParameters extracts the applied configuration owned by fieldManager from
    -// resourceClassParameters. If no managedFields are found in resourceClassParameters for fieldManager, a
    -// ResourceClassParametersApplyConfiguration is returned with only the Name, Namespace (if applicable),
    -// APIVersion and Kind populated. It is possible that no managed fields were found for because other
    -// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    -// the fieldManager never owned fields any fields.
    -// resourceClassParameters must be a unmodified ResourceClassParameters API object that was retrieved from the Kubernetes API.
    -// ExtractResourceClassParameters provides a way to perform a extract/modify-in-place/apply workflow.
    -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    -// applied if another fieldManager has updated or force applied any of the previously applied fields.
    -// Experimental!
    -func ExtractResourceClassParameters(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string) (*ResourceClassParametersApplyConfiguration, error) {
    -	return extractResourceClassParameters(resourceClassParameters, fieldManager, "")
    -}
    -
    -// ExtractResourceClassParametersStatus is the same as ExtractResourceClassParameters except
    -// that it extracts the status subresource applied configuration.
    -// Experimental!
    -func ExtractResourceClassParametersStatus(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string) (*ResourceClassParametersApplyConfiguration, error) {
    -	return extractResourceClassParameters(resourceClassParameters, fieldManager, "status")
    -}
    -
    -func extractResourceClassParameters(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string, subresource string) (*ResourceClassParametersApplyConfiguration, error) {
    -	b := &ResourceClassParametersApplyConfiguration{}
    -	err := managedfields.ExtractInto(resourceClassParameters, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClassParameters"), fieldManager, b, subresource)
    -	if err != nil {
    -		return nil, err
    -	}
    -	b.WithName(resourceClassParameters.Name)
    -	b.WithNamespace(resourceClassParameters.Namespace)
    -
    -	b.WithKind("ResourceClassParameters")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b, nil
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithKind(value string) *ResourceClassParametersApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIVersion field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithAPIVersion(value string) *ResourceClassParametersApplyConfiguration {
    -	b.APIVersion = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithName(value string) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GenerateName field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithGenerateName(value string) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.GenerateName = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithNamespace(value string) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Namespace = &value
    -	return b
    -}
    -
    -// WithUID sets the UID field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the UID field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithUID(value types.UID) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.UID = &value
    -	return b
    -}
    -
    -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceVersion field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithResourceVersion(value string) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.ResourceVersion = &value
    -	return b
    -}
    -
    -// WithGeneration sets the Generation field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Generation field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithGeneration(value int64) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Generation = &value
    -	return b
    -}
    -
    -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.CreationTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionGracePeriodSeconds = &value
    -	return b
    -}
    -
    -// WithLabels puts the entries into the Labels field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Labels field,
    -// overwriting an existing map entries in Labels field with the same key.
    -func (b *ResourceClassParametersApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Labels == nil && len(entries) > 0 {
    -		b.Labels = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Labels[k] = v
    -	}
    -	return b
    -}
    -
    -// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Annotations field,
    -// overwriting an existing map entries in Annotations field with the same key.
    -func (b *ResourceClassParametersApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Annotations == nil && len(entries) > 0 {
    -		b.Annotations = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Annotations[k] = v
    -	}
    -	return b
    -}
    -
    -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    -func (b *ResourceClassParametersApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithOwnerReferences")
    -		}
    -		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Finalizers field.
    -func (b *ResourceClassParametersApplyConfiguration) WithFinalizers(values ...string) *ResourceClassParametersApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		b.Finalizers = append(b.Finalizers, values[i])
    -	}
    -	return b
    -}
    -
    -func (b *ResourceClassParametersApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    -	if b.ObjectMetaApplyConfiguration == nil {
    -		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    -	}
    -}
    -
    -// WithGeneratedFrom sets the GeneratedFrom field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GeneratedFrom field is set to the value of the last call.
    -func (b *ResourceClassParametersApplyConfiguration) WithGeneratedFrom(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassParametersApplyConfiguration {
    -	b.GeneratedFrom = value
    -	return b
    -}
    -
    -// WithVendorParameters adds the given value to the VendorParameters field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the VendorParameters field.
    -func (b *ResourceClassParametersApplyConfiguration) WithVendorParameters(values ...*VendorParametersApplyConfiguration) *ResourceClassParametersApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithVendorParameters")
    -		}
    -		b.VendorParameters = append(b.VendorParameters, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFilters adds the given value to the Filters field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Filters field.
    -func (b *ResourceClassParametersApplyConfiguration) WithFilters(values ...*ResourceFilterApplyConfiguration) *ResourceClassParametersApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithFilters")
    -		}
    -		b.Filters = append(b.Filters, *values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go
    deleted file mode 100644
    index d67e4d3977..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go
    +++ /dev/null
    @@ -1,66 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use
    -// with apply.
    -type ResourceClassParametersReferenceApplyConfiguration struct {
    -	APIGroup  *string `json:"apiGroup,omitempty"`
    -	Kind      *string `json:"kind,omitempty"`
    -	Name      *string `json:"name,omitempty"`
    -	Namespace *string `json:"namespace,omitempty"`
    -}
    -
    -// ResourceClassParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClassParametersReference type for use with
    -// apply.
    -func ResourceClassParametersReference() *ResourceClassParametersReferenceApplyConfiguration {
    -	return &ResourceClassParametersReferenceApplyConfiguration{}
    -}
    -
    -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIGroup field is set to the value of the last call.
    -func (b *ResourceClassParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClassParametersReferenceApplyConfiguration {
    -	b.APIGroup = &value
    -	return b
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceClassParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClassParametersReferenceApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceClassParametersReferenceApplyConfiguration) WithName(value string) *ResourceClassParametersReferenceApplyConfiguration {
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *ResourceClassParametersReferenceApplyConfiguration) WithNamespace(value string) *ResourceClassParametersReferenceApplyConfiguration {
    -	b.Namespace = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go
    deleted file mode 100644
    index 15371b44a9..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go
    +++ /dev/null
    @@ -1,48 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// ResourceFilterApplyConfiguration represents an declarative configuration of the ResourceFilter type for use
    -// with apply.
    -type ResourceFilterApplyConfiguration struct {
    -	DriverName                            *string `json:"driverName,omitempty"`
    -	ResourceFilterModelApplyConfiguration `json:",inline"`
    -}
    -
    -// ResourceFilterApplyConfiguration constructs an declarative configuration of the ResourceFilter type for use with
    -// apply.
    -func ResourceFilter() *ResourceFilterApplyConfiguration {
    -	return &ResourceFilterApplyConfiguration{}
    -}
    -
    -// WithDriverName sets the DriverName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DriverName field is set to the value of the last call.
    -func (b *ResourceFilterApplyConfiguration) WithDriverName(value string) *ResourceFilterApplyConfiguration {
    -	b.DriverName = &value
    -	return b
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *ResourceFilterApplyConfiguration) WithNamedResources(value *NamedResourcesFilterApplyConfiguration) *ResourceFilterApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go
    deleted file mode 100644
    index 4f8d138f71..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// ResourceFilterModelApplyConfiguration represents an declarative configuration of the ResourceFilterModel type for use
    -// with apply.
    -type ResourceFilterModelApplyConfiguration struct {
    -	NamedResources *NamedResourcesFilterApplyConfiguration `json:"namedResources,omitempty"`
    -}
    -
    -// ResourceFilterModelApplyConfiguration constructs an declarative configuration of the ResourceFilterModel type for use with
    -// apply.
    -func ResourceFilterModel() *ResourceFilterModelApplyConfiguration {
    -	return &ResourceFilterModelApplyConfiguration{}
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *ResourceFilterModelApplyConfiguration) WithNamedResources(value *NamedResourcesFilterApplyConfiguration) *ResourceFilterModelApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go
    deleted file mode 100644
    index b4f3da735d..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go
    +++ /dev/null
    @@ -1,57 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use
    -// with apply.
    -type ResourceHandleApplyConfiguration struct {
    -	DriverName     *string                                     `json:"driverName,omitempty"`
    -	Data           *string                                     `json:"data,omitempty"`
    -	StructuredData *StructuredResourceHandleApplyConfiguration `json:"structuredData,omitempty"`
    -}
    -
    -// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with
    -// apply.
    -func ResourceHandle() *ResourceHandleApplyConfiguration {
    -	return &ResourceHandleApplyConfiguration{}
    -}
    -
    -// WithDriverName sets the DriverName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DriverName field is set to the value of the last call.
    -func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration {
    -	b.DriverName = &value
    -	return b
    -}
    -
    -// WithData sets the Data field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Data field is set to the value of the last call.
    -func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration {
    -	b.Data = &value
    -	return b
    -}
    -
    -// WithStructuredData sets the StructuredData field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the StructuredData field is set to the value of the last call.
    -func (b *ResourceHandleApplyConfiguration) WithStructuredData(value *StructuredResourceHandleApplyConfiguration) *ResourceHandleApplyConfiguration {
    -	b.StructuredData = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go
    deleted file mode 100644
    index 8ad7bdf230..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// ResourceModelApplyConfiguration represents an declarative configuration of the ResourceModel type for use
    -// with apply.
    -type ResourceModelApplyConfiguration struct {
    -	NamedResources *NamedResourcesResourcesApplyConfiguration `json:"namedResources,omitempty"`
    -}
    -
    -// ResourceModelApplyConfiguration constructs an declarative configuration of the ResourceModel type for use with
    -// apply.
    -func ResourceModel() *ResourceModelApplyConfiguration {
    -	return &ResourceModelApplyConfiguration{}
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *ResourceModelApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *ResourceModelApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go
    deleted file mode 100644
    index 0243d06f89..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go
    +++ /dev/null
    @@ -1,52 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	runtime "k8s.io/apimachinery/pkg/runtime"
    -)
    -
    -// ResourceRequestApplyConfiguration represents an declarative configuration of the ResourceRequest type for use
    -// with apply.
    -type ResourceRequestApplyConfiguration struct {
    -	VendorParameters                       *runtime.RawExtension `json:"vendorParameters,omitempty"`
    -	ResourceRequestModelApplyConfiguration `json:",inline"`
    -}
    -
    -// ResourceRequestApplyConfiguration constructs an declarative configuration of the ResourceRequest type for use with
    -// apply.
    -func ResourceRequest() *ResourceRequestApplyConfiguration {
    -	return &ResourceRequestApplyConfiguration{}
    -}
    -
    -// WithVendorParameters sets the VendorParameters field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the VendorParameters field is set to the value of the last call.
    -func (b *ResourceRequestApplyConfiguration) WithVendorParameters(value runtime.RawExtension) *ResourceRequestApplyConfiguration {
    -	b.VendorParameters = &value
    -	return b
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *ResourceRequestApplyConfiguration) WithNamedResources(value *NamedResourcesRequestApplyConfiguration) *ResourceRequestApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go
    deleted file mode 100644
    index 35bd1d88fe..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -// ResourceRequestModelApplyConfiguration represents an declarative configuration of the ResourceRequestModel type for use
    -// with apply.
    -type ResourceRequestModelApplyConfiguration struct {
    -	NamedResources *NamedResourcesRequestApplyConfiguration `json:"namedResources,omitempty"`
    -}
    -
    -// ResourceRequestModelApplyConfiguration constructs an declarative configuration of the ResourceRequestModel type for use with
    -// apply.
    -func ResourceRequestModel() *ResourceRequestModelApplyConfiguration {
    -	return &ResourceRequestModelApplyConfiguration{}
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *ResourceRequestModelApplyConfiguration) WithNamedResources(value *NamedResourcesRequestApplyConfiguration) *ResourceRequestModelApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go
    deleted file mode 100644
    index ff737ce672..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go
    +++ /dev/null
    @@ -1,265 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    -	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    -	internal "k8s.io/client-go/applyconfigurations/internal"
    -	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    -)
    -
    -// ResourceSliceApplyConfiguration represents an declarative configuration of the ResourceSlice type for use
    -// with apply.
    -type ResourceSliceApplyConfiguration struct {
    -	v1.TypeMetaApplyConfiguration    `json:",inline"`
    -	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    -	NodeName                         *string `json:"nodeName,omitempty"`
    -	DriverName                       *string `json:"driverName,omitempty"`
    -	ResourceModelApplyConfiguration  `json:",inline"`
    -}
    -
    -// ResourceSlice constructs an declarative configuration of the ResourceSlice type for use with
    -// apply.
    -func ResourceSlice(name string) *ResourceSliceApplyConfiguration {
    -	b := &ResourceSliceApplyConfiguration{}
    -	b.WithName(name)
    -	b.WithKind("ResourceSlice")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b
    -}
    -
    -// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
    -// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
    -// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
    -// APIVersion and Kind populated. It is possible that no managed fields were found for because other
    -// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    -// the fieldManager never owned fields any fields.
    -// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
    -// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow.
    -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    -// applied if another fieldManager has updated or force applied any of the previously applied fields.
    -// Experimental!
    -func ExtractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
    -	return extractResourceSlice(resourceSlice, fieldManager, "")
    -}
    -
    -// ExtractResourceSliceStatus is the same as ExtractResourceSlice except
    -// that it extracts the status subresource applied configuration.
    -// Experimental!
    -func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
    -	return extractResourceSlice(resourceSlice, fieldManager, "status")
    -}
    -
    -func extractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
    -	b := &ResourceSliceApplyConfiguration{}
    -	err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceSlice"), fieldManager, b, subresource)
    -	if err != nil {
    -		return nil, err
    -	}
    -	b.WithName(resourceSlice.Name)
    -
    -	b.WithKind("ResourceSlice")
    -	b.WithAPIVersion("resource.k8s.io/v1alpha2")
    -	return b, nil
    -}
    -
    -// WithKind sets the Kind field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Kind field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration {
    -	b.Kind = &value
    -	return b
    -}
    -
    -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the APIVersion field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration {
    -	b.APIVersion = &value
    -	return b
    -}
    -
    -// WithName sets the Name field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Name field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Name = &value
    -	return b
    -}
    -
    -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the GenerateName field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.GenerateName = &value
    -	return b
    -}
    -
    -// WithNamespace sets the Namespace field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Namespace field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Namespace = &value
    -	return b
    -}
    -
    -// WithUID sets the UID field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the UID field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.UID = &value
    -	return b
    -}
    -
    -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the ResourceVersion field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.ResourceVersion = &value
    -	return b
    -}
    -
    -// WithGeneration sets the Generation field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Generation field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.Generation = &value
    -	return b
    -}
    -
    -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.CreationTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionTimestamp = &value
    -	return b
    -}
    -
    -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	b.DeletionGracePeriodSeconds = &value
    -	return b
    -}
    -
    -// WithLabels puts the entries into the Labels field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Labels field,
    -// overwriting an existing map entries in Labels field with the same key.
    -func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Labels == nil && len(entries) > 0 {
    -		b.Labels = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Labels[k] = v
    -	}
    -	return b
    -}
    -
    -// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, the entries provided by each call will be put on the Annotations field,
    -// overwriting an existing map entries in Annotations field with the same key.
    -func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	if b.Annotations == nil && len(entries) > 0 {
    -		b.Annotations = make(map[string]string, len(entries))
    -	}
    -	for k, v := range entries {
    -		b.Annotations[k] = v
    -	}
    -	return b
    -}
    -
    -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    -func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithOwnerReferences")
    -		}
    -		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    -	}
    -	return b
    -}
    -
    -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Finalizers field.
    -func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration {
    -	b.ensureObjectMetaApplyConfigurationExists()
    -	for i := range values {
    -		b.Finalizers = append(b.Finalizers, values[i])
    -	}
    -	return b
    -}
    -
    -func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    -	if b.ObjectMetaApplyConfiguration == nil {
    -		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    -	}
    -}
    -
    -// WithNodeName sets the NodeName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NodeName field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithNodeName(value string) *ResourceSliceApplyConfiguration {
    -	b.NodeName = &value
    -	return b
    -}
    -
    -// WithDriverName sets the DriverName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DriverName field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithDriverName(value string) *ResourceSliceApplyConfiguration {
    -	b.DriverName = &value
    -	return b
    -}
    -
    -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NamedResources field is set to the value of the last call.
    -func (b *ResourceSliceApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *ResourceSliceApplyConfiguration {
    -	b.NamedResources = value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go
    deleted file mode 100644
    index e6efcbfef3..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go
    +++ /dev/null
    @@ -1,75 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	runtime "k8s.io/apimachinery/pkg/runtime"
    -)
    -
    -// StructuredResourceHandleApplyConfiguration represents an declarative configuration of the StructuredResourceHandle type for use
    -// with apply.
    -type StructuredResourceHandleApplyConfiguration struct {
    -	VendorClassParameters *runtime.RawExtension                      `json:"vendorClassParameters,omitempty"`
    -	VendorClaimParameters *runtime.RawExtension                      `json:"vendorClaimParameters,omitempty"`
    -	NodeName              *string                                    `json:"nodeName,omitempty"`
    -	Results               []DriverAllocationResultApplyConfiguration `json:"results,omitempty"`
    -}
    -
    -// StructuredResourceHandleApplyConfiguration constructs an declarative configuration of the StructuredResourceHandle type for use with
    -// apply.
    -func StructuredResourceHandle() *StructuredResourceHandleApplyConfiguration {
    -	return &StructuredResourceHandleApplyConfiguration{}
    -}
    -
    -// WithVendorClassParameters sets the VendorClassParameters field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the VendorClassParameters field is set to the value of the last call.
    -func (b *StructuredResourceHandleApplyConfiguration) WithVendorClassParameters(value runtime.RawExtension) *StructuredResourceHandleApplyConfiguration {
    -	b.VendorClassParameters = &value
    -	return b
    -}
    -
    -// WithVendorClaimParameters sets the VendorClaimParameters field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the VendorClaimParameters field is set to the value of the last call.
    -func (b *StructuredResourceHandleApplyConfiguration) WithVendorClaimParameters(value runtime.RawExtension) *StructuredResourceHandleApplyConfiguration {
    -	b.VendorClaimParameters = &value
    -	return b
    -}
    -
    -// WithNodeName sets the NodeName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the NodeName field is set to the value of the last call.
    -func (b *StructuredResourceHandleApplyConfiguration) WithNodeName(value string) *StructuredResourceHandleApplyConfiguration {
    -	b.NodeName = &value
    -	return b
    -}
    -
    -// WithResults adds the given value to the Results field in the declarative configuration
    -// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    -// If called multiple times, values provided by each call will be appended to the Results field.
    -func (b *StructuredResourceHandleApplyConfiguration) WithResults(values ...*DriverAllocationResultApplyConfiguration) *StructuredResourceHandleApplyConfiguration {
    -	for i := range values {
    -		if values[i] == nil {
    -			panic("nil value passed to WithResults")
    -		}
    -		b.Results = append(b.Results, *values[i])
    -	}
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go
    deleted file mode 100644
    index f7a8ff9ece..0000000000
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go
    +++ /dev/null
    @@ -1,52 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by applyconfiguration-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	runtime "k8s.io/apimachinery/pkg/runtime"
    -)
    -
    -// VendorParametersApplyConfiguration represents an declarative configuration of the VendorParameters type for use
    -// with apply.
    -type VendorParametersApplyConfiguration struct {
    -	DriverName *string               `json:"driverName,omitempty"`
    -	Parameters *runtime.RawExtension `json:"parameters,omitempty"`
    -}
    -
    -// VendorParametersApplyConfiguration constructs an declarative configuration of the VendorParameters type for use with
    -// apply.
    -func VendorParameters() *VendorParametersApplyConfiguration {
    -	return &VendorParametersApplyConfiguration{}
    -}
    -
    -// WithDriverName sets the DriverName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DriverName field is set to the value of the last call.
    -func (b *VendorParametersApplyConfiguration) WithDriverName(value string) *VendorParametersApplyConfiguration {
    -	b.DriverName = &value
    -	return b
    -}
    -
    -// WithParameters sets the Parameters field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the Parameters field is set to the value of the last call.
    -func (b *VendorParametersApplyConfiguration) WithParameters(value runtime.RawExtension) *VendorParametersApplyConfiguration {
    -	b.Parameters = &value
    -	return b
    -}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go
    new file mode 100644
    index 0000000000..3090b2f9d3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go
    @@ -0,0 +1,61 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1 "k8s.io/client-go/applyconfigurations/core/v1"
    +)
    +
    +// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use
    +// with apply.
    +type AllocationResultApplyConfiguration struct {
    +	Devices      *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
    +	NodeSelector *v1.NodeSelectorApplyConfiguration        `json:"nodeSelector,omitempty"`
    +	Controller   *string                                   `json:"controller,omitempty"`
    +}
    +
    +// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with
    +// apply.
    +func AllocationResult() *AllocationResultApplyConfiguration {
    +	return &AllocationResultApplyConfiguration{}
    +}
    +
    +// WithDevices sets the Devices field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Devices field is set to the value of the last call.
    +func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration {
    +	b.Devices = value
    +	return b
    +}
    +
    +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the NodeSelector field is set to the value of the last call.
    +func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration {
    +	b.NodeSelector = value
    +	return b
    +}
    +
    +// WithController sets the Controller field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Controller field is set to the value of the last call.
    +func (b *AllocationResultApplyConfiguration) WithController(value string) *AllocationResultApplyConfiguration {
    +	b.Controller = &value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go
    new file mode 100644
    index 0000000000..e6b7745082
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go
    @@ -0,0 +1,65 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	resource "k8s.io/apimachinery/pkg/api/resource"
    +)
    +
    +// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use
    +// with apply.
    +type BasicDeviceApplyConfiguration struct {
    +	Attributes map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
    +	Capacity   map[v1alpha3.QualifiedName]resource.Quantity                 `json:"capacity,omitempty"`
    +}
    +
    +// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with
    +// apply.
    +func BasicDevice() *BasicDeviceApplyConfiguration {
    +	return &BasicDeviceApplyConfiguration{}
    +}
    +
    +// WithAttributes puts the entries into the Attributes field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Attributes field,
    +// overwriting an existing map entries in Attributes field with the same key.
    +func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration {
    +	if b.Attributes == nil && len(entries) > 0 {
    +		b.Attributes = make(map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Attributes[k] = v
    +	}
    +	return b
    +}
    +
    +// WithCapacity puts the entries into the Capacity field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Capacity field,
    +// overwriting an existing map entries in Capacity field with the same key.
    +func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[v1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration {
    +	if b.Capacity == nil && len(entries) > 0 {
    +		b.Capacity = make(map[v1alpha3.QualifiedName]resource.Quantity, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Capacity[k] = v
    +	}
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go
    new file mode 100644
    index 0000000000..c59b6a2e37
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use
    +// with apply.
    +type CELDeviceSelectorApplyConfiguration struct {
    +	Expression *string `json:"expression,omitempty"`
    +}
    +
    +// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with
    +// apply.
    +func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration {
    +	return &CELDeviceSelectorApplyConfiguration{}
    +}
    +
    +// WithExpression sets the Expression field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Expression field is set to the value of the last call.
    +func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration {
    +	b.Expression = &value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go
    new file mode 100644
    index 0000000000..efdb5f37a9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceApplyConfiguration represents a declarative configuration of the Device type for use
    +// with apply.
    +type DeviceApplyConfiguration struct {
    +	Name  *string                        `json:"name,omitempty"`
    +	Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"`
    +}
    +
    +// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with
    +// apply.
    +func Device() *DeviceApplyConfiguration {
    +	return &DeviceApplyConfiguration{}
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration {
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithBasic sets the Basic field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Basic field is set to the value of the last call.
    +func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration {
    +	b.Basic = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go
    new file mode 100644
    index 0000000000..342e724ef0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go
    @@ -0,0 +1,63 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +)
    +
    +// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use
    +// with apply.
    +type DeviceAllocationConfigurationApplyConfiguration struct {
    +	Source                                *v1alpha3.AllocationConfigSource `json:"source,omitempty"`
    +	Requests                              []string                         `json:"requests,omitempty"`
    +	DeviceConfigurationApplyConfiguration `json:",inline"`
    +}
    +
    +// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with
    +// apply.
    +func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration {
    +	return &DeviceAllocationConfigurationApplyConfiguration{}
    +}
    +
    +// WithSource sets the Source field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Source field is set to the value of the last call.
    +func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value v1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration {
    +	b.Source = &value
    +	return b
    +}
    +
    +// WithRequests adds the given value to the Requests field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Requests field.
    +func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration {
    +	for i := range values {
    +		b.Requests = append(b.Requests, values[i])
    +	}
    +	return b
    +}
    +
    +// WithOpaque sets the Opaque field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Opaque field is set to the value of the last call.
    +func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration {
    +	b.Opaque = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go
    new file mode 100644
    index 0000000000..0cfb264b4e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go
    @@ -0,0 +1,58 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use
    +// with apply.
    +type DeviceAllocationResultApplyConfiguration struct {
    +	Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"`
    +	Config  []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
    +}
    +
    +// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with
    +// apply.
    +func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration {
    +	return &DeviceAllocationResultApplyConfiguration{}
    +}
    +
    +// WithResults adds the given value to the Results field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Results field.
    +func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithResults")
    +		}
    +		b.Results = append(b.Results, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithConfig adds the given value to the Config field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Config field.
    +func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithConfig")
    +		}
    +		b.Config = append(b.Config, *values[i])
    +	}
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go
    new file mode 100644
    index 0000000000..6b0b7a40ac
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use
    +// with apply.
    +type DeviceAttributeApplyConfiguration struct {
    +	IntValue     *int64  `json:"int,omitempty"`
    +	BoolValue    *bool   `json:"bool,omitempty"`
    +	StringValue  *string `json:"string,omitempty"`
    +	VersionValue *string `json:"version,omitempty"`
    +}
    +
    +// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with
    +// apply.
    +func DeviceAttribute() *DeviceAttributeApplyConfiguration {
    +	return &DeviceAttributeApplyConfiguration{}
    +}
    +
    +// WithIntValue sets the IntValue field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the IntValue field is set to the value of the last call.
    +func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration {
    +	b.IntValue = &value
    +	return b
    +}
    +
    +// WithBoolValue sets the BoolValue field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the BoolValue field is set to the value of the last call.
    +func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration {
    +	b.BoolValue = &value
    +	return b
    +}
    +
    +// WithStringValue sets the StringValue field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the StringValue field is set to the value of the last call.
    +func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration {
    +	b.StringValue = &value
    +	return b
    +}
    +
    +// WithVersionValue sets the VersionValue field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the VersionValue field is set to the value of the last call.
    +func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration {
    +	b.VersionValue = &value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go
    new file mode 100644
    index 0000000000..ce3ab56d8b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go
    @@ -0,0 +1,72 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use
    +// with apply.
    +type DeviceClaimApplyConfiguration struct {
    +	Requests    []DeviceRequestApplyConfiguration            `json:"requests,omitempty"`
    +	Constraints []DeviceConstraintApplyConfiguration         `json:"constraints,omitempty"`
    +	Config      []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
    +}
    +
    +// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with
    +// apply.
    +func DeviceClaim() *DeviceClaimApplyConfiguration {
    +	return &DeviceClaimApplyConfiguration{}
    +}
    +
    +// WithRequests adds the given value to the Requests field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Requests field.
    +func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithRequests")
    +		}
    +		b.Requests = append(b.Requests, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithConstraints adds the given value to the Constraints field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Constraints field.
    +func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithConstraints")
    +		}
    +		b.Constraints = append(b.Constraints, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithConfig adds the given value to the Config field in the declarative configuration
    +// and returns the receiver, so that objects can be build by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Config field.
    +func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithConfig")
    +		}
    +		b.Config = append(b.Config, *values[i])
    +	}
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go
    new file mode 100644
    index 0000000000..4cabe98599
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go
    @@ -0,0 +1,50 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use
    +// with apply.
    +type DeviceClaimConfigurationApplyConfiguration struct {
    +	Requests                              []string `json:"requests,omitempty"`
    +	DeviceConfigurationApplyConfiguration `json:",inline"`
    +}
    +
    +// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with
    +// apply.
    +func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration {
    +	return &DeviceClaimConfigurationApplyConfiguration{}
    +}
    +
    +// WithRequests adds the given value to the Requests field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Requests field.
    +func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration {
    +	for i := range values {
    +		b.Requests = append(b.Requests, values[i])
    +	}
    +	return b
    +}
    +
    +// WithOpaque sets the Opaque field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Opaque field is set to the value of the last call.
    +func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration {
    +	b.Opaque = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go
    new file mode 100644
    index 0000000000..abaadbb366
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go
    @@ -0,0 +1,253 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use
    +// with apply.
    +type DeviceClassApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
    +}
    +
    +// DeviceClass constructs a declarative configuration of the DeviceClass type for use with
    +// apply.
    +func DeviceClass(name string) *DeviceClassApplyConfiguration {
    +	b := &DeviceClassApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithKind("DeviceClass")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b
    +}
    +
    +// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
    +// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
    +// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
    +// APIVersion and Kind populated. It is possible that no managed fields were found because other
    +// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    +// the fieldManager never owned any fields.
    +// deviceClass must be an unmodified DeviceClass API object that was retrieved from the Kubernetes API.
    +// ExtractDeviceClass provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
    +	return extractDeviceClass(deviceClass, fieldManager, "")
    +}
    +
    +// ExtractDeviceClassStatus is the same as ExtractDeviceClass except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractDeviceClassStatus(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
    +	return extractDeviceClass(deviceClass, fieldManager, "status")
    +}
    +
    +func extractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
    +	b := &DeviceClassApplyConfiguration{}
    +	err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha3.DeviceClass"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(deviceClass.Name)
    +
    +	b.WithKind("DeviceClass")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Labels field,
    +// overwriting existing map entries in the Labels field with the same key.
    +func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Annotations field,
    +// overwriting existing map entries in the Annotations field with the same key.
    +func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *DeviceClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
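    ExtractDeviceClass above supports the server-side-apply extract/modify-in-place/apply loop. A hedged sketch of that flow: in practice the object would be fetched from the API server and the modified configuration handed to the typed client's Apply call; the field-manager name, object name, and label key here are illustrative assumptions.

    package main

    import (
    	"fmt"

    	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    )

    func main() {
    	// A stub object without managed fields still exercises the call; the result
    	// then carries only Name, Kind and APIVersion, as documented above.
    	dc := &resourcev1alpha3.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "example-gpu"}}

    	ac, err := resourceapply.ExtractDeviceClass(dc, "example-manager")
    	if err != nil {
    		panic(err)
    	}
    	// Modify only the fields this manager should own before re-applying.
    	ac.WithLabels(map[string]string{"example.com/tier": "gpu"})
    	fmt.Println(*ac.GetName(), *ac.Kind) // example-gpu DeviceClass
    }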
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go
    new file mode 100644
    index 0000000000..cb3758a3e3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use
    +// with apply.
    +type DeviceClassConfigurationApplyConfiguration struct {
    +	DeviceConfigurationApplyConfiguration `json:",inline"`
    +}
    +
    +// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with
    +// apply.
    +func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration {
    +	return &DeviceClassConfigurationApplyConfiguration{}
    +}
    +
    +// WithOpaque sets the Opaque field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Opaque field is set to the value of the last call.
    +func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration {
    +	b.Opaque = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go
    new file mode 100644
    index 0000000000..d40a43de66
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go
    @@ -0,0 +1,71 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1 "k8s.io/client-go/applyconfigurations/core/v1"
    +)
    +
    +// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use
    +// with apply.
    +type DeviceClassSpecApplyConfiguration struct {
    +	Selectors     []DeviceSelectorApplyConfiguration           `json:"selectors,omitempty"`
    +	Config        []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
    +	SuitableNodes *v1.NodeSelectorApplyConfiguration           `json:"suitableNodes,omitempty"`
    +}
    +
    +// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with
    +// apply.
    +func DeviceClassSpec() *DeviceClassSpecApplyConfiguration {
    +	return &DeviceClassSpecApplyConfiguration{}
    +}
    +
    +// WithSelectors adds the given value to the Selectors field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Selectors field.
    +func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithSelectors")
    +		}
    +		b.Selectors = append(b.Selectors, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithConfig adds the given value to the Config field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Config field.
    +func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithConfig")
    +		}
    +		b.Config = append(b.Config, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the SuitableNodes field is set to the value of the last call.
    +func (b *DeviceClassSpecApplyConfiguration) WithSuitableNodes(value *v1.NodeSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration {
    +	b.SuitableNodes = value
    +	return b
    +}
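    DeviceClassSpec is typically populated with CEL-based selectors. A sketch under the assumption that the CELDeviceSelector builder generated in this same package exposes the usual WithExpression setter; the CEL expression and class name are illustrative.

    package main

    import (
    	"fmt"

    	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    )

    func main() {
    	// Select devices published by a hypothetical driver via a CEL expression.
    	spec := resourceapply.DeviceClassSpec().WithSelectors(
    		resourceapply.DeviceSelector().WithCEL(
    			resourceapply.CELDeviceSelector().WithExpression(`device.driver == "gpu.example.com"`),
    		),
    	)
    	class := resourceapply.DeviceClass("example-gpu").WithSpec(spec)
    	fmt.Println(*class.GetName(), len(class.Spec.Selectors)) // example-gpu 1
    }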
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go
    new file mode 100644
    index 0000000000..62c0d997de
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use
    +// with apply.
    +type DeviceConfigurationApplyConfiguration struct {
    +	Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"`
    +}
    +
    +// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with
    +// apply.
    +func DeviceConfiguration() *DeviceConfigurationApplyConfiguration {
    +	return &DeviceConfigurationApplyConfiguration{}
    +}
    +
    +// WithOpaque sets the Opaque field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Opaque field is set to the value of the last call.
    +func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration {
    +	b.Opaque = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go
    new file mode 100644
    index 0000000000..479acd57c2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go
    @@ -0,0 +1,54 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +)
    +
    +// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use
    +// with apply.
    +type DeviceConstraintApplyConfiguration struct {
    +	Requests       []string                     `json:"requests,omitempty"`
    +	MatchAttribute *v1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"`
    +}
    +
    +// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with
    +// apply.
    +func DeviceConstraint() *DeviceConstraintApplyConfiguration {
    +	return &DeviceConstraintApplyConfiguration{}
    +}
    +
    +// WithRequests adds the given value to the Requests field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Requests field.
    +func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration {
    +	for i := range values {
    +		b.Requests = append(b.Requests, values[i])
    +	}
    +	return b
    +}
    +
    +// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the MatchAttribute field is set to the value of the last call.
    +func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value v1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration {
    +	b.MatchAttribute = &value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go
    new file mode 100644
    index 0000000000..e5c87efe47
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go
    @@ -0,0 +1,93 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +)
    +
    +// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use
    +// with apply.
    +type DeviceRequestApplyConfiguration struct {
    +	Name            *string                                `json:"name,omitempty"`
    +	DeviceClassName *string                                `json:"deviceClassName,omitempty"`
    +	Selectors       []DeviceSelectorApplyConfiguration     `json:"selectors,omitempty"`
    +	AllocationMode  *resourcev1alpha3.DeviceAllocationMode `json:"allocationMode,omitempty"`
    +	Count           *int64                                 `json:"count,omitempty"`
    +	AdminAccess     *bool                                  `json:"adminAccess,omitempty"`
    +}
    +
    +// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with
    +// apply.
    +func DeviceRequest() *DeviceRequestApplyConfiguration {
    +	return &DeviceRequestApplyConfiguration{}
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration {
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeviceClassName field is set to the value of the last call.
    +func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration {
    +	b.DeviceClassName = &value
    +	return b
    +}
    +
    +// WithSelectors adds the given value to the Selectors field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Selectors field.
    +func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithSelectors")
    +		}
    +		b.Selectors = append(b.Selectors, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the AllocationMode field is set to the value of the last call.
    +func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1alpha3.DeviceAllocationMode) *DeviceRequestApplyConfiguration {
    +	b.AllocationMode = &value
    +	return b
    +}
    +
    +// WithCount sets the Count field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Count field is set to the value of the last call.
    +func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration {
    +	b.Count = &value
    +	return b
    +}
    +
    +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the AdminAccess field is set to the value of the last call.
    +func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration {
    +	b.AdminAccess = &value
    +	return b
    +}
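    The AllocationMode/Count pair above maps onto the DeviceAllocationMode constants from k8s.io/api/resource/v1alpha3. A brief sketch requesting an exact count of devices; the request and class names are illustrative assumptions.

    package main

    import (
    	"fmt"

    	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    )

    func main() {
    	// Ask for exactly two devices of a hypothetical device class.
    	req := resourceapply.DeviceRequest().
    		WithName("gpus").
    		WithDeviceClassName("example-gpu").
    		WithAllocationMode(resourcev1alpha3.DeviceAllocationModeExactCount).
    		WithCount(2)
    	fmt.Println(*req.Name, *req.AllocationMode, *req.Count) // gpus ExactCount 2
    }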
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go
    new file mode 100644
    index 0000000000..712b9bf9b1
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use
    +// with apply.
    +type DeviceRequestAllocationResultApplyConfiguration struct {
    +	Request *string `json:"request,omitempty"`
    +	Driver  *string `json:"driver,omitempty"`
    +	Pool    *string `json:"pool,omitempty"`
    +	Device  *string `json:"device,omitempty"`
    +}
    +
    +// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with
    +// apply.
    +func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration {
    +	return &DeviceRequestAllocationResultApplyConfiguration{}
    +}
    +
    +// WithRequest sets the Request field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Request field is set to the value of the last call.
    +func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration {
    +	b.Request = &value
    +	return b
    +}
    +
    +// WithDriver sets the Driver field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Driver field is set to the value of the last call.
    +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration {
    +	b.Driver = &value
    +	return b
    +}
    +
    +// WithPool sets the Pool field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Pool field is set to the value of the last call.
    +func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration {
    +	b.Pool = &value
    +	return b
    +}
    +
    +// WithDevice sets the Device field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Device field is set to the value of the last call.
    +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration {
    +	b.Device = &value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go
    new file mode 100644
    index 0000000000..574299d15e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use
    +// with apply.
    +type DeviceSelectorApplyConfiguration struct {
    +	CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"`
    +}
    +
    +// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with
    +// apply.
    +func DeviceSelector() *DeviceSelectorApplyConfiguration {
    +	return &DeviceSelectorApplyConfiguration{}
    +}
    +
    +// WithCEL sets the CEL field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CEL field is set to the value of the last call.
    +func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration {
    +	b.CEL = value
    +	return b
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go
    new file mode 100644
    index 0000000000..caf9d059c3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +)
    +
    +// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use
    +// with apply.
    +type OpaqueDeviceConfigurationApplyConfiguration struct {
    +	Driver     *string               `json:"driver,omitempty"`
    +	Parameters *runtime.RawExtension `json:"parameters,omitempty"`
    +}
    +
    +// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with
    +// apply.
    +func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration {
    +	return &OpaqueDeviceConfigurationApplyConfiguration{}
    +}
    +
    +// WithDriver sets the Driver field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Driver field is set to the value of the last call.
    +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration {
    +	b.Driver = &value
    +	return b
    +}
    +
    +// WithParameters sets the Parameters field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Parameters field is set to the value of the last call.
    +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration {
    +	b.Parameters = &value
    +	return b
    +}
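    Opaque configuration carries vendor-specific parameters as a runtime.RawExtension that the named driver interprets. A small sketch; the driver name and JSON payload are illustrative assumptions.

    package main

    import (
    	"fmt"

    	runtime "k8s.io/apimachinery/pkg/runtime"
    	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    )

    func main() {
    	// The raw bytes are passed through uninterpreted to the named driver.
    	cfg := resourceapply.OpaqueDeviceConfiguration().
    		WithDriver("gpu.example.com").
    		WithParameters(runtime.RawExtension{Raw: []byte(`{"sharing":"timeslice"}`)})
    	fmt.Println(*cfg.Driver, string(cfg.Parameters.Raw))
    }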
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go
    new file mode 100644
    index 0000000000..ee8e73ebe2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go
    @@ -0,0 +1,264 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use
    +// with apply.
    +type PodSchedulingContextApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *PodSchedulingContextSpecApplyConfiguration   `json:"spec,omitempty"`
    +	Status                           *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"`
    +}
    +
    +// PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with
    +// apply.
    +func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration {
    +	b := &PodSchedulingContextApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithNamespace(namespace)
    +	b.WithKind("PodSchedulingContext")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b
    +}
    +
    +// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from
    +// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a
    +// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable),
    +// APIVersion and Kind populated. It is possible that no managed fields were found because other
    +// field managers have taken ownership of all the fields previously owned by fieldManager, or because
    +// the fieldManager never owned any fields.
    +// podSchedulingContext must be an unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API.
    +// ExtractPodSchedulingContext provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
    +	return extractPodSchedulingContext(podSchedulingContext, fieldManager, "")
    +}
    +
    +// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
    +	return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status")
    +}
    +
    +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) {
    +	b := &PodSchedulingContextApplyConfiguration{}
    +	err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha3.PodSchedulingContext"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(podSchedulingContext.Name)
    +	b.WithNamespace(podSchedulingContext.Namespace)
    +
    +	b.WithKind("PodSchedulingContext")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Labels field,
    +// overwriting existing map entries in the Labels field with the same key.
    +func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Annotations field,
    +// overwriting existing map entries in the Annotations field with the same key.
    +func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// WithStatus sets the Status field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Status field is set to the value of the last call.
    +func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration {
    +	b.Status = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PodSchedulingContextApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
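    PodSchedulingContext combines the metadata builders above with the spec builders from the renamed podschedulingcontextspec.go that follows. A sketch, assuming the usual generated WithSelectedNode/WithPotentialNodes setters on that spec; the pod, namespace, and node names are illustrative assumptions.

    package main

    import (
    	"fmt"

    	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    )

    func main() {
    	// Namespaced constructor: name and namespace are both required up front.
    	psc := resourceapply.PodSchedulingContext("my-pod", "default").
    		WithSpec(resourceapply.PodSchedulingContextSpec().
    			WithSelectedNode("node-a").
    			WithPotentialNodes("node-a", "node-b"))
    	fmt.Println(*psc.GetName(), *psc.Spec.SelectedNode, len(psc.Spec.PotentialNodes))
    }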
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go
    similarity index 87%
    rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go
    rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go
    index c95d3295e8..fd25df7a53 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go
    @@ -16,16 +16,16 @@ limitations under the License.
     
     // Code generated by applyconfiguration-gen. DO NOT EDIT.
     
    -package v1alpha2
    +package v1alpha3
     
    -// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use
    +// PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use
     // with apply.
     type PodSchedulingContextSpecApplyConfiguration struct {
     	SelectedNode   *string  `json:"selectedNode,omitempty"`
     	PotentialNodes []string `json:"potentialNodes,omitempty"`
     }
     
    -// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with
    +// PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with
     // apply.
     func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration {
     	return &PodSchedulingContextSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go
    similarity index 84%
    rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go
    rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go
    index a8b10b9a0e..a06e370cc3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go
    @@ -16,15 +16,15 @@ limitations under the License.
     
     // Code generated by applyconfiguration-gen. DO NOT EDIT.
     
    -package v1alpha2
    +package v1alpha3
     
    -// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use
    +// PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use
     // with apply.
     type PodSchedulingContextStatusApplyConfiguration struct {
     	ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"`
     }
     
    -// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with
    +// PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with
     // apply.
     func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration {
     	return &PodSchedulingContextStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go
    new file mode 100644
    index 0000000000..6161595588
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go
    @@ -0,0 +1,264 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
    +// with apply.
    +type ResourceClaimApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *ResourceClaimSpecApplyConfiguration   `json:"spec,omitempty"`
    +	Status                           *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
    +}
    +
    +// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with
    +// apply.
    +func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
    +	b := &ResourceClaimApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithNamespace(namespace)
    +	b.WithKind("ResourceClaim")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b
    +}
    +
    +// ExtractResourceClaim extracts the applied configuration owned by fieldManager from
    +// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a
    +// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
     +// APIVersion and Kind populated. It is possible that no managed fields were found because other
     +// field managers have taken ownership of all the fields previously owned by fieldManager, or because
     +// the fieldManager never owned any fields.
     +// resourceClaim must be an unmodified ResourceClaim API object that was retrieved from the Kubernetes API.
     +// ExtractResourceClaim provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
    +	return extractResourceClaim(resourceClaim, fieldManager, "")
    +}
    +
    +// ExtractResourceClaimStatus is the same as ExtractResourceClaim except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
    +	return extractResourceClaim(resourceClaim, fieldManager, "status")
    +}
    +
    +func extractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
    +	b := &ResourceClaimApplyConfiguration{}
    +	err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaim"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(resourceClaim.Name)
    +	b.WithNamespace(resourceClaim.Namespace)
    +
    +	b.WithKind("ResourceClaim")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
     +// If called multiple times, the entries provided by each call will be put on the Labels field,
     +// overwriting existing map entries in the Labels field with the same key.
    +func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
     +// If called multiple times, the entries provided by each call will be put on the Annotations field,
     +// overwriting existing map entries in the Annotations field with the same key.
    +func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// WithStatus sets the Status field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Status field is set to the value of the last call.
    +func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration {
    +	b.Status = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ResourceClaimApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
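
The ResourceClaim builder above is designed for server-side apply: construct a sparse configuration, then hand it to the typed client's Apply call so the chosen field manager owns exactly the fields that were set. A minimal sketch, assuming the generated typed clientset in this client-go version exposes ResourceV1alpha3().ResourceClaims(...).Apply (the standard generated surface, not shown in this diff) and a reachable cluster with the resource.k8s.io/v1alpha3 API enabled:

package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Sparse apply configuration: only the fields set below are sent, and the
	// field manager named in ApplyOptions takes ownership of exactly those fields.
	// (A real claim would also populate spec.devices via the DeviceClaim builders
	// in this package; they are omitted here to keep the sketch short.)
	claim := resourcev1alpha3ac.ResourceClaim("gpu-claim", "default").
		WithLabels(map[string]string{"app": "demo"}).
		WithSpec(resourcev1alpha3ac.ResourceClaimSpec())

	_, err = cs.ResourceV1alpha3().ResourceClaims("default").
		Apply(context.Background(), claim, metav1.ApplyOptions{FieldManager: "demo-manager"})
	if err != nil {
		log.Fatal(err)
	}
}
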
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go
    similarity index 94%
    rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go
    rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go
    index 41bb9e9a14..96196d7c95 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go
    @@ -16,13 +16,13 @@ limitations under the License.
     
     // Code generated by applyconfiguration-gen. DO NOT EDIT.
     
    -package v1alpha2
    +package v1alpha3
     
     import (
     	types "k8s.io/apimachinery/pkg/types"
     )
     
    -// ResourceClaimConsumerReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimConsumerReference type for use
    +// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use
     // with apply.
     type ResourceClaimConsumerReferenceApplyConfiguration struct {
     	APIGroup *string    `json:"apiGroup,omitempty"`
    @@ -31,7 +31,7 @@ type ResourceClaimConsumerReferenceApplyConfiguration struct {
     	UID      *types.UID `json:"uid,omitempty"`
     }
     
    -// ResourceClaimConsumerReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimConsumerReference type for use with
    +// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with
     // apply.
     func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration {
     	return &ResourceClaimConsumerReferenceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go
    similarity index 86%
    rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go
    rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go
    index e74679aed3..caab89acdb 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go
    @@ -16,16 +16,16 @@ limitations under the License.
     
     // Code generated by applyconfiguration-gen. DO NOT EDIT.
     
    -package v1alpha2
    +package v1alpha3
     
    -// ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use
    +// ResourceClaimSchedulingStatusApplyConfiguration represents a declarative configuration of the ResourceClaimSchedulingStatus type for use
     // with apply.
     type ResourceClaimSchedulingStatusApplyConfiguration struct {
     	Name            *string  `json:"name,omitempty"`
     	UnsuitableNodes []string `json:"unsuitableNodes,omitempty"`
     }
     
    -// ResourceClaimSchedulingStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimSchedulingStatus type for use with
    +// ResourceClaimSchedulingStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimSchedulingStatus type for use with
     // apply.
     func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration {
     	return &ResourceClaimSchedulingStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go
    new file mode 100644
    index 0000000000..7c5b65681d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use
    +// with apply.
    +type ResourceClaimSpecApplyConfiguration struct {
    +	Devices    *DeviceClaimApplyConfiguration `json:"devices,omitempty"`
    +	Controller *string                        `json:"controller,omitempty"`
    +}
    +
    +// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with
    +// apply.
    +func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration {
    +	return &ResourceClaimSpecApplyConfiguration{}
    +}
    +
    +// WithDevices sets the Devices field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Devices field is set to the value of the last call.
    +func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration {
    +	b.Devices = value
    +	return b
    +}
    +
    +// WithController sets the Controller field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Controller field is set to the value of the last call.
    +func (b *ResourceClaimSpecApplyConfiguration) WithController(value string) *ResourceClaimSpecApplyConfiguration {
    +	b.Controller = &value
    +	return b
    +}
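
ResourceClaimSpec in v1alpha3 is built from just two pieces: an optional Controller naming a DRA driver and a Devices block describing the devices being requested. Only WithController is visible in this hunk, so the sketch below sticks to it and leaves the DeviceClaim side as a comment; the driver name is made up.

package main

import (
	"encoding/json"
	"fmt"

	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Populating Devices would go through the DeviceClaim/DeviceRequest builders
	// generated elsewhere in this package; only the Controller setter is shown here.
	spec := resourcev1alpha3ac.ResourceClaimSpec().
		WithController("dra.example.com")

	out, _ := json.Marshal(spec)
	fmt.Println(string(out)) // {"controller":"dra.example.com"}
}
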
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go
    similarity index 77%
    rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go
    rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go
    index c6fa610906..a52af3ec36 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go
    @@ -16,31 +16,22 @@ limitations under the License.
     
     // Code generated by applyconfiguration-gen. DO NOT EDIT.
     
    -package v1alpha2
    +package v1alpha3
     
    -// ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use
    +// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use
     // with apply.
     type ResourceClaimStatusApplyConfiguration struct {
    -	DriverName            *string                                            `json:"driverName,omitempty"`
     	Allocation            *AllocationResultApplyConfiguration                `json:"allocation,omitempty"`
     	ReservedFor           []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"`
     	DeallocationRequested *bool                                              `json:"deallocationRequested,omitempty"`
     }
     
    -// ResourceClaimStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimStatus type for use with
    +// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with
     // apply.
     func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration {
     	return &ResourceClaimStatusApplyConfiguration{}
     }
     
    -// WithDriverName sets the DriverName field in the declarative configuration to the given value
    -// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    -// If called multiple times, the DriverName field is set to the value of the last call.
    -func (b *ResourceClaimStatusApplyConfiguration) WithDriverName(value string) *ResourceClaimStatusApplyConfiguration {
    -	b.DriverName = &value
    -	return b
    -}
    -
     // WithAllocation sets the Allocation field in the declarative configuration to the given value
     // and returns the receiver, so that objects can be built by chaining "With" function invocations.
     // If called multiple times, the Allocation field is set to the value of the last call.
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go
    new file mode 100644
    index 0000000000..6f371d0c05
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go
    @@ -0,0 +1,255 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use
    +// with apply.
    +type ResourceClaimTemplateApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
    +}
    +
    +// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with
    +// apply.
    +func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration {
    +	b := &ResourceClaimTemplateApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithNamespace(namespace)
    +	b.WithKind("ResourceClaimTemplate")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b
    +}
    +
    +// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
    +// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
    +// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
     +// APIVersion and Kind populated. It is possible that no managed fields were found because other
     +// field managers have taken ownership of all the fields previously owned by fieldManager, or because
     +// the fieldManager never owned any fields.
     +// resourceClaimTemplate must be an unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
     +// ExtractResourceClaimTemplate provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
    +	return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "")
    +}
    +
    +// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
    +	return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status")
    +}
    +
    +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
    +	b := &ResourceClaimTemplateApplyConfiguration{}
    +	err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaimTemplate"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(resourceClaimTemplate.Name)
    +	b.WithNamespace(resourceClaimTemplate.Namespace)
    +
    +	b.WithKind("ResourceClaimTemplate")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
     +// If called multiple times, the entries provided by each call will be put on the Labels field,
     +// overwriting existing map entries in the Labels field with the same key.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
     +// If called multiple times, the entries provided by each call will be put on the Annotations field,
     +// overwriting existing map entries in the Annotations field with the same key.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
     +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
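
ExtractResourceClaimTemplate is the entry point for the extract/modify-in-place/apply workflow described in the comments above: read the live object, pull out only the fields your field manager owns, change them, and apply the result back. A sketch under the same assumptions as the ResourceClaim example (the typed client methods used are the standard generated surface, not shown in this diff):

package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	const fieldManager = "demo-manager"
	ctx := context.Background()

	// 1. Read the live object back from the API server.
	live, err := cs.ResourceV1alpha3().ResourceClaimTemplates("default").
		Get(ctx, "gpu-template", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Extract only the fields this field manager currently owns.
	tmpl, err := resourcev1alpha3ac.ExtractResourceClaimTemplate(live, fieldManager)
	if err != nil {
		log.Fatal(err)
	}

	// 3. Modify the extracted configuration in place ...
	tmpl.WithLabels(map[string]string{"tier": "gpu"})

	// 4. ... and apply it back; fields owned by other managers are left untouched.
	_, err = cs.ResourceV1alpha3().ResourceClaimTemplates("default").
		Apply(ctx, tmpl, metav1.ApplyOptions{FieldManager: fieldManager})
	if err != nil {
		log.Fatal(err)
	}
}
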
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go
    similarity index 94%
    rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go
    rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go
    index 2f38ea0366..5b03ab7553 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go
    @@ -16,7 +16,7 @@ limitations under the License.
     
     // Code generated by applyconfiguration-gen. DO NOT EDIT.
     
    -package v1alpha2
    +package v1alpha3
     
     import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    @@ -24,14 +24,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// ResourceClaimTemplateSpecApplyConfiguration represents an declarative configuration of the ResourceClaimTemplateSpec type for use
    +// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use
     // with apply.
     type ResourceClaimTemplateSpecApplyConfiguration struct {
     	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
     	Spec                             *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// ResourceClaimTemplateSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimTemplateSpec type for use with
    +// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with
     // apply.
     func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration {
     	return &ResourceClaimTemplateSpecApplyConfiguration{}
    @@ -186,3 +186,9 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceCl
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go
    new file mode 100644
    index 0000000000..23825d137f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go
    @@ -0,0 +1,57 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use
    +// with apply.
    +type ResourcePoolApplyConfiguration struct {
    +	Name               *string `json:"name,omitempty"`
    +	Generation         *int64  `json:"generation,omitempty"`
    +	ResourceSliceCount *int64  `json:"resourceSliceCount,omitempty"`
    +}
    +
    +// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with
    +// apply.
    +func ResourcePool() *ResourcePoolApplyConfiguration {
    +	return &ResourcePoolApplyConfiguration{}
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration {
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration {
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceSliceCount field is set to the value of the last call.
    +func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration {
    +	b.ResourceSliceCount = &value
    +	return b
    +}
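
ResourcePool is a leaf builder with only scalar setters; a driver normally embeds it in a ResourceSliceSpec to record which pool a slice belongs to and how many slices make up the pool's current generation. A tiny sketch using only the setters shown above (the pool name is made up):

package main

import (
	"encoding/json"
	"fmt"

	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Generation 3 of this pool is published across two ResourceSlices.
	pool := resourcev1alpha3ac.ResourcePool().
		WithName("gpu-pool").
		WithGeneration(3).
		WithResourceSliceCount(2)

	out, _ := json.Marshal(pool)
	fmt.Println(string(out)) // {"name":"gpu-pool","generation":3,"resourceSliceCount":2}
}
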
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go
    new file mode 100644
    index 0000000000..aaad68612e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go
    @@ -0,0 +1,253 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use
    +// with apply.
    +type ResourceSliceApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	Spec                             *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
    +}
    +
    +// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with
    +// apply.
    +func ResourceSlice(name string) *ResourceSliceApplyConfiguration {
    +	b := &ResourceSliceApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithKind("ResourceSlice")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b
    +}
    +
    +// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
    +// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
    +// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
     +// APIVersion and Kind populated. It is possible that no managed fields were found because other
     +// field managers have taken ownership of all the fields previously owned by fieldManager, or because
     +// the fieldManager never owned any fields.
     +// resourceSlice must be an unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
     +// ExtractResourceSlice provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
    +	return extractResourceSlice(resourceSlice, fieldManager, "")
    +}
    +
    +// ExtractResourceSliceStatus is the same as ExtractResourceSlice except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
    +	return extractResourceSlice(resourceSlice, fieldManager, "status")
    +}
    +
    +func extractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
    +	b := &ResourceSliceApplyConfiguration{}
    +	err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceSlice"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(resourceSlice.Name)
    +
    +	b.WithKind("ResourceSlice")
    +	b.WithAPIVersion("resource.k8s.io/v1alpha3")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Labels field,
    +// overwriting any existing map entries in the Labels field with the same key.
    +func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the entries provided by each call will be put on the Annotations field,
    +// overwriting any existing map entries in the Annotations field with the same key.
    +func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithSpec sets the Spec field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Spec field is set to the value of the last call.
    +func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration {
    +	b.Spec = value
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *ResourceSliceApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
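
For orientation only (not part of the vendored diff): a minimal sketch of how the generated ResourceSlice builder above is meant to be chained. It assumes the ResourceSlice(name) constructor generated earlier in the same file; the slice name, label key, and finalizer string are illustrative placeholders.

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Each "With" setter returns the receiver, so the ObjectMeta-style
	// fields can be filled in as one fluent chain. Repeated calls to the
	// map setters merge entries, as documented above.
	slice := resourcev1alpha3.ResourceSlice("example-slice").
		WithLabels(map[string]string{"example.com/driver": "example"}).
		WithAnnotations(map[string]string{"example.com/note": "sketch"}).
		WithFinalizers("example.com/cleanup")

	// GetName, added in this update, exposes the name without reaching into
	// the embedded ObjectMetaApplyConfiguration directly.
	fmt.Println(*slice.GetName())
}
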
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go
    new file mode 100644
    index 0000000000..2ded759073
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go
    @@ -0,0 +1,93 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1 "k8s.io/client-go/applyconfigurations/core/v1"
    +)
    +
    +// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use
    +// with apply.
    +type ResourceSliceSpecApplyConfiguration struct {
    +	Driver       *string                            `json:"driver,omitempty"`
    +	Pool         *ResourcePoolApplyConfiguration    `json:"pool,omitempty"`
    +	NodeName     *string                            `json:"nodeName,omitempty"`
    +	NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
    +	AllNodes     *bool                              `json:"allNodes,omitempty"`
    +	Devices      []DeviceApplyConfiguration         `json:"devices,omitempty"`
    +}
    +
    +// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with
    +// apply.
    +func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration {
    +	return &ResourceSliceSpecApplyConfiguration{}
    +}
    +
    +// WithDriver sets the Driver field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Driver field is set to the value of the last call.
    +func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration {
    +	b.Driver = &value
    +	return b
    +}
    +
    +// WithPool sets the Pool field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Pool field is set to the value of the last call.
    +func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration {
    +	b.Pool = value
    +	return b
    +}
    +
    +// WithNodeName sets the NodeName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the NodeName field is set to the value of the last call.
    +func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration {
    +	b.NodeName = &value
    +	return b
    +}
    +
    +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the NodeSelector field is set to the value of the last call.
    +func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration {
    +	b.NodeSelector = value
    +	return b
    +}
    +
    +// WithAllNodes sets the AllNodes field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the AllNodes field is set to the value of the last call.
    +func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration {
    +	b.AllNodes = &value
    +	return b
    +}
    +
    +// WithDevices adds the given value to the Devices field in the declarative configuration
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Devices field.
    +func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration {
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithDevices")
    +		}
    +		b.Devices = append(b.Devices, *values[i])
    +	}
    +	return b
    +}
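
A second sketch, again not vendored code: building the new ResourceSliceSpec configuration with the setters in this file. Every method and field used below appears in the generated code above; the driver and node names are placeholders, and the resulting spec would typically be handed to the WithSpec method of the ResourceSlice builder from the previous file.

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// WithNodeName scopes the slice to a single node; WithNodeSelector and
	// WithAllNodes are the alternative scoping setters generated above.
	spec := resourcev1alpha3.ResourceSliceSpec().
		WithDriver("driver.example.com").
		WithNodeName("node-1")

	fmt.Println(*spec.Driver, *spec.NodeName)
}
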
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
    index b57e8ba57d..f2f135abc6 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use
    +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use
     // with apply.
     type PriorityClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct {
     	PreemptionPolicy                 *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
     }
     
    -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with
    +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with
     // apply.
     func PriorityClass(name string) *PriorityClassApplyConfiguration {
     	b := &PriorityClassApplyConfiguration{}
    @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree
     	b.PreemptionPolicy = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PriorityClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
    index 0cd09d5d1c..098517675e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use
    +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use
     // with apply.
     type PriorityClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct {
     	PreemptionPolicy                 *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
     }
     
    -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with
    +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with
     // apply.
     func PriorityClass(name string) *PriorityClassApplyConfiguration {
     	b := &PriorityClassApplyConfiguration{}
    @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree
     	b.PreemptionPolicy = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PriorityClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
    index 98cfb14c70..075862fe3e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use
    +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use
     // with apply.
     type PriorityClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct {
     	PreemptionPolicy                 *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
     }
     
    -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with
    +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with
     // apply.
     func PriorityClass(name string) *PriorityClassApplyConfiguration {
     	b := &PriorityClassApplyConfiguration{}
    @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree
     	b.PreemptionPolicy = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *PriorityClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
    index aeead0861c..39d8357029 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use
    +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use
     // with apply.
     type CSIDriverApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct {
     	Spec                             *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with
    +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with
     // apply.
     func CSIDriver(name string) *CSIDriverApplyConfiguration {
     	b := &CSIDriverApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CSIDriverApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
    index a1ef00656b..b2dcb0feea 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
    @@ -22,7 +22,7 @@ import (
     	v1 "k8s.io/api/storage/v1"
     )
     
    -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use
    +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use
     // with apply.
     type CSIDriverSpecApplyConfiguration struct {
     	AttachRequired       *bool                            `json:"attachRequired,omitempty"`
    @@ -35,7 +35,7 @@ type CSIDriverSpecApplyConfiguration struct {
     	SELinuxMount         *bool                            `json:"seLinuxMount,omitempty"`
     }
     
    -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with
    +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with
     // apply.
     func CSIDriverSpec() *CSIDriverSpecApplyConfiguration {
     	return &CSIDriverSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
    index d8296e4856..8a53e7984e 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use
    +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use
     // with apply.
     type CSINodeApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct {
     	Spec                             *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// CSINode constructs an declarative configuration of the CSINode type for use with
    +// CSINode constructs a declarative configuration of the CSINode type for use with
     // apply.
     func CSINode(name string) *CSINodeApplyConfiguration {
     	b := &CSINodeApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CSINodeApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go
    index 6219ef1151..8c69e435e7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use
    +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use
     // with apply.
     type CSINodeDriverApplyConfiguration struct {
     	Name         *string                                `json:"name,omitempty"`
    @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct {
     	Allocatable  *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"`
     }
     
    -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with
    +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with
     // apply.
     func CSINodeDriver() *CSINodeDriverApplyConfiguration {
     	return &CSINodeDriverApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go
    index 30d1d4546b..21d3ba7ccc 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use
    +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use
     // with apply.
     type CSINodeSpecApplyConfiguration struct {
     	Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"`
     }
     
    -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with
    +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with
     // apply.
     func CSINodeSpec() *CSINodeSpecApplyConfiguration {
     	return &CSINodeSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
    index c47c6b8215..0e293248d9 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use
    +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use
     // with apply.
     type CSIStorageCapacityApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct {
     	MaximumVolumeSize                *resource.Quantity                  `json:"maximumVolumeSize,omitempty"`
     }
     
    -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with
    +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with
     // apply.
     func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration {
     	b := &CSIStorageCapacityApplyConfiguration{}
    @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou
     	b.MaximumVolumeSize = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
    index 98c4c22336..26d70bc8b0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
    @@ -29,7 +29,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use
    +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use
     // with apply.
     type StorageClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct {
     	AllowedTopologies                []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
     }
     
    -// StorageClass constructs an declarative configuration of the StorageClass type for use with
    +// StorageClass constructs a declarative configuration of the StorageClass type for use with
     // apply.
     func StorageClass(name string) *StorageClassApplyConfiguration {
     	b := &StorageClassApplyConfiguration{}
    @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *StorageClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
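
One more illustrative sketch (an assumption about intended usage, not vendored code): the GetName accessor that this update adds to each apply configuration lets generic helper code read an object's name without knowing the concrete builder type. The interface below is defined locally for the example; the object names are placeholders, and only constructors visible in the hunks above are used.

package main

import (
	"fmt"

	schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1"
	storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
)

// named is a local, illustration-only interface; every builder touched by
// this update satisfies it via its newly generated GetName method.
type named interface {
	GetName() *string
}

func printNames(cfgs ...named) {
	for _, c := range cfgs {
		if n := c.GetName(); n != nil {
			fmt.Printf("%T -> %s\n", c, *n)
		}
	}
}

func main() {
	printNames(
		schedulingv1.PriorityClass("high-priority"),
		storagev1.StorageClass("fast-ssd"),
	)
}
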
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go
    index 6665a1ff2e..77b96db2f0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1
     
    -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use
    +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use
     // with apply.
     type TokenRequestApplyConfiguration struct {
     	Audience          *string `json:"audience,omitempty"`
     	ExpirationSeconds *int64  `json:"expirationSeconds,omitempty"`
     }
     
    -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with
    +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with
     // apply.
     func TokenRequest() *TokenRequestApplyConfiguration {
     	return &TokenRequestApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
    index 4c74f09aa2..72c351208c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use
    +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use
     // with apply.
     type VolumeAttachmentApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct {
     	Status                           *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with
    +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with
     // apply.
     func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
     	b := &VolumeAttachmentApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *VolumeAttachmentApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
    index 2bf3f7720d..4778553986 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use
    +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use
     // with apply.
     type VolumeAttachmentSourceApplyConfiguration struct {
     	PersistentVolumeName *string                                    `json:"persistentVolumeName,omitempty"`
     	InlineVolumeSpec     *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
     }
     
    -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with
    +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with
     // apply.
     func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration {
     	return &VolumeAttachmentSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go
    index a55f7c8ea1..8965392352 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use
    +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use
     // with apply.
     type VolumeAttachmentSpecApplyConfiguration struct {
     	Attacher *string                                   `json:"attacher,omitempty"`
    @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct {
     	NodeName *string                                   `json:"nodeName,omitempty"`
     }
     
    -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with
    +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with
     // apply.
     func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration {
     	return &VolumeAttachmentSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go
    index 015b08e6eb..14293376d0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1
     
    -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use
    +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use
     // with apply.
     type VolumeAttachmentStatusApplyConfiguration struct {
     	Attached           *bool                          `json:"attached,omitempty"`
    @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct {
     	DetachError        *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
     }
     
    -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with
    +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with
     // apply.
     func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration {
     	return &VolumeAttachmentStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
    index 4bf829f8a9..039e5f32bf 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use
    +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use
     // with apply.
     type VolumeErrorApplyConfiguration struct {
     	Time    *v1.Time `json:"time,omitempty"`
     	Message *string  `json:"message,omitempty"`
     }
     
    -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with
    +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with
     // apply.
     func VolumeError() *VolumeErrorApplyConfiguration {
     	return &VolumeErrorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go
    index 3c5fd3dc29..735853c48b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1
     
    -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use
    +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use
     // with apply.
     type VolumeNodeResourcesApplyConfiguration struct {
     	Count *int32 `json:"count,omitempty"`
     }
     
    -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with
    +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with
     // apply.
     func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration {
     	return &VolumeNodeResourcesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
    index 8b810fed10..aa949e28c7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use
    +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use
     // with apply.
     type CSIStorageCapacityApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct {
     	MaximumVolumeSize                *resource.Quantity                  `json:"maximumVolumeSize,omitempty"`
     }
     
    -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with
    +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with
     // apply.
     func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration {
     	b := &CSIStorageCapacityApplyConfiguration{}
    @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou
     	b.MaximumVolumeSize = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
    index bcefb5778a..9648621ac3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use
    +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use
     // with apply.
     type VolumeAttachmentApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct {
     	Status                           *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with
    +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with
     // apply.
     func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
     	b := &VolumeAttachmentApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *VolumeAttachmentApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go
    index 82872cc355..be7da5dd15 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use
    +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use
     // with apply.
     type VolumeAttachmentSourceApplyConfiguration struct {
     	PersistentVolumeName *string                                    `json:"persistentVolumeName,omitempty"`
     	InlineVolumeSpec     *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
     }
     
    -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with
    +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with
     // apply.
     func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration {
     	return &VolumeAttachmentSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go
    index 2710ff8864..e97487a645 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use
    +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use
     // with apply.
     type VolumeAttachmentSpecApplyConfiguration struct {
     	Attacher *string                                   `json:"attacher,omitempty"`
    @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct {
     	NodeName *string                                   `json:"nodeName,omitempty"`
     }
     
    -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with
    +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with
     // apply.
     func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration {
     	return &VolumeAttachmentSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go
    index 43803496e8..a287fc6b28 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use
    +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use
     // with apply.
     type VolumeAttachmentStatusApplyConfiguration struct {
     	Attached           *bool                          `json:"attached,omitempty"`
    @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct {
     	DetachError        *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
     }
     
    -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with
    +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with
     // apply.
     func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration {
     	return &VolumeAttachmentStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
    index 9d4c476259..f95bc55477 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// VolumeAttributesClassApplyConfiguration represents an declarative configuration of the VolumeAttributesClass type for use
    +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use
     // with apply.
     type VolumeAttributesClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type VolumeAttributesClassApplyConfiguration struct {
     	Parameters                       map[string]string `json:"parameters,omitempty"`
     }
     
    -// VolumeAttributesClass constructs an declarative configuration of the VolumeAttributesClass type for use with
    +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with
     // apply.
     func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration {
     	b := &VolumeAttributesClassApplyConfiguration{}
    @@ -260,3 +260,9 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go
    index cbff16fd0c..ef8f6bbe64 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use
    +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use
     // with apply.
     type VolumeErrorApplyConfiguration struct {
     	Time    *v1.Time `json:"time,omitempty"`
     	Message *string  `json:"message,omitempty"`
     }
     
    -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with
    +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with
     // apply.
     func VolumeError() *VolumeErrorApplyConfiguration {
     	return &VolumeErrorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
    index 4266f0b6e4..b9a807bd8a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use
    +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use
     // with apply.
     type CSIDriverApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct {
     	Spec                             *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with
    +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with
     // apply.
     func CSIDriver(name string) *CSIDriverApplyConfiguration {
     	b := &CSIDriverApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CSIDriverApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
    index 6097a615be..5f4e068f0c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
    @@ -22,7 +22,7 @@ import (
     	v1beta1 "k8s.io/api/storage/v1beta1"
     )
     
    -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use
    +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use
     // with apply.
     type CSIDriverSpecApplyConfiguration struct {
     	AttachRequired       *bool                            `json:"attachRequired,omitempty"`
    @@ -35,7 +35,7 @@ type CSIDriverSpecApplyConfiguration struct {
     	SELinuxMount         *bool                            `json:"seLinuxMount,omitempty"`
     }
     
    -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with
    +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with
     // apply.
     func CSIDriverSpec() *CSIDriverSpecApplyConfiguration {
     	return &CSIDriverSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
    index 91588fd9fb..af0f41cf0a 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use
    +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use
     // with apply.
     type CSINodeApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct {
     	Spec                             *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
     }
     
    -// CSINode constructs an declarative configuration of the CSINode type for use with
    +// CSINode constructs a declarative configuration of the CSINode type for use with
     // apply.
     func CSINode(name string) *CSINodeApplyConfiguration {
     	b := &CSINodeApplyConfiguration{}
    @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio
     	b.Spec = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CSINodeApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go
    index 2c7de497b2..65ad771bb2 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use
    +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use
     // with apply.
     type CSINodeDriverApplyConfiguration struct {
     	Name         *string                                `json:"name,omitempty"`
    @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct {
     	Allocatable  *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"`
     }
     
    -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with
    +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with
     // apply.
     func CSINodeDriver() *CSINodeDriverApplyConfiguration {
     	return &CSINodeDriverApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go
    index 94ff1b4611..c9cbea1d9c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use
    +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use
     // with apply.
     type CSINodeSpecApplyConfiguration struct {
     	Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"`
     }
     
    -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with
    +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with
     // apply.
     func CSINodeSpec() *CSINodeSpecApplyConfiguration {
     	return &CSINodeSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
    index 2854a15da7..19350e5a6f 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
    @@ -28,7 +28,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use
    +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use
     // with apply.
     type CSIStorageCapacityApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct {
     	MaximumVolumeSize                *resource.Quantity                  `json:"maximumVolumeSize,omitempty"`
     }
     
    -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with
    +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with
     // apply.
     func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration {
     	b := &CSIStorageCapacityApplyConfiguration{}
    @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou
     	b.MaximumVolumeSize = &value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
    index 02194f1080..fa504a44ec 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
    @@ -29,7 +29,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use
    +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use
     // with apply.
     type StorageClassApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct {
     	AllowedTopologies                []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
     }
     
    -// StorageClass constructs an declarative configuration of the StorageClass type for use with
    +// StorageClass constructs a declarative configuration of the StorageClass type for use with
     // apply.
     func StorageClass(name string) *StorageClassApplyConfiguration {
     	b := &StorageClassApplyConfiguration{}
    @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc
     	}
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *StorageClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go
    index 89c99d5602..e0f2df28e0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1beta1
     
    -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use
    +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use
     // with apply.
     type TokenRequestApplyConfiguration struct {
     	Audience          *string `json:"audience,omitempty"`
     	ExpirationSeconds *int64  `json:"expirationSeconds,omitempty"`
     }
     
    -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with
    +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with
     // apply.
     func TokenRequest() *TokenRequestApplyConfiguration {
     	return &TokenRequestApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
    index 9fccaf5cf9..b0711d7314 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use
    +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use
     // with apply.
     type VolumeAttachmentApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct {
     	Status                           *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with
    +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with
     // apply.
     func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
     	b := &VolumeAttachmentApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *VolumeAttachmentApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go
    index 9700b38ee2..b08dd3148b 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/core/v1"
     )
     
    -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use
    +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use
     // with apply.
     type VolumeAttachmentSourceApplyConfiguration struct {
     	PersistentVolumeName *string                                    `json:"persistentVolumeName,omitempty"`
     	InlineVolumeSpec     *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
     }
     
    -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with
    +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with
     // apply.
     func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration {
     	return &VolumeAttachmentSourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go
    index 1d5e304bb5..3bdaeb45d7 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use
    +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use
     // with apply.
     type VolumeAttachmentSpecApplyConfiguration struct {
     	Attacher *string                                   `json:"attacher,omitempty"`
    @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct {
     	NodeName *string                                   `json:"nodeName,omitempty"`
     }
     
    -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with
    +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with
     // apply.
     func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration {
     	return &VolumeAttachmentSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go
    index fa1855a241..f7046cdb35 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1beta1
     
    -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use
    +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use
     // with apply.
     type VolumeAttachmentStatusApplyConfiguration struct {
     	Attached           *bool                          `json:"attached,omitempty"`
    @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct {
     	DetachError        *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
     }
     
    -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with
    +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with
     // apply.
     func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration {
     	return &VolumeAttachmentStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
    new file mode 100644
    index 0000000000..7b221d2775
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
    @@ -0,0 +1,268 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +)
    +
    +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use
    +// with apply.
    +type VolumeAttributesClassApplyConfiguration struct {
    +	v1.TypeMetaApplyConfiguration    `json:",inline"`
    +	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
    +	DriverName                       *string           `json:"driverName,omitempty"`
    +	Parameters                       map[string]string `json:"parameters,omitempty"`
    +}
    +
    +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with
    +// apply.
    +func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration {
    +	b := &VolumeAttributesClassApplyConfiguration{}
    +	b.WithName(name)
    +	b.WithKind("VolumeAttributesClass")
    +	b.WithAPIVersion("storage.k8s.io/v1beta1")
    +	return b
    +}
    +
    +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from
    +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a
    +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// volumeAttributesClass must be an unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API.
+// ExtractVolumeAttributesClass provides a way to perform an extract/modify-in-place/apply workflow.
    +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
    +// applied if another fieldManager has updated or force applied any of the previously applied fields.
    +// Experimental!
    +func ExtractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
    +	return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "")
    +}
    +
    +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except
    +// that it extracts the status subresource applied configuration.
    +// Experimental!
    +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
    +	return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status")
    +}
    +
    +func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
    +	b := &VolumeAttributesClassApplyConfiguration{}
    +	err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource)
    +	if err != nil {
    +		return nil, err
    +	}
    +	b.WithName(volumeAttributesClass.Name)
    +
    +	b.WithKind("VolumeAttributesClass")
    +	b.WithAPIVersion("storage.k8s.io/v1beta1")
    +	return b, nil
    +}
    +
    +// WithKind sets the Kind field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Kind field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration {
    +	b.Kind = &value
    +	return b
    +}
    +
    +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the APIVersion field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration {
    +	b.APIVersion = &value
    +	return b
    +}
    +
    +// WithName sets the Name field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Name field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Name = &value
    +	return b
    +}
    +
    +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the GenerateName field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.GenerateName = &value
    +	return b
    +}
    +
    +// WithNamespace sets the Namespace field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Namespace field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Namespace = &value
    +	return b
    +}
    +
    +// WithUID sets the UID field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the UID field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.UID = &value
    +	return b
    +}
    +
    +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the ResourceVersion field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.ResourceVersion = &value
    +	return b
    +}
    +
    +// WithGeneration sets the Generation field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the Generation field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.Generation = &value
    +	return b
    +}
    +
    +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the CreationTimestamp field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.CreationTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionTimestamp = &value
    +	return b
    +}
    +
    +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	b.DeletionGracePeriodSeconds = &value
    +	return b
    +}
    +
    +// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
    +func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Labels == nil && len(entries) > 0 {
    +		b.Labels = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Labels[k] = v
    +	}
    +	return b
    +}
    +
    +// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
    +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	if b.Annotations == nil && len(entries) > 0 {
    +		b.Annotations = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Annotations[k] = v
    +	}
    +	return b
    +}
    +
    +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
    +func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		if values[i] == nil {
    +			panic("nil value passed to WithOwnerReferences")
    +		}
    +		b.OwnerReferences = append(b.OwnerReferences, *values[i])
    +	}
    +	return b
    +}
    +
    +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, values provided by each call will be appended to the Finalizers field.
    +func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	for i := range values {
    +		b.Finalizers = append(b.Finalizers, values[i])
    +	}
    +	return b
    +}
    +
    +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
    +	if b.ObjectMetaApplyConfiguration == nil {
    +		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
    +	}
    +}
    +
    +// WithDriverName sets the DriverName field in the declarative configuration to the given value
    +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
    +// If called multiple times, the DriverName field is set to the value of the last call.
    +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration {
    +	b.DriverName = &value
    +	return b
    +}
    +
    +// WithParameters puts the entries into the Parameters field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Parameters field,
+// overwriting existing map entries in the Parameters field with the same key.
    +func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration {
    +	if b.Parameters == nil && len(entries) > 0 {
    +		b.Parameters = make(map[string]string, len(entries))
    +	}
    +	for k, v := range entries {
    +		b.Parameters[k] = v
    +	}
    +	return b
    +}
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
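A rough sketch of the extract/modify-in-place/apply workflow that the ExtractVolumeAttributesClass comment above describes, assuming a typed clientset that exposes StorageV1beta1().VolumeAttributesClasses() with Get and Apply methods in this client-go version (the class name "gold", the parameter key, and the field-manager string are placeholders):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storageapply "k8s.io/client-go/applyconfigurations/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// updateParameters round-trips a VolumeAttributesClass through extract,
// in-place modification, and server-side apply.
func updateParameters(ctx context.Context, clientset kubernetes.Interface, fieldManager string) error {
	// 1. Fetch the live, unmodified object from the API server.
	live, err := clientset.StorageV1beta1().VolumeAttributesClasses().Get(ctx, "gold", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// 2. Extract only the fields currently owned by fieldManager.
	ac, err := storageapply.ExtractVolumeAttributesClass(live, fieldManager)
	if err != nil {
		return err
	}

	// 3. Modify the extracted configuration in place with the builder methods.
	ac.WithParameters(map[string]string{"iops": "1000"})

	// 4. Apply it back under the same field manager (the typed Apply method is
	//    assumed to exist for VolumeAttributesClass here).
	_, err = clientset.StorageV1beta1().VolumeAttributesClasses().
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}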
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go
    index 3f255fce75..fec1c9ade3 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go
    @@ -22,14 +22,14 @@ import (
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use
    +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use
     // with apply.
     type VolumeErrorApplyConfiguration struct {
     	Time    *v1.Time `json:"time,omitempty"`
     	Message *string  `json:"message,omitempty"`
     }
     
    -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with
    +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with
     // apply.
     func VolumeError() *VolumeErrorApplyConfiguration {
     	return &VolumeErrorApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go
    index 4b69b64c9b..b42c9decc0 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go
    @@ -18,13 +18,13 @@ limitations under the License.
     
     package v1beta1
     
    -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use
    +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use
     // with apply.
     type VolumeNodeResourcesApplyConfiguration struct {
     	Count *int32 `json:"count,omitempty"`
     }
     
    -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with
    +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with
     // apply.
     func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration {
     	return &VolumeNodeResourcesApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go
    index c733ac5c04..c8f9f009a5 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go
    @@ -18,7 +18,7 @@ limitations under the License.
     
     package v1alpha1
     
    -// GroupVersionResourceApplyConfiguration represents an declarative configuration of the GroupVersionResource type for use
    +// GroupVersionResourceApplyConfiguration represents a declarative configuration of the GroupVersionResource type for use
     // with apply.
     type GroupVersionResourceApplyConfiguration struct {
     	Group    *string `json:"group,omitempty"`
    @@ -26,7 +26,7 @@ type GroupVersionResourceApplyConfiguration struct {
     	Resource *string `json:"resource,omitempty"`
     }
     
    -// GroupVersionResourceApplyConfiguration constructs an declarative configuration of the GroupVersionResource type for use with
    +// GroupVersionResourceApplyConfiguration constructs a declarative configuration of the GroupVersionResource type for use with
     // apply.
     func GroupVersionResource() *GroupVersionResourceApplyConfiguration {
     	return &GroupVersionResourceApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
    index d0f863446e..dcdbc60c7c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
    @@ -24,7 +24,7 @@ import (
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     )
     
    -// MigrationConditionApplyConfiguration represents an declarative configuration of the MigrationCondition type for use
    +// MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use
     // with apply.
     type MigrationConditionApplyConfiguration struct {
     	Type           *v1alpha1.MigrationConditionType `json:"type,omitempty"`
    @@ -34,7 +34,7 @@ type MigrationConditionApplyConfiguration struct {
     	Message        *string                          `json:"message,omitempty"`
     }
     
    -// MigrationConditionApplyConfiguration constructs an declarative configuration of the MigrationCondition type for use with
    +// MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with
     // apply.
     func MigrationCondition() *MigrationConditionApplyConfiguration {
     	return &MigrationConditionApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
    index cc57b2b126..7e6452a777 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
    @@ -27,7 +27,7 @@ import (
     	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
     )
     
    -// StorageVersionMigrationApplyConfiguration represents an declarative configuration of the StorageVersionMigration type for use
    +// StorageVersionMigrationApplyConfiguration represents a declarative configuration of the StorageVersionMigration type for use
     // with apply.
     type StorageVersionMigrationApplyConfiguration struct {
     	v1.TypeMetaApplyConfiguration    `json:",inline"`
    @@ -36,7 +36,7 @@ type StorageVersionMigrationApplyConfiguration struct {
     	Status                           *StorageVersionMigrationStatusApplyConfiguration `json:"status,omitempty"`
     }
     
    -// StorageVersionMigration constructs an declarative configuration of the StorageVersionMigration type for use with
    +// StorageVersionMigration constructs a declarative configuration of the StorageVersionMigration type for use with
     // apply.
     func StorageVersionMigration(name string) *StorageVersionMigrationApplyConfiguration {
     	b := &StorageVersionMigrationApplyConfiguration{}
    @@ -254,3 +254,9 @@ func (b *StorageVersionMigrationApplyConfiguration) WithStatus(value *StorageVer
     	b.Status = value
     	return b
     }
    +
    +// GetName retrieves the value of the Name field in the declarative configuration.
    +func (b *StorageVersionMigrationApplyConfiguration) GetName() *string {
    +	b.ensureObjectMetaApplyConfigurationExists()
    +	return b.Name
    +}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go
    index 6c7c5b2645..02ddb540f8 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// StorageVersionMigrationSpecApplyConfiguration represents an declarative configuration of the StorageVersionMigrationSpec type for use
    +// StorageVersionMigrationSpecApplyConfiguration represents a declarative configuration of the StorageVersionMigrationSpec type for use
     // with apply.
     type StorageVersionMigrationSpecApplyConfiguration struct {
     	Resource      *GroupVersionResourceApplyConfiguration `json:"resource,omitempty"`
     	ContinueToken *string                                 `json:"continueToken,omitempty"`
     }
     
    -// StorageVersionMigrationSpecApplyConfiguration constructs an declarative configuration of the StorageVersionMigrationSpec type for use with
    +// StorageVersionMigrationSpecApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationSpec type for use with
     // apply.
     func StorageVersionMigrationSpec() *StorageVersionMigrationSpecApplyConfiguration {
     	return &StorageVersionMigrationSpecApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go
    index b8d397548a..fc957cb15c 100644
    --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go
    +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go
    @@ -18,14 +18,14 @@ limitations under the License.
     
     package v1alpha1
     
    -// StorageVersionMigrationStatusApplyConfiguration represents an declarative configuration of the StorageVersionMigrationStatus type for use
    +// StorageVersionMigrationStatusApplyConfiguration represents a declarative configuration of the StorageVersionMigrationStatus type for use
     // with apply.
     type StorageVersionMigrationStatusApplyConfiguration struct {
     	Conditions      []MigrationConditionApplyConfiguration `json:"conditions,omitempty"`
     	ResourceVersion *string                                `json:"resourceVersion,omitempty"`
     }
     
    -// StorageVersionMigrationStatusApplyConfiguration constructs an declarative configuration of the StorageVersionMigrationStatus type for use with
    +// StorageVersionMigrationStatusApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationStatus type for use with
     // apply.
     func StorageVersionMigrationStatus() *StorageVersionMigrationStatusApplyConfiguration {
     	return &StorageVersionMigrationStatusApplyConfiguration{}
    diff --git a/vendor/k8s.io/client-go/applyconfigurations/utils.go b/vendor/k8s.io/client-go/applyconfigurations/utils.go
    new file mode 100644
    index 0000000000..0955b8f44f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/applyconfigurations/utils.go
    @@ -0,0 +1,1740 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by applyconfiguration-gen. DO NOT EDIT.
    +
    +package applyconfigurations
    +
    +import (
    +	v1 "k8s.io/api/admissionregistration/v1"
    +	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    +	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
    +	appsv1 "k8s.io/api/apps/v1"
    +	appsv1beta1 "k8s.io/api/apps/v1beta1"
    +	v1beta2 "k8s.io/api/apps/v1beta2"
    +	autoscalingv1 "k8s.io/api/autoscaling/v1"
    +	v2 "k8s.io/api/autoscaling/v2"
    +	v2beta1 "k8s.io/api/autoscaling/v2beta1"
    +	v2beta2 "k8s.io/api/autoscaling/v2beta2"
    +	batchv1 "k8s.io/api/batch/v1"
    +	batchv1beta1 "k8s.io/api/batch/v1beta1"
    +	certificatesv1 "k8s.io/api/certificates/v1"
    +	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
    +	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
    +	coordinationv1 "k8s.io/api/coordination/v1"
    +	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
    +	corev1 "k8s.io/api/core/v1"
    +	discoveryv1 "k8s.io/api/discovery/v1"
    +	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
    +	eventsv1 "k8s.io/api/events/v1"
    +	eventsv1beta1 "k8s.io/api/events/v1beta1"
    +	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    +	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
    +	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
    +	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
    +	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
    +	imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
    +	networkingv1 "k8s.io/api/networking/v1"
    +	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	nodev1 "k8s.io/api/node/v1"
    +	nodev1alpha1 "k8s.io/api/node/v1alpha1"
    +	nodev1beta1 "k8s.io/api/node/v1beta1"
    +	policyv1 "k8s.io/api/policy/v1"
    +	policyv1beta1 "k8s.io/api/policy/v1beta1"
    +	rbacv1 "k8s.io/api/rbac/v1"
    +	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	schedulingv1 "k8s.io/api/scheduling/v1"
    +	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
    +	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
    +	storagev1 "k8s.io/api/storage/v1"
    +	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	schema "k8s.io/apimachinery/pkg/runtime/schema"
    +	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
    +	admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
    +	admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    +	applyconfigurationsapiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1"
    +	applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
    +	applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
    +	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
    +	applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
    +	autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
    +	autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
    +	autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
    +	applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
    +	applyconfigurationsbatchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1"
    +	applyconfigurationscertificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1"
    +	applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
    +	applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
    +	applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1"
    +	applyconfigurationscoordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1"
    +	applyconfigurationscoordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1"
    +	applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1"
    +	applyconfigurationsdiscoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1"
    +	applyconfigurationseventsv1 "k8s.io/client-go/applyconfigurations/events/v1"
    +	applyconfigurationseventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1"
    +	applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
    +	applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
    +	applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
    +	applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
    +	flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
    +	applyconfigurationsimagepolicyv1alpha1 "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1"
    +	internal "k8s.io/client-go/applyconfigurations/internal"
    +	applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
    +	applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
    +	applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
    +	applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
    +	applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1"
    +	applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1"
    +	applyconfigurationsnodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1"
    +	applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
    +	applyconfigurationspolicyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1"
    +	applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
    +	applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
    +	applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1"
    +	applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
    +	applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1"
    +	applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1"
    +	applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
    +	applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no
    +// apply configuration type exists for the given GroupVersionKind.
    +func ForKind(kind schema.GroupVersionKind) interface{} {
    +	switch kind {
    +	// Group=admissionregistration.k8s.io, Version=v1
    +	case v1.SchemeGroupVersion.WithKind("AuditAnnotation"):
    +		return &admissionregistrationv1.AuditAnnotationApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ExpressionWarning"):
    +		return &admissionregistrationv1.ExpressionWarningApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("MatchCondition"):
    +		return &admissionregistrationv1.MatchConditionApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("MatchResources"):
    +		return &admissionregistrationv1.MatchResourcesApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("MutatingWebhook"):
    +		return &admissionregistrationv1.MutatingWebhookApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"):
    +		return &admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"):
    +		return &admissionregistrationv1.NamedRuleWithOperationsApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ParamKind"):
    +		return &admissionregistrationv1.ParamKindApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ParamRef"):
    +		return &admissionregistrationv1.ParamRefApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("Rule"):
    +		return &admissionregistrationv1.RuleApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("RuleWithOperations"):
    +		return &admissionregistrationv1.RuleWithOperationsApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ServiceReference"):
    +		return &admissionregistrationv1.ServiceReferenceApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("TypeChecking"):
    +		return &admissionregistrationv1.TypeCheckingApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"):
    +		return &admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"):
    +		return &admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"):
    +		return &admissionregistrationv1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"):
    +		return &admissionregistrationv1.ValidatingAdmissionPolicySpecApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"):
    +		return &admissionregistrationv1.ValidatingAdmissionPolicyStatusApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ValidatingWebhook"):
    +		return &admissionregistrationv1.ValidatingWebhookApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"):
    +		return &admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("Validation"):
    +		return &admissionregistrationv1.ValidationApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("Variable"):
    +		return &admissionregistrationv1.VariableApplyConfiguration{}
    +	case v1.SchemeGroupVersion.WithKind("WebhookClientConfig"):
    +		return &admissionregistrationv1.WebhookClientConfigApplyConfiguration{}
    +
    +		// Group=admissionregistration.k8s.io, Version=v1alpha1
    +	case v1alpha1.SchemeGroupVersion.WithKind("AuditAnnotation"):
    +		return &admissionregistrationv1alpha1.AuditAnnotationApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ExpressionWarning"):
    +		return &admissionregistrationv1alpha1.ExpressionWarningApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("MatchCondition"):
    +		return &admissionregistrationv1alpha1.MatchConditionApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("MatchResources"):
    +		return &admissionregistrationv1alpha1.MatchResourcesApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"):
    +		return &admissionregistrationv1alpha1.NamedRuleWithOperationsApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ParamKind"):
    +		return &admissionregistrationv1alpha1.ParamKindApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ParamRef"):
    +		return &admissionregistrationv1alpha1.ParamRefApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("TypeChecking"):
    +		return &admissionregistrationv1alpha1.TypeCheckingApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"):
    +		return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"):
    +		return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"):
    +		return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"):
    +		return &admissionregistrationv1alpha1.ValidatingAdmissionPolicySpecApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"):
    +		return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatusApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("Validation"):
    +		return &admissionregistrationv1alpha1.ValidationApplyConfiguration{}
    +	case v1alpha1.SchemeGroupVersion.WithKind("Variable"):
    +		return &admissionregistrationv1alpha1.VariableApplyConfiguration{}
    +
    +		// Group=admissionregistration.k8s.io, Version=v1beta1
    +	case v1beta1.SchemeGroupVersion.WithKind("AuditAnnotation"):
    +		return &admissionregistrationv1beta1.AuditAnnotationApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ExpressionWarning"):
    +		return &admissionregistrationv1beta1.ExpressionWarningApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("MatchCondition"):
    +		return &admissionregistrationv1beta1.MatchConditionApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("MatchResources"):
    +		return &admissionregistrationv1beta1.MatchResourcesApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("MutatingWebhook"):
    +		return &admissionregistrationv1beta1.MutatingWebhookApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"):
    +		return &admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"):
    +		return &admissionregistrationv1beta1.NamedRuleWithOperationsApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ParamKind"):
    +		return &admissionregistrationv1beta1.ParamKindApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ParamRef"):
    +		return &admissionregistrationv1beta1.ParamRefApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ServiceReference"):
    +		return &admissionregistrationv1beta1.ServiceReferenceApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("TypeChecking"):
    +		return &admissionregistrationv1beta1.TypeCheckingApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"):
    +		return &admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"):
    +		return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"):
    +		return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"):
    +		return &admissionregistrationv1beta1.ValidatingAdmissionPolicySpecApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"):
    +		return &admissionregistrationv1beta1.ValidatingAdmissionPolicyStatusApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhook"):
    +		return &admissionregistrationv1beta1.ValidatingWebhookApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"):
    +		return &admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("Validation"):
    +		return &admissionregistrationv1beta1.ValidationApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("Variable"):
    +		return &admissionregistrationv1beta1.VariableApplyConfiguration{}
    +	case v1beta1.SchemeGroupVersion.WithKind("WebhookClientConfig"):
    +		return &admissionregistrationv1beta1.WebhookClientConfigApplyConfiguration{}
    +
    +		// Group=apps, Version=v1
    +	case appsv1.SchemeGroupVersion.WithKind("ControllerRevision"):
    +		return &applyconfigurationsappsv1.ControllerRevisionApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DaemonSet"):
    +		return &applyconfigurationsappsv1.DaemonSetApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DaemonSetCondition"):
    +		return &applyconfigurationsappsv1.DaemonSetConditionApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DaemonSetSpec"):
    +		return &applyconfigurationsappsv1.DaemonSetSpecApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DaemonSetStatus"):
    +		return &applyconfigurationsappsv1.DaemonSetStatusApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"):
    +		return &applyconfigurationsappsv1.DaemonSetUpdateStrategyApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("Deployment"):
    +		return &applyconfigurationsappsv1.DeploymentApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DeploymentCondition"):
    +		return &applyconfigurationsappsv1.DeploymentConditionApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DeploymentSpec"):
    +		return &applyconfigurationsappsv1.DeploymentSpecApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DeploymentStatus"):
    +		return &applyconfigurationsappsv1.DeploymentStatusApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("DeploymentStrategy"):
    +		return &applyconfigurationsappsv1.DeploymentStrategyApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("ReplicaSet"):
    +		return &applyconfigurationsappsv1.ReplicaSetApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("ReplicaSetCondition"):
    +		return &applyconfigurationsappsv1.ReplicaSetConditionApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("ReplicaSetSpec"):
    +		return &applyconfigurationsappsv1.ReplicaSetSpecApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("ReplicaSetStatus"):
    +		return &applyconfigurationsappsv1.ReplicaSetStatusApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"):
    +		return &applyconfigurationsappsv1.RollingUpdateDaemonSetApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"):
    +		return &applyconfigurationsappsv1.RollingUpdateDeploymentApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"):
    +		return &applyconfigurationsappsv1.RollingUpdateStatefulSetStrategyApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("StatefulSet"):
    +		return &applyconfigurationsappsv1.StatefulSetApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("StatefulSetCondition"):
    +		return &applyconfigurationsappsv1.StatefulSetConditionApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("StatefulSetOrdinals"):
    +		return &applyconfigurationsappsv1.StatefulSetOrdinalsApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"):
    +		return &applyconfigurationsappsv1.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("StatefulSetSpec"):
    +		return &applyconfigurationsappsv1.StatefulSetSpecApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("StatefulSetStatus"):
    +		return &applyconfigurationsappsv1.StatefulSetStatusApplyConfiguration{}
    +	case appsv1.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"):
    +		return &applyconfigurationsappsv1.StatefulSetUpdateStrategyApplyConfiguration{}
    +
    +		// Group=apps, Version=v1beta1
    +	case appsv1beta1.SchemeGroupVersion.WithKind("ControllerRevision"):
    +		return &applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("Deployment"):
    +		return &applyconfigurationsappsv1beta1.DeploymentApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentCondition"):
    +		return &applyconfigurationsappsv1beta1.DeploymentConditionApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentSpec"):
    +		return &applyconfigurationsappsv1beta1.DeploymentSpecApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentStatus"):
    +		return &applyconfigurationsappsv1beta1.DeploymentStatusApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentStrategy"):
    +		return &applyconfigurationsappsv1beta1.DeploymentStrategyApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("RollbackConfig"):
    +		return &applyconfigurationsappsv1beta1.RollbackConfigApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"):
    +		return &applyconfigurationsappsv1beta1.RollingUpdateDeploymentApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"):
    +		return &applyconfigurationsappsv1beta1.RollingUpdateStatefulSetStrategyApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"):
    +		return &applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetCondition"):
    +		return &applyconfigurationsappsv1beta1.StatefulSetConditionApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetOrdinals"):
    +		return &applyconfigurationsappsv1beta1.StatefulSetOrdinalsApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"):
    +		return &applyconfigurationsappsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetSpec"):
    +		return &applyconfigurationsappsv1beta1.StatefulSetSpecApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetStatus"):
    +		return &applyconfigurationsappsv1beta1.StatefulSetStatusApplyConfiguration{}
    +	case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"):
    +		return &applyconfigurationsappsv1beta1.StatefulSetUpdateStrategyApplyConfiguration{}
    +
    +		// Group=apps, Version=v1beta2
    +	case v1beta2.SchemeGroupVersion.WithKind("ControllerRevision"):
    +		return &appsv1beta2.ControllerRevisionApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DaemonSet"):
    +		return &appsv1beta2.DaemonSetApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DaemonSetCondition"):
    +		return &appsv1beta2.DaemonSetConditionApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DaemonSetSpec"):
    +		return &appsv1beta2.DaemonSetSpecApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DaemonSetStatus"):
    +		return &appsv1beta2.DaemonSetStatusApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"):
    +		return &appsv1beta2.DaemonSetUpdateStrategyApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("Deployment"):
    +		return &appsv1beta2.DeploymentApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DeploymentCondition"):
    +		return &appsv1beta2.DeploymentConditionApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DeploymentSpec"):
    +		return &appsv1beta2.DeploymentSpecApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DeploymentStatus"):
    +		return &appsv1beta2.DeploymentStatusApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("DeploymentStrategy"):
    +		return &appsv1beta2.DeploymentStrategyApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("ReplicaSet"):
    +		return &appsv1beta2.ReplicaSetApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetCondition"):
    +		return &appsv1beta2.ReplicaSetConditionApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetSpec"):
    +		return &appsv1beta2.ReplicaSetSpecApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetStatus"):
    +		return &appsv1beta2.ReplicaSetStatusApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"):
    +		return &appsv1beta2.RollingUpdateDaemonSetApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateDeployment"):
    +		return &appsv1beta2.RollingUpdateDeploymentApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"):
    +		return &appsv1beta2.RollingUpdateStatefulSetStrategyApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("Scale"):
    +		return &appsv1beta2.ScaleApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("StatefulSet"):
    +		return &appsv1beta2.StatefulSetApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("StatefulSetCondition"):
    +		return &appsv1beta2.StatefulSetConditionApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("StatefulSetOrdinals"):
    +		return &appsv1beta2.StatefulSetOrdinalsApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"):
    +		return &appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("StatefulSetSpec"):
    +		return &appsv1beta2.StatefulSetSpecApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("StatefulSetStatus"):
    +		return &appsv1beta2.StatefulSetStatusApplyConfiguration{}
    +	case v1beta2.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"):
    +		return &appsv1beta2.StatefulSetUpdateStrategyApplyConfiguration{}
    +
    +		// Group=autoscaling, Version=v1
    +	case autoscalingv1.SchemeGroupVersion.WithKind("CrossVersionObjectReference"):
    +		return &applyconfigurationsautoscalingv1.CrossVersionObjectReferenceApplyConfiguration{}
    +	case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"):
    +		return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration{}
    +	case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"):
    +		return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerSpecApplyConfiguration{}
    +	case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"):
    +		return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerStatusApplyConfiguration{}
    +	case autoscalingv1.SchemeGroupVersion.WithKind("Scale"):
    +		return &applyconfigurationsautoscalingv1.ScaleApplyConfiguration{}
    +	case autoscalingv1.SchemeGroupVersion.WithKind("ScaleSpec"):
    +		return &applyconfigurationsautoscalingv1.ScaleSpecApplyConfiguration{}
    +	case autoscalingv1.SchemeGroupVersion.WithKind("ScaleStatus"):
    +		return &applyconfigurationsautoscalingv1.ScaleStatusApplyConfiguration{}
    +
    +		// Group=autoscaling, Version=v2
    +	case v2.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"):
    +		return &autoscalingv2.ContainerResourceMetricSourceApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"):
    +		return &autoscalingv2.ContainerResourceMetricStatusApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("CrossVersionObjectReference"):
    +		return &autoscalingv2.CrossVersionObjectReferenceApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("ExternalMetricSource"):
    +		return &autoscalingv2.ExternalMetricSourceApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("ExternalMetricStatus"):
    +		return &autoscalingv2.ExternalMetricStatusApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"):
    +		return &autoscalingv2.HorizontalPodAutoscalerApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerBehavior"):
    +		return &autoscalingv2.HorizontalPodAutoscalerBehaviorApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"):
    +		return &autoscalingv2.HorizontalPodAutoscalerConditionApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"):
    +		return &autoscalingv2.HorizontalPodAutoscalerSpecApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"):
    +		return &autoscalingv2.HorizontalPodAutoscalerStatusApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("HPAScalingPolicy"):
    +		return &autoscalingv2.HPAScalingPolicyApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("HPAScalingRules"):
    +		return &autoscalingv2.HPAScalingRulesApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("MetricIdentifier"):
    +		return &autoscalingv2.MetricIdentifierApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("MetricSpec"):
    +		return &autoscalingv2.MetricSpecApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("MetricStatus"):
    +		return &autoscalingv2.MetricStatusApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("MetricTarget"):
    +		return &autoscalingv2.MetricTargetApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("MetricValueStatus"):
    +		return &autoscalingv2.MetricValueStatusApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("ObjectMetricSource"):
    +		return &autoscalingv2.ObjectMetricSourceApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("ObjectMetricStatus"):
    +		return &autoscalingv2.ObjectMetricStatusApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("PodsMetricSource"):
    +		return &autoscalingv2.PodsMetricSourceApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("PodsMetricStatus"):
    +		return &autoscalingv2.PodsMetricStatusApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("ResourceMetricSource"):
    +		return &autoscalingv2.ResourceMetricSourceApplyConfiguration{}
    +	case v2.SchemeGroupVersion.WithKind("ResourceMetricStatus"):
    +		return &autoscalingv2.ResourceMetricStatusApplyConfiguration{}
    +
    +		// Group=autoscaling, Version=v2beta1
    +	case v2beta1.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"):
    +		return &autoscalingv2beta1.ContainerResourceMetricSourceApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"):
    +		return &autoscalingv2beta1.ContainerResourceMetricStatusApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("CrossVersionObjectReference"):
    +		return &autoscalingv2beta1.CrossVersionObjectReferenceApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("ExternalMetricSource"):
    +		return &autoscalingv2beta1.ExternalMetricSourceApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("ExternalMetricStatus"):
    +		return &autoscalingv2beta1.ExternalMetricStatusApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"):
    +		return &autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"):
    +		return &autoscalingv2beta1.HorizontalPodAutoscalerConditionApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"):
    +		return &autoscalingv2beta1.HorizontalPodAutoscalerSpecApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"):
    +		return &autoscalingv2beta1.HorizontalPodAutoscalerStatusApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("MetricSpec"):
    +		return &autoscalingv2beta1.MetricSpecApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("MetricStatus"):
    +		return &autoscalingv2beta1.MetricStatusApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("ObjectMetricSource"):
    +		return &autoscalingv2beta1.ObjectMetricSourceApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("ObjectMetricStatus"):
    +		return &autoscalingv2beta1.ObjectMetricStatusApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("PodsMetricSource"):
    +		return &autoscalingv2beta1.PodsMetricSourceApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("PodsMetricStatus"):
    +		return &autoscalingv2beta1.PodsMetricStatusApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("ResourceMetricSource"):
    +		return &autoscalingv2beta1.ResourceMetricSourceApplyConfiguration{}
    +	case v2beta1.SchemeGroupVersion.WithKind("ResourceMetricStatus"):
    +		return &autoscalingv2beta1.ResourceMetricStatusApplyConfiguration{}
    +
    +		// Group=autoscaling, Version=v2beta2
    +	case v2beta2.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"):
    +		return &autoscalingv2beta2.ContainerResourceMetricSourceApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"):
    +		return &autoscalingv2beta2.ContainerResourceMetricStatusApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("CrossVersionObjectReference"):
    +		return &autoscalingv2beta2.CrossVersionObjectReferenceApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("ExternalMetricSource"):
    +		return &autoscalingv2beta2.ExternalMetricSourceApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("ExternalMetricStatus"):
    +		return &autoscalingv2beta2.ExternalMetricStatusApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"):
    +		return &autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerBehavior"):
    +		return &autoscalingv2beta2.HorizontalPodAutoscalerBehaviorApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"):
    +		return &autoscalingv2beta2.HorizontalPodAutoscalerConditionApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"):
    +		return &autoscalingv2beta2.HorizontalPodAutoscalerSpecApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"):
    +		return &autoscalingv2beta2.HorizontalPodAutoscalerStatusApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("HPAScalingPolicy"):
    +		return &autoscalingv2beta2.HPAScalingPolicyApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("HPAScalingRules"):
    +		return &autoscalingv2beta2.HPAScalingRulesApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("MetricIdentifier"):
    +		return &autoscalingv2beta2.MetricIdentifierApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("MetricSpec"):
    +		return &autoscalingv2beta2.MetricSpecApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("MetricStatus"):
    +		return &autoscalingv2beta2.MetricStatusApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("MetricTarget"):
    +		return &autoscalingv2beta2.MetricTargetApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("MetricValueStatus"):
    +		return &autoscalingv2beta2.MetricValueStatusApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("ObjectMetricSource"):
    +		return &autoscalingv2beta2.ObjectMetricSourceApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("ObjectMetricStatus"):
    +		return &autoscalingv2beta2.ObjectMetricStatusApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("PodsMetricSource"):
    +		return &autoscalingv2beta2.PodsMetricSourceApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("PodsMetricStatus"):
    +		return &autoscalingv2beta2.PodsMetricStatusApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("ResourceMetricSource"):
    +		return &autoscalingv2beta2.ResourceMetricSourceApplyConfiguration{}
    +	case v2beta2.SchemeGroupVersion.WithKind("ResourceMetricStatus"):
    +		return &autoscalingv2beta2.ResourceMetricStatusApplyConfiguration{}
    +
    +		// Group=batch, Version=v1
    +	case batchv1.SchemeGroupVersion.WithKind("CronJob"):
    +		return &applyconfigurationsbatchv1.CronJobApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("CronJobSpec"):
    +		return &applyconfigurationsbatchv1.CronJobSpecApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("CronJobStatus"):
    +		return &applyconfigurationsbatchv1.CronJobStatusApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("Job"):
    +		return &applyconfigurationsbatchv1.JobApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("JobCondition"):
    +		return &applyconfigurationsbatchv1.JobConditionApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("JobSpec"):
    +		return &applyconfigurationsbatchv1.JobSpecApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("JobStatus"):
    +		return &applyconfigurationsbatchv1.JobStatusApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("JobTemplateSpec"):
    +		return &applyconfigurationsbatchv1.JobTemplateSpecApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicy"):
    +		return &applyconfigurationsbatchv1.PodFailurePolicyApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyOnExitCodesRequirement"):
    +		return &applyconfigurationsbatchv1.PodFailurePolicyOnExitCodesRequirementApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyOnPodConditionsPattern"):
    +		return &applyconfigurationsbatchv1.PodFailurePolicyOnPodConditionsPatternApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyRule"):
    +		return &applyconfigurationsbatchv1.PodFailurePolicyRuleApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("SuccessPolicy"):
    +		return &applyconfigurationsbatchv1.SuccessPolicyApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("SuccessPolicyRule"):
    +		return &applyconfigurationsbatchv1.SuccessPolicyRuleApplyConfiguration{}
    +	case batchv1.SchemeGroupVersion.WithKind("UncountedTerminatedPods"):
    +		return &applyconfigurationsbatchv1.UncountedTerminatedPodsApplyConfiguration{}
    +
    +		// Group=batch, Version=v1beta1
    +	case batchv1beta1.SchemeGroupVersion.WithKind("CronJob"):
    +		return &applyconfigurationsbatchv1beta1.CronJobApplyConfiguration{}
    +	case batchv1beta1.SchemeGroupVersion.WithKind("CronJobSpec"):
    +		return &applyconfigurationsbatchv1beta1.CronJobSpecApplyConfiguration{}
    +	case batchv1beta1.SchemeGroupVersion.WithKind("CronJobStatus"):
    +		return &applyconfigurationsbatchv1beta1.CronJobStatusApplyConfiguration{}
    +	case batchv1beta1.SchemeGroupVersion.WithKind("JobTemplateSpec"):
    +		return &applyconfigurationsbatchv1beta1.JobTemplateSpecApplyConfiguration{}
    +
    +		// Group=certificates.k8s.io, Version=v1
    +	case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest"):
    +		return &applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration{}
    +	case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestCondition"):
    +		return &applyconfigurationscertificatesv1.CertificateSigningRequestConditionApplyConfiguration{}
    +	case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestSpec"):
    +		return &applyconfigurationscertificatesv1.CertificateSigningRequestSpecApplyConfiguration{}
    +	case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestStatus"):
    +		return &applyconfigurationscertificatesv1.CertificateSigningRequestStatusApplyConfiguration{}
    +
    +		// Group=certificates.k8s.io, Version=v1alpha1
    +	case certificatesv1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundle"):
    +		return &applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration{}
    +	case certificatesv1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundleSpec"):
    +		return &applyconfigurationscertificatesv1alpha1.ClusterTrustBundleSpecApplyConfiguration{}
    +
    +		// Group=certificates.k8s.io, Version=v1beta1
    +	case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest"):
    +		return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration{}
    +	case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestCondition"):
    +		return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestConditionApplyConfiguration{}
    +	case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestSpec"):
    +		return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestSpecApplyConfiguration{}
    +	case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestStatus"):
    +		return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestStatusApplyConfiguration{}
    +
    +		// Group=coordination.k8s.io, Version=v1
    +	case coordinationv1.SchemeGroupVersion.WithKind("Lease"):
    +		return &applyconfigurationscoordinationv1.LeaseApplyConfiguration{}
    +	case coordinationv1.SchemeGroupVersion.WithKind("LeaseSpec"):
    +		return &applyconfigurationscoordinationv1.LeaseSpecApplyConfiguration{}
    +
    +		// Group=coordination.k8s.io, Version=v1alpha1
    +	case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate"):
    +		return &applyconfigurationscoordinationv1alpha1.LeaseCandidateApplyConfiguration{}
    +	case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidateSpec"):
    +		return &applyconfigurationscoordinationv1alpha1.LeaseCandidateSpecApplyConfiguration{}
    +
    +		// Group=coordination.k8s.io, Version=v1beta1
    +	case coordinationv1beta1.SchemeGroupVersion.WithKind("Lease"):
    +		return &applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration{}
    +	case coordinationv1beta1.SchemeGroupVersion.WithKind("LeaseSpec"):
    +		return &applyconfigurationscoordinationv1beta1.LeaseSpecApplyConfiguration{}
    +
    +		// Group=core, Version=v1
    +	case corev1.SchemeGroupVersion.WithKind("Affinity"):
    +		return &applyconfigurationscorev1.AffinityApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("AppArmorProfile"):
    +		return &applyconfigurationscorev1.AppArmorProfileApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("AttachedVolume"):
    +		return &applyconfigurationscorev1.AttachedVolumeApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("AWSElasticBlockStoreVolumeSource"):
    +		return &applyconfigurationscorev1.AWSElasticBlockStoreVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("AzureDiskVolumeSource"):
    +		return &applyconfigurationscorev1.AzureDiskVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("AzureFilePersistentVolumeSource"):
    +		return &applyconfigurationscorev1.AzureFilePersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("AzureFileVolumeSource"):
    +		return &applyconfigurationscorev1.AzureFileVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Capabilities"):
    +		return &applyconfigurationscorev1.CapabilitiesApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("CephFSPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.CephFSPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("CephFSVolumeSource"):
    +		return &applyconfigurationscorev1.CephFSVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("CinderPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.CinderPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("CinderVolumeSource"):
    +		return &applyconfigurationscorev1.CinderVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ClientIPConfig"):
    +		return &applyconfigurationscorev1.ClientIPConfigApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ClusterTrustBundleProjection"):
    +		return &applyconfigurationscorev1.ClusterTrustBundleProjectionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ComponentCondition"):
    +		return &applyconfigurationscorev1.ComponentConditionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ComponentStatus"):
    +		return &applyconfigurationscorev1.ComponentStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ConfigMap"):
    +		return &applyconfigurationscorev1.ConfigMapApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ConfigMapEnvSource"):
    +		return &applyconfigurationscorev1.ConfigMapEnvSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ConfigMapKeySelector"):
    +		return &applyconfigurationscorev1.ConfigMapKeySelectorApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ConfigMapNodeConfigSource"):
    +		return &applyconfigurationscorev1.ConfigMapNodeConfigSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ConfigMapProjection"):
    +		return &applyconfigurationscorev1.ConfigMapProjectionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ConfigMapVolumeSource"):
    +		return &applyconfigurationscorev1.ConfigMapVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Container"):
    +		return &applyconfigurationscorev1.ContainerApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerImage"):
    +		return &applyconfigurationscorev1.ContainerImageApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerPort"):
    +		return &applyconfigurationscorev1.ContainerPortApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerResizePolicy"):
    +		return &applyconfigurationscorev1.ContainerResizePolicyApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerState"):
    +		return &applyconfigurationscorev1.ContainerStateApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerStateRunning"):
    +		return &applyconfigurationscorev1.ContainerStateRunningApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerStateTerminated"):
    +		return &applyconfigurationscorev1.ContainerStateTerminatedApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerStateWaiting"):
    +		return &applyconfigurationscorev1.ContainerStateWaitingApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerStatus"):
    +		return &applyconfigurationscorev1.ContainerStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ContainerUser"):
    +		return &applyconfigurationscorev1.ContainerUserApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("CSIPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.CSIPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("CSIVolumeSource"):
    +		return &applyconfigurationscorev1.CSIVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("DaemonEndpoint"):
    +		return &applyconfigurationscorev1.DaemonEndpointApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("DownwardAPIProjection"):
    +		return &applyconfigurationscorev1.DownwardAPIProjectionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("DownwardAPIVolumeFile"):
    +		return &applyconfigurationscorev1.DownwardAPIVolumeFileApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("DownwardAPIVolumeSource"):
    +		return &applyconfigurationscorev1.DownwardAPIVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EmptyDirVolumeSource"):
    +		return &applyconfigurationscorev1.EmptyDirVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EndpointAddress"):
    +		return &applyconfigurationscorev1.EndpointAddressApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EndpointPort"):
    +		return &applyconfigurationscorev1.EndpointPortApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Endpoints"):
    +		return &applyconfigurationscorev1.EndpointsApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EndpointSubset"):
    +		return &applyconfigurationscorev1.EndpointSubsetApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EnvFromSource"):
    +		return &applyconfigurationscorev1.EnvFromSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EnvVar"):
    +		return &applyconfigurationscorev1.EnvVarApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EnvVarSource"):
    +		return &applyconfigurationscorev1.EnvVarSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EphemeralContainer"):
    +		return &applyconfigurationscorev1.EphemeralContainerApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EphemeralContainerCommon"):
    +		return &applyconfigurationscorev1.EphemeralContainerCommonApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EphemeralVolumeSource"):
    +		return &applyconfigurationscorev1.EphemeralVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Event"):
    +		return &applyconfigurationscorev1.EventApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EventSeries"):
    +		return &applyconfigurationscorev1.EventSeriesApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("EventSource"):
    +		return &applyconfigurationscorev1.EventSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ExecAction"):
    +		return &applyconfigurationscorev1.ExecActionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("FCVolumeSource"):
    +		return &applyconfigurationscorev1.FCVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("FlexPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.FlexPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("FlexVolumeSource"):
    +		return &applyconfigurationscorev1.FlexVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("FlockerVolumeSource"):
    +		return &applyconfigurationscorev1.FlockerVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("GCEPersistentDiskVolumeSource"):
    +		return &applyconfigurationscorev1.GCEPersistentDiskVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("GitRepoVolumeSource"):
    +		return &applyconfigurationscorev1.GitRepoVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("GlusterfsPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.GlusterfsPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("GlusterfsVolumeSource"):
    +		return &applyconfigurationscorev1.GlusterfsVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("GRPCAction"):
    +		return &applyconfigurationscorev1.GRPCActionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("HostAlias"):
    +		return &applyconfigurationscorev1.HostAliasApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("HostIP"):
    +		return &applyconfigurationscorev1.HostIPApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("HostPathVolumeSource"):
    +		return &applyconfigurationscorev1.HostPathVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("HTTPGetAction"):
    +		return &applyconfigurationscorev1.HTTPGetActionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("HTTPHeader"):
    +		return &applyconfigurationscorev1.HTTPHeaderApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ImageVolumeSource"):
    +		return &applyconfigurationscorev1.ImageVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ISCSIPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.ISCSIPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ISCSIVolumeSource"):
    +		return &applyconfigurationscorev1.ISCSIVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("KeyToPath"):
    +		return &applyconfigurationscorev1.KeyToPathApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Lifecycle"):
    +		return &applyconfigurationscorev1.LifecycleApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LifecycleHandler"):
    +		return &applyconfigurationscorev1.LifecycleHandlerApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LimitRange"):
    +		return &applyconfigurationscorev1.LimitRangeApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LimitRangeItem"):
    +		return &applyconfigurationscorev1.LimitRangeItemApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LimitRangeSpec"):
    +		return &applyconfigurationscorev1.LimitRangeSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LinuxContainerUser"):
    +		return &applyconfigurationscorev1.LinuxContainerUserApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LoadBalancerIngress"):
    +		return &applyconfigurationscorev1.LoadBalancerIngressApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LoadBalancerStatus"):
    +		return &applyconfigurationscorev1.LoadBalancerStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LocalObjectReference"):
    +		return &applyconfigurationscorev1.LocalObjectReferenceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("LocalVolumeSource"):
    +		return &applyconfigurationscorev1.LocalVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ModifyVolumeStatus"):
    +		return &applyconfigurationscorev1.ModifyVolumeStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Namespace"):
    +		return &applyconfigurationscorev1.NamespaceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NamespaceCondition"):
    +		return &applyconfigurationscorev1.NamespaceConditionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NamespaceSpec"):
    +		return &applyconfigurationscorev1.NamespaceSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NamespaceStatus"):
    +		return &applyconfigurationscorev1.NamespaceStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NFSVolumeSource"):
    +		return &applyconfigurationscorev1.NFSVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Node"):
    +		return &applyconfigurationscorev1.NodeApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeAddress"):
    +		return &applyconfigurationscorev1.NodeAddressApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeAffinity"):
    +		return &applyconfigurationscorev1.NodeAffinityApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeCondition"):
    +		return &applyconfigurationscorev1.NodeConditionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeConfigSource"):
    +		return &applyconfigurationscorev1.NodeConfigSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeConfigStatus"):
    +		return &applyconfigurationscorev1.NodeConfigStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeDaemonEndpoints"):
    +		return &applyconfigurationscorev1.NodeDaemonEndpointsApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeFeatures"):
    +		return &applyconfigurationscorev1.NodeFeaturesApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandler"):
    +		return &applyconfigurationscorev1.NodeRuntimeHandlerApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandlerFeatures"):
    +		return &applyconfigurationscorev1.NodeRuntimeHandlerFeaturesApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeSelector"):
    +		return &applyconfigurationscorev1.NodeSelectorApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeSelectorRequirement"):
    +		return &applyconfigurationscorev1.NodeSelectorRequirementApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeSelectorTerm"):
    +		return &applyconfigurationscorev1.NodeSelectorTermApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeSpec"):
    +		return &applyconfigurationscorev1.NodeSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeStatus"):
    +		return &applyconfigurationscorev1.NodeStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("NodeSystemInfo"):
    +		return &applyconfigurationscorev1.NodeSystemInfoApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ObjectFieldSelector"):
    +		return &applyconfigurationscorev1.ObjectFieldSelectorApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ObjectReference"):
    +		return &applyconfigurationscorev1.ObjectReferenceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolume"):
    +		return &applyconfigurationscorev1.PersistentVolumeApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"):
    +		return &applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimCondition"):
    +		return &applyconfigurationscorev1.PersistentVolumeClaimConditionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimSpec"):
    +		return &applyconfigurationscorev1.PersistentVolumeClaimSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimStatus"):
    +		return &applyconfigurationscorev1.PersistentVolumeClaimStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimTemplate"):
    +		return &applyconfigurationscorev1.PersistentVolumeClaimTemplateApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimVolumeSource"):
    +		return &applyconfigurationscorev1.PersistentVolumeClaimVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeSource"):
    +		return &applyconfigurationscorev1.PersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeSpec"):
    +		return &applyconfigurationscorev1.PersistentVolumeSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PersistentVolumeStatus"):
    +		return &applyconfigurationscorev1.PersistentVolumeStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PhotonPersistentDiskVolumeSource"):
    +		return &applyconfigurationscorev1.PhotonPersistentDiskVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Pod"):
    +		return &applyconfigurationscorev1.PodApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodAffinity"):
    +		return &applyconfigurationscorev1.PodAffinityApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodAffinityTerm"):
    +		return &applyconfigurationscorev1.PodAffinityTermApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodAntiAffinity"):
    +		return &applyconfigurationscorev1.PodAntiAffinityApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodCondition"):
    +		return &applyconfigurationscorev1.PodConditionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodDNSConfig"):
    +		return &applyconfigurationscorev1.PodDNSConfigApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodDNSConfigOption"):
    +		return &applyconfigurationscorev1.PodDNSConfigOptionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodIP"):
    +		return &applyconfigurationscorev1.PodIPApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodOS"):
    +		return &applyconfigurationscorev1.PodOSApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodReadinessGate"):
    +		return &applyconfigurationscorev1.PodReadinessGateApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodResourceClaim"):
    +		return &applyconfigurationscorev1.PodResourceClaimApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodResourceClaimStatus"):
    +		return &applyconfigurationscorev1.PodResourceClaimStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodSchedulingGate"):
    +		return &applyconfigurationscorev1.PodSchedulingGateApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodSecurityContext"):
    +		return &applyconfigurationscorev1.PodSecurityContextApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodSpec"):
    +		return &applyconfigurationscorev1.PodSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodStatus"):
    +		return &applyconfigurationscorev1.PodStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodTemplate"):
    +		return &applyconfigurationscorev1.PodTemplateApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PodTemplateSpec"):
    +		return &applyconfigurationscorev1.PodTemplateSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PortStatus"):
    +		return &applyconfigurationscorev1.PortStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PortworxVolumeSource"):
    +		return &applyconfigurationscorev1.PortworxVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("PreferredSchedulingTerm"):
    +		return &applyconfigurationscorev1.PreferredSchedulingTermApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Probe"):
    +		return &applyconfigurationscorev1.ProbeApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ProbeHandler"):
    +		return &applyconfigurationscorev1.ProbeHandlerApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ProjectedVolumeSource"):
    +		return &applyconfigurationscorev1.ProjectedVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("QuobyteVolumeSource"):
    +		return &applyconfigurationscorev1.QuobyteVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("RBDPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.RBDPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("RBDVolumeSource"):
    +		return &applyconfigurationscorev1.RBDVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ReplicationController"):
    +		return &applyconfigurationscorev1.ReplicationControllerApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ReplicationControllerCondition"):
    +		return &applyconfigurationscorev1.ReplicationControllerConditionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ReplicationControllerSpec"):
    +		return &applyconfigurationscorev1.ReplicationControllerSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ReplicationControllerStatus"):
    +		return &applyconfigurationscorev1.ReplicationControllerStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceClaim"):
    +		return &applyconfigurationscorev1.ResourceClaimApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceFieldSelector"):
    +		return &applyconfigurationscorev1.ResourceFieldSelectorApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceHealth"):
    +		return &applyconfigurationscorev1.ResourceHealthApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceQuota"):
    +		return &applyconfigurationscorev1.ResourceQuotaApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceQuotaSpec"):
    +		return &applyconfigurationscorev1.ResourceQuotaSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceQuotaStatus"):
    +		return &applyconfigurationscorev1.ResourceQuotaStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceRequirements"):
    +		return &applyconfigurationscorev1.ResourceRequirementsApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ResourceStatus"):
    +		return &applyconfigurationscorev1.ResourceStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ScaleIOPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.ScaleIOPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ScaleIOVolumeSource"):
    +		return &applyconfigurationscorev1.ScaleIOVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ScopedResourceSelectorRequirement"):
    +		return &applyconfigurationscorev1.ScopedResourceSelectorRequirementApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ScopeSelector"):
    +		return &applyconfigurationscorev1.ScopeSelectorApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SeccompProfile"):
    +		return &applyconfigurationscorev1.SeccompProfileApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Secret"):
    +		return &applyconfigurationscorev1.SecretApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SecretEnvSource"):
    +		return &applyconfigurationscorev1.SecretEnvSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SecretKeySelector"):
    +		return &applyconfigurationscorev1.SecretKeySelectorApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SecretProjection"):
    +		return &applyconfigurationscorev1.SecretProjectionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SecretReference"):
    +		return &applyconfigurationscorev1.SecretReferenceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SecretVolumeSource"):
    +		return &applyconfigurationscorev1.SecretVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SecurityContext"):
    +		return &applyconfigurationscorev1.SecurityContextApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SELinuxOptions"):
    +		return &applyconfigurationscorev1.SELinuxOptionsApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Service"):
    +		return &applyconfigurationscorev1.ServiceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ServiceAccount"):
    +		return &applyconfigurationscorev1.ServiceAccountApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ServiceAccountTokenProjection"):
    +		return &applyconfigurationscorev1.ServiceAccountTokenProjectionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ServicePort"):
    +		return &applyconfigurationscorev1.ServicePortApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ServiceSpec"):
    +		return &applyconfigurationscorev1.ServiceSpecApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("ServiceStatus"):
    +		return &applyconfigurationscorev1.ServiceStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SessionAffinityConfig"):
    +		return &applyconfigurationscorev1.SessionAffinityConfigApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("SleepAction"):
    +		return &applyconfigurationscorev1.SleepActionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("StorageOSPersistentVolumeSource"):
    +		return &applyconfigurationscorev1.StorageOSPersistentVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("StorageOSVolumeSource"):
    +		return &applyconfigurationscorev1.StorageOSVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Sysctl"):
    +		return &applyconfigurationscorev1.SysctlApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Taint"):
    +		return &applyconfigurationscorev1.TaintApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("TCPSocketAction"):
    +		return &applyconfigurationscorev1.TCPSocketActionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Toleration"):
    +		return &applyconfigurationscorev1.TolerationApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("TopologySelectorLabelRequirement"):
    +		return &applyconfigurationscorev1.TopologySelectorLabelRequirementApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("TopologySelectorTerm"):
    +		return &applyconfigurationscorev1.TopologySelectorTermApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("TopologySpreadConstraint"):
    +		return &applyconfigurationscorev1.TopologySpreadConstraintApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("TypedLocalObjectReference"):
    +		return &applyconfigurationscorev1.TypedLocalObjectReferenceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("TypedObjectReference"):
    +		return &applyconfigurationscorev1.TypedObjectReferenceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("Volume"):
    +		return &applyconfigurationscorev1.VolumeApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VolumeDevice"):
    +		return &applyconfigurationscorev1.VolumeDeviceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VolumeMount"):
    +		return &applyconfigurationscorev1.VolumeMountApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VolumeMountStatus"):
    +		return &applyconfigurationscorev1.VolumeMountStatusApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VolumeNodeAffinity"):
    +		return &applyconfigurationscorev1.VolumeNodeAffinityApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VolumeProjection"):
    +		return &applyconfigurationscorev1.VolumeProjectionApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VolumeResourceRequirements"):
    +		return &applyconfigurationscorev1.VolumeResourceRequirementsApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VolumeSource"):
    +		return &applyconfigurationscorev1.VolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("VsphereVirtualDiskVolumeSource"):
    +		return &applyconfigurationscorev1.VsphereVirtualDiskVolumeSourceApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("WeightedPodAffinityTerm"):
    +		return &applyconfigurationscorev1.WeightedPodAffinityTermApplyConfiguration{}
    +	case corev1.SchemeGroupVersion.WithKind("WindowsSecurityContextOptions"):
    +		return &applyconfigurationscorev1.WindowsSecurityContextOptionsApplyConfiguration{}
    +
    +		// Group=discovery.k8s.io, Version=v1
    +	case discoveryv1.SchemeGroupVersion.WithKind("Endpoint"):
    +		return &applyconfigurationsdiscoveryv1.EndpointApplyConfiguration{}
    +	case discoveryv1.SchemeGroupVersion.WithKind("EndpointConditions"):
    +		return &applyconfigurationsdiscoveryv1.EndpointConditionsApplyConfiguration{}
    +	case discoveryv1.SchemeGroupVersion.WithKind("EndpointHints"):
    +		return &applyconfigurationsdiscoveryv1.EndpointHintsApplyConfiguration{}
    +	case discoveryv1.SchemeGroupVersion.WithKind("EndpointPort"):
    +		return &applyconfigurationsdiscoveryv1.EndpointPortApplyConfiguration{}
    +	case discoveryv1.SchemeGroupVersion.WithKind("EndpointSlice"):
    +		return &applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration{}
    +	case discoveryv1.SchemeGroupVersion.WithKind("ForZone"):
    +		return &applyconfigurationsdiscoveryv1.ForZoneApplyConfiguration{}
    +
    +		// Group=discovery.k8s.io, Version=v1beta1
    +	case discoveryv1beta1.SchemeGroupVersion.WithKind("Endpoint"):
    +		return &applyconfigurationsdiscoveryv1beta1.EndpointApplyConfiguration{}
    +	case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointConditions"):
    +		return &applyconfigurationsdiscoveryv1beta1.EndpointConditionsApplyConfiguration{}
    +	case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointHints"):
    +		return &applyconfigurationsdiscoveryv1beta1.EndpointHintsApplyConfiguration{}
    +	case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointPort"):
    +		return &applyconfigurationsdiscoveryv1beta1.EndpointPortApplyConfiguration{}
    +	case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointSlice"):
    +		return &applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration{}
    +	case discoveryv1beta1.SchemeGroupVersion.WithKind("ForZone"):
    +		return &applyconfigurationsdiscoveryv1beta1.ForZoneApplyConfiguration{}
    +
    +		// Group=events.k8s.io, Version=v1
    +	case eventsv1.SchemeGroupVersion.WithKind("Event"):
    +		return &applyconfigurationseventsv1.EventApplyConfiguration{}
    +	case eventsv1.SchemeGroupVersion.WithKind("EventSeries"):
    +		return &applyconfigurationseventsv1.EventSeriesApplyConfiguration{}
    +
    +		// Group=events.k8s.io, Version=v1beta1
    +	case eventsv1beta1.SchemeGroupVersion.WithKind("Event"):
    +		return &applyconfigurationseventsv1beta1.EventApplyConfiguration{}
    +	case eventsv1beta1.SchemeGroupVersion.WithKind("EventSeries"):
    +		return &applyconfigurationseventsv1beta1.EventSeriesApplyConfiguration{}
    +
    +		// Group=extensions, Version=v1beta1
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet"):
    +		return &applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetCondition"):
    +		return &applyconfigurationsextensionsv1beta1.DaemonSetConditionApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetSpec"):
    +		return &applyconfigurationsextensionsv1beta1.DaemonSetSpecApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetStatus"):
    +		return &applyconfigurationsextensionsv1beta1.DaemonSetStatusApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"):
    +		return &applyconfigurationsextensionsv1beta1.DaemonSetUpdateStrategyApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment"):
    +		return &applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentCondition"):
    +		return &applyconfigurationsextensionsv1beta1.DeploymentConditionApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentSpec"):
    +		return &applyconfigurationsextensionsv1beta1.DeploymentSpecApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentStatus"):
    +		return &applyconfigurationsextensionsv1beta1.DeploymentStatusApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentStrategy"):
    +		return &applyconfigurationsextensionsv1beta1.DeploymentStrategyApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"):
    +		return &applyconfigurationsextensionsv1beta1.HTTPIngressPathApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"):
    +		return &applyconfigurationsextensionsv1beta1.HTTPIngressRuleValueApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("Ingress"):
    +		return &applyconfigurationsextensionsv1beta1.IngressApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressBackend"):
    +		return &applyconfigurationsextensionsv1beta1.IngressBackendApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"):
    +		return &applyconfigurationsextensionsv1beta1.IngressLoadBalancerIngressApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"):
    +		return &applyconfigurationsextensionsv1beta1.IngressLoadBalancerStatusApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressPortStatus"):
    +		return &applyconfigurationsextensionsv1beta1.IngressPortStatusApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressRule"):
    +		return &applyconfigurationsextensionsv1beta1.IngressRuleApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressRuleValue"):
    +		return &applyconfigurationsextensionsv1beta1.IngressRuleValueApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressSpec"):
    +		return &applyconfigurationsextensionsv1beta1.IngressSpecApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressStatus"):
    +		return &applyconfigurationsextensionsv1beta1.IngressStatusApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressTLS"):
    +		return &applyconfigurationsextensionsv1beta1.IngressTLSApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("IPBlock"):
    +		return &applyconfigurationsextensionsv1beta1.IPBlockApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicy"):
    +		return &applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyEgressRule"):
    +		return &applyconfigurationsextensionsv1beta1.NetworkPolicyEgressRuleApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyIngressRule"):
    +		return &applyconfigurationsextensionsv1beta1.NetworkPolicyIngressRuleApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyPeer"):
    +		return &applyconfigurationsextensionsv1beta1.NetworkPolicyPeerApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyPort"):
    +		return &applyconfigurationsextensionsv1beta1.NetworkPolicyPortApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicySpec"):
    +		return &applyconfigurationsextensionsv1beta1.NetworkPolicySpecApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet"):
    +		return &applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetCondition"):
    +		return &applyconfigurationsextensionsv1beta1.ReplicaSetConditionApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetSpec"):
    +		return &applyconfigurationsextensionsv1beta1.ReplicaSetSpecApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetStatus"):
    +		return &applyconfigurationsextensionsv1beta1.ReplicaSetStatusApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("RollbackConfig"):
    +		return &applyconfigurationsextensionsv1beta1.RollbackConfigApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"):
    +		return &applyconfigurationsextensionsv1beta1.RollingUpdateDaemonSetApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"):
    +		return &applyconfigurationsextensionsv1beta1.RollingUpdateDeploymentApplyConfiguration{}
    +	case extensionsv1beta1.SchemeGroupVersion.WithKind("Scale"):
    +		return &applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration{}
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1.ExemptPriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"):
    +		return &applyconfigurationsflowcontrolv1.FlowDistinguisherMethodApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchema"):
    +		return &applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaCondition"):
    +		return &applyconfigurationsflowcontrolv1.FlowSchemaConditionApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaSpec"):
    +		return &applyconfigurationsflowcontrolv1.FlowSchemaSpecApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaStatus"):
    +		return &applyconfigurationsflowcontrolv1.FlowSchemaStatusApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("GroupSubject"):
    +		return &applyconfigurationsflowcontrolv1.GroupSubjectApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1.LimitedPriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("LimitResponse"):
    +		return &applyconfigurationsflowcontrolv1.LimitResponseApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("NonResourcePolicyRule"):
    +		return &applyconfigurationsflowcontrolv1.NonResourcePolicyRuleApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"):
    +		return &applyconfigurationsflowcontrolv1.PolicyRulesWithSubjectsApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"):
    +		return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationConditionApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"):
    +		return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationReferenceApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"):
    +		return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationSpecApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"):
    +		return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationStatusApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("QueuingConfiguration"):
    +		return &applyconfigurationsflowcontrolv1.QueuingConfigurationApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("ResourcePolicyRule"):
    +		return &applyconfigurationsflowcontrolv1.ResourcePolicyRuleApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("ServiceAccountSubject"):
    +		return &applyconfigurationsflowcontrolv1.ServiceAccountSubjectApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("Subject"):
    +		return &applyconfigurationsflowcontrolv1.SubjectApplyConfiguration{}
    +	case flowcontrolv1.SchemeGroupVersion.WithKind("UserSubject"):
    +		return &applyconfigurationsflowcontrolv1.UserSubjectApplyConfiguration{}
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1beta1
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta1.ExemptPriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"):
    +		return &applyconfigurationsflowcontrolv1beta1.FlowDistinguisherMethodApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchema"):
    +		return &applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaCondition"):
    +		return &applyconfigurationsflowcontrolv1beta1.FlowSchemaConditionApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaSpec"):
    +		return &applyconfigurationsflowcontrolv1beta1.FlowSchemaSpecApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaStatus"):
    +		return &applyconfigurationsflowcontrolv1beta1.FlowSchemaStatusApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("GroupSubject"):
    +		return &applyconfigurationsflowcontrolv1beta1.GroupSubjectApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta1.LimitedPriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("LimitResponse"):
    +		return &applyconfigurationsflowcontrolv1beta1.LimitResponseApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("NonResourcePolicyRule"):
    +		return &applyconfigurationsflowcontrolv1beta1.NonResourcePolicyRuleApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"):
    +		return &applyconfigurationsflowcontrolv1beta1.PolicyRulesWithSubjectsApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"):
    +		return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationConditionApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"):
    +		return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationReferenceApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"):
    +		return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationSpecApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"):
    +		return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationStatusApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("QueuingConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta1.QueuingConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ResourcePolicyRule"):
    +		return &applyconfigurationsflowcontrolv1beta1.ResourcePolicyRuleApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ServiceAccountSubject"):
    +		return &applyconfigurationsflowcontrolv1beta1.ServiceAccountSubjectApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("Subject"):
    +		return &applyconfigurationsflowcontrolv1beta1.SubjectApplyConfiguration{}
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithKind("UserSubject"):
    +		return &applyconfigurationsflowcontrolv1beta1.UserSubjectApplyConfiguration{}
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1beta2
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta2.ExemptPriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"):
    +		return &applyconfigurationsflowcontrolv1beta2.FlowDistinguisherMethodApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchema"):
    +		return &applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaCondition"):
    +		return &applyconfigurationsflowcontrolv1beta2.FlowSchemaConditionApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaSpec"):
    +		return &applyconfigurationsflowcontrolv1beta2.FlowSchemaSpecApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaStatus"):
    +		return &applyconfigurationsflowcontrolv1beta2.FlowSchemaStatusApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("GroupSubject"):
    +		return &applyconfigurationsflowcontrolv1beta2.GroupSubjectApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta2.LimitedPriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("LimitResponse"):
    +		return &applyconfigurationsflowcontrolv1beta2.LimitResponseApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("NonResourcePolicyRule"):
    +		return &applyconfigurationsflowcontrolv1beta2.NonResourcePolicyRuleApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"):
    +		return &applyconfigurationsflowcontrolv1beta2.PolicyRulesWithSubjectsApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"):
    +		return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationConditionApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"):
    +		return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationReferenceApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"):
    +		return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationSpecApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"):
    +		return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationStatusApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("QueuingConfiguration"):
    +		return &applyconfigurationsflowcontrolv1beta2.QueuingConfigurationApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ResourcePolicyRule"):
    +		return &applyconfigurationsflowcontrolv1beta2.ResourcePolicyRuleApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ServiceAccountSubject"):
    +		return &applyconfigurationsflowcontrolv1beta2.ServiceAccountSubjectApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("Subject"):
    +		return &applyconfigurationsflowcontrolv1beta2.SubjectApplyConfiguration{}
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithKind("UserSubject"):
    +		return &applyconfigurationsflowcontrolv1beta2.UserSubjectApplyConfiguration{}
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1beta3
    +	case v1beta3.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"):
    +		return &flowcontrolv1beta3.ExemptPriorityLevelConfigurationApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"):
    +		return &flowcontrolv1beta3.FlowDistinguisherMethodApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("FlowSchema"):
    +		return &flowcontrolv1beta3.FlowSchemaApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaCondition"):
    +		return &flowcontrolv1beta3.FlowSchemaConditionApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaSpec"):
    +		return &flowcontrolv1beta3.FlowSchemaSpecApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaStatus"):
    +		return &flowcontrolv1beta3.FlowSchemaStatusApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("GroupSubject"):
    +		return &flowcontrolv1beta3.GroupSubjectApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"):
    +		return &flowcontrolv1beta3.LimitedPriorityLevelConfigurationApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("LimitResponse"):
    +		return &flowcontrolv1beta3.LimitResponseApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("NonResourcePolicyRule"):
    +		return &flowcontrolv1beta3.NonResourcePolicyRuleApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"):
    +		return &flowcontrolv1beta3.PolicyRulesWithSubjectsApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"):
    +		return &flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"):
    +		return &flowcontrolv1beta3.PriorityLevelConfigurationConditionApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"):
    +		return &flowcontrolv1beta3.PriorityLevelConfigurationReferenceApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"):
    +		return &flowcontrolv1beta3.PriorityLevelConfigurationSpecApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"):
    +		return &flowcontrolv1beta3.PriorityLevelConfigurationStatusApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("QueuingConfiguration"):
    +		return &flowcontrolv1beta3.QueuingConfigurationApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("ResourcePolicyRule"):
    +		return &flowcontrolv1beta3.ResourcePolicyRuleApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("ServiceAccountSubject"):
    +		return &flowcontrolv1beta3.ServiceAccountSubjectApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("Subject"):
    +		return &flowcontrolv1beta3.SubjectApplyConfiguration{}
    +	case v1beta3.SchemeGroupVersion.WithKind("UserSubject"):
    +		return &flowcontrolv1beta3.UserSubjectApplyConfiguration{}
    +
    +		// Group=imagepolicy.k8s.io, Version=v1alpha1
    +	case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReview"):
    +		return &applyconfigurationsimagepolicyv1alpha1.ImageReviewApplyConfiguration{}
    +	case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewContainerSpec"):
    +		return &applyconfigurationsimagepolicyv1alpha1.ImageReviewContainerSpecApplyConfiguration{}
    +	case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewSpec"):
    +		return &applyconfigurationsimagepolicyv1alpha1.ImageReviewSpecApplyConfiguration{}
    +	case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewStatus"):
    +		return &applyconfigurationsimagepolicyv1alpha1.ImageReviewStatusApplyConfiguration{}
    +
    +		// Group=internal.apiserver.k8s.io, Version=v1alpha1
    +	case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("ServerStorageVersion"):
    +		return &applyconfigurationsapiserverinternalv1alpha1.ServerStorageVersionApplyConfiguration{}
    +	case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersion"):
    +		return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration{}
    +	case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersionCondition"):
    +		return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionConditionApplyConfiguration{}
    +	case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersionStatus"):
    +		return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionStatusApplyConfiguration{}
    +
    +		// Group=meta.k8s.io, Version=v1
    +	case metav1.SchemeGroupVersion.WithKind("Condition"):
    +		return &applyconfigurationsmetav1.ConditionApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("DeleteOptions"):
    +		return &applyconfigurationsmetav1.DeleteOptionsApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("LabelSelector"):
    +		return &applyconfigurationsmetav1.LabelSelectorApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("LabelSelectorRequirement"):
    +		return &applyconfigurationsmetav1.LabelSelectorRequirementApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("ManagedFieldsEntry"):
    +		return &applyconfigurationsmetav1.ManagedFieldsEntryApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("ObjectMeta"):
    +		return &applyconfigurationsmetav1.ObjectMetaApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("OwnerReference"):
    +		return &applyconfigurationsmetav1.OwnerReferenceApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("Preconditions"):
    +		return &applyconfigurationsmetav1.PreconditionsApplyConfiguration{}
    +	case metav1.SchemeGroupVersion.WithKind("TypeMeta"):
    +		return &applyconfigurationsmetav1.TypeMetaApplyConfiguration{}
    +
    +		// Group=networking.k8s.io, Version=v1
    +	case networkingv1.SchemeGroupVersion.WithKind("HTTPIngressPath"):
    +		return &applyconfigurationsnetworkingv1.HTTPIngressPathApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"):
    +		return &applyconfigurationsnetworkingv1.HTTPIngressRuleValueApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("Ingress"):
    +		return &applyconfigurationsnetworkingv1.IngressApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressBackend"):
    +		return &applyconfigurationsnetworkingv1.IngressBackendApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressClass"):
    +		return &applyconfigurationsnetworkingv1.IngressClassApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressClassParametersReference"):
    +		return &applyconfigurationsnetworkingv1.IngressClassParametersReferenceApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressClassSpec"):
    +		return &applyconfigurationsnetworkingv1.IngressClassSpecApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"):
    +		return &applyconfigurationsnetworkingv1.IngressLoadBalancerIngressApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"):
    +		return &applyconfigurationsnetworkingv1.IngressLoadBalancerStatusApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressPortStatus"):
    +		return &applyconfigurationsnetworkingv1.IngressPortStatusApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressRule"):
    +		return &applyconfigurationsnetworkingv1.IngressRuleApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressRuleValue"):
    +		return &applyconfigurationsnetworkingv1.IngressRuleValueApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressServiceBackend"):
    +		return &applyconfigurationsnetworkingv1.IngressServiceBackendApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressSpec"):
    +		return &applyconfigurationsnetworkingv1.IngressSpecApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressStatus"):
    +		return &applyconfigurationsnetworkingv1.IngressStatusApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IngressTLS"):
    +		return &applyconfigurationsnetworkingv1.IngressTLSApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("IPBlock"):
    +		return &applyconfigurationsnetworkingv1.IPBlockApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy"):
    +		return &applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyEgressRule"):
    +		return &applyconfigurationsnetworkingv1.NetworkPolicyEgressRuleApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyIngressRule"):
    +		return &applyconfigurationsnetworkingv1.NetworkPolicyIngressRuleApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyPeer"):
    +		return &applyconfigurationsnetworkingv1.NetworkPolicyPeerApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyPort"):
    +		return &applyconfigurationsnetworkingv1.NetworkPolicyPortApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicySpec"):
    +		return &applyconfigurationsnetworkingv1.NetworkPolicySpecApplyConfiguration{}
    +	case networkingv1.SchemeGroupVersion.WithKind("ServiceBackendPort"):
    +		return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{}
    +
    +		// Group=networking.k8s.io, Version=v1alpha1
    +	case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddress"):
    +		return &applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration{}
    +	case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddressSpec"):
    +		return &applyconfigurationsnetworkingv1alpha1.IPAddressSpecApplyConfiguration{}
    +	case networkingv1alpha1.SchemeGroupVersion.WithKind("ParentReference"):
    +		return &applyconfigurationsnetworkingv1alpha1.ParentReferenceApplyConfiguration{}
    +	case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR"):
    +		return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration{}
    +	case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRSpec"):
    +		return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRSpecApplyConfiguration{}
    +	case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRStatus"):
    +		return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRStatusApplyConfiguration{}
    +
    +		// Group=networking.k8s.io, Version=v1beta1
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"):
    +		return &applyconfigurationsnetworkingv1beta1.HTTPIngressPathApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"):
    +		return &applyconfigurationsnetworkingv1beta1.HTTPIngressRuleValueApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("Ingress"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressBackend"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressBackendApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClass"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClassParametersReference"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressClassParametersReferenceApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClassSpec"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressClassSpecApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressLoadBalancerIngressApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressLoadBalancerStatusApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressPortStatus"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressPortStatusApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressRule"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressRuleApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressRuleValue"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressRuleValueApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressSpec"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressSpecApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressStatus"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressStatusApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IngressTLS"):
    +		return &applyconfigurationsnetworkingv1beta1.IngressTLSApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IPAddress"):
    +		return &applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("IPAddressSpec"):
    +		return &applyconfigurationsnetworkingv1beta1.IPAddressSpecApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("ParentReference"):
    +		return &applyconfigurationsnetworkingv1beta1.ParentReferenceApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDR"):
    +		return &applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDRSpec"):
    +		return &applyconfigurationsnetworkingv1beta1.ServiceCIDRSpecApplyConfiguration{}
    +	case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDRStatus"):
    +		return &applyconfigurationsnetworkingv1beta1.ServiceCIDRStatusApplyConfiguration{}
    +
    +		// Group=node.k8s.io, Version=v1
    +	case nodev1.SchemeGroupVersion.WithKind("Overhead"):
    +		return &applyconfigurationsnodev1.OverheadApplyConfiguration{}
    +	case nodev1.SchemeGroupVersion.WithKind("RuntimeClass"):
    +		return &applyconfigurationsnodev1.RuntimeClassApplyConfiguration{}
    +	case nodev1.SchemeGroupVersion.WithKind("Scheduling"):
    +		return &applyconfigurationsnodev1.SchedulingApplyConfiguration{}
    +
    +		// Group=node.k8s.io, Version=v1alpha1
    +	case nodev1alpha1.SchemeGroupVersion.WithKind("Overhead"):
    +		return &applyconfigurationsnodev1alpha1.OverheadApplyConfiguration{}
    +	case nodev1alpha1.SchemeGroupVersion.WithKind("RuntimeClass"):
    +		return &applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration{}
    +	case nodev1alpha1.SchemeGroupVersion.WithKind("RuntimeClassSpec"):
    +		return &applyconfigurationsnodev1alpha1.RuntimeClassSpecApplyConfiguration{}
    +	case nodev1alpha1.SchemeGroupVersion.WithKind("Scheduling"):
    +		return &applyconfigurationsnodev1alpha1.SchedulingApplyConfiguration{}
    +
    +		// Group=node.k8s.io, Version=v1beta1
    +	case nodev1beta1.SchemeGroupVersion.WithKind("Overhead"):
    +		return &applyconfigurationsnodev1beta1.OverheadApplyConfiguration{}
    +	case nodev1beta1.SchemeGroupVersion.WithKind("RuntimeClass"):
    +		return &applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration{}
    +	case nodev1beta1.SchemeGroupVersion.WithKind("Scheduling"):
    +		return &applyconfigurationsnodev1beta1.SchedulingApplyConfiguration{}
    +
    +		// Group=policy, Version=v1
    +	case policyv1.SchemeGroupVersion.WithKind("Eviction"):
    +		return &applyconfigurationspolicyv1.EvictionApplyConfiguration{}
    +	case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget"):
    +		return &applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration{}
    +	case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudgetSpec"):
    +		return &applyconfigurationspolicyv1.PodDisruptionBudgetSpecApplyConfiguration{}
    +	case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudgetStatus"):
    +		return &applyconfigurationspolicyv1.PodDisruptionBudgetStatusApplyConfiguration{}
    +
    +		// Group=policy, Version=v1beta1
    +	case policyv1beta1.SchemeGroupVersion.WithKind("Eviction"):
    +		return &applyconfigurationspolicyv1beta1.EvictionApplyConfiguration{}
    +	case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"):
    +		return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration{}
    +	case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudgetSpec"):
    +		return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetSpecApplyConfiguration{}
    +	case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudgetStatus"):
    +		return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetStatusApplyConfiguration{}
    +
    +		// Group=rbac.authorization.k8s.io, Version=v1
    +	case rbacv1.SchemeGroupVersion.WithKind("AggregationRule"):
    +		return &applyconfigurationsrbacv1.AggregationRuleApplyConfiguration{}
    +	case rbacv1.SchemeGroupVersion.WithKind("ClusterRole"):
    +		return &applyconfigurationsrbacv1.ClusterRoleApplyConfiguration{}
    +	case rbacv1.SchemeGroupVersion.WithKind("ClusterRoleBinding"):
    +		return &applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration{}
    +	case rbacv1.SchemeGroupVersion.WithKind("PolicyRule"):
    +		return &applyconfigurationsrbacv1.PolicyRuleApplyConfiguration{}
    +	case rbacv1.SchemeGroupVersion.WithKind("Role"):
    +		return &applyconfigurationsrbacv1.RoleApplyConfiguration{}
    +	case rbacv1.SchemeGroupVersion.WithKind("RoleBinding"):
    +		return &applyconfigurationsrbacv1.RoleBindingApplyConfiguration{}
    +	case rbacv1.SchemeGroupVersion.WithKind("RoleRef"):
    +		return &applyconfigurationsrbacv1.RoleRefApplyConfiguration{}
    +	case rbacv1.SchemeGroupVersion.WithKind("Subject"):
    +		return &applyconfigurationsrbacv1.SubjectApplyConfiguration{}
    +
    +		// Group=rbac.authorization.k8s.io, Version=v1alpha1
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("AggregationRule"):
    +		return &applyconfigurationsrbacv1alpha1.AggregationRuleApplyConfiguration{}
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("ClusterRole"):
    +		return &applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration{}
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("ClusterRoleBinding"):
    +		return &applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration{}
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("PolicyRule"):
    +		return &applyconfigurationsrbacv1alpha1.PolicyRuleApplyConfiguration{}
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("Role"):
    +		return &applyconfigurationsrbacv1alpha1.RoleApplyConfiguration{}
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("RoleBinding"):
    +		return &applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration{}
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("RoleRef"):
    +		return &applyconfigurationsrbacv1alpha1.RoleRefApplyConfiguration{}
    +	case rbacv1alpha1.SchemeGroupVersion.WithKind("Subject"):
    +		return &applyconfigurationsrbacv1alpha1.SubjectApplyConfiguration{}
    +
    +		// Group=rbac.authorization.k8s.io, Version=v1beta1
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("AggregationRule"):
    +		return &applyconfigurationsrbacv1beta1.AggregationRuleApplyConfiguration{}
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRole"):
    +		return &applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration{}
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding"):
    +		return &applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration{}
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("PolicyRule"):
    +		return &applyconfigurationsrbacv1beta1.PolicyRuleApplyConfiguration{}
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("Role"):
    +		return &applyconfigurationsrbacv1beta1.RoleApplyConfiguration{}
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("RoleBinding"):
    +		return &applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration{}
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("RoleRef"):
    +		return &applyconfigurationsrbacv1beta1.RoleRefApplyConfiguration{}
    +	case rbacv1beta1.SchemeGroupVersion.WithKind("Subject"):
    +		return &applyconfigurationsrbacv1beta1.SubjectApplyConfiguration{}
    +
    +		// Group=resource.k8s.io, Version=v1alpha3
    +	case v1alpha3.SchemeGroupVersion.WithKind("AllocationResult"):
    +		return &resourcev1alpha3.AllocationResultApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("BasicDevice"):
    +		return &resourcev1alpha3.BasicDeviceApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("CELDeviceSelector"):
    +		return &resourcev1alpha3.CELDeviceSelectorApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("Device"):
    +		return &resourcev1alpha3.DeviceApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceAllocationConfiguration"):
    +		return &resourcev1alpha3.DeviceAllocationConfigurationApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceAllocationResult"):
    +		return &resourcev1alpha3.DeviceAllocationResultApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceAttribute"):
    +		return &resourcev1alpha3.DeviceAttributeApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceClaim"):
    +		return &resourcev1alpha3.DeviceClaimApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceClaimConfiguration"):
    +		return &resourcev1alpha3.DeviceClaimConfigurationApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceClass"):
    +		return &resourcev1alpha3.DeviceClassApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceClassConfiguration"):
    +		return &resourcev1alpha3.DeviceClassConfigurationApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceClassSpec"):
    +		return &resourcev1alpha3.DeviceClassSpecApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceConfiguration"):
    +		return &resourcev1alpha3.DeviceConfigurationApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceConstraint"):
    +		return &resourcev1alpha3.DeviceConstraintApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceRequest"):
    +		return &resourcev1alpha3.DeviceRequestApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceRequestAllocationResult"):
    +		return &resourcev1alpha3.DeviceRequestAllocationResultApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("DeviceSelector"):
    +		return &resourcev1alpha3.DeviceSelectorApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("OpaqueDeviceConfiguration"):
    +		return &resourcev1alpha3.OpaqueDeviceConfigurationApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext"):
    +		return &resourcev1alpha3.PodSchedulingContextApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextSpec"):
    +		return &resourcev1alpha3.PodSchedulingContextSpecApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextStatus"):
    +		return &resourcev1alpha3.PodSchedulingContextStatusApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim"):
    +		return &resourcev1alpha3.ResourceClaimApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"):
    +		return &resourcev1alpha3.ResourceClaimConsumerReferenceApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSchedulingStatus"):
    +		return &resourcev1alpha3.ResourceClaimSchedulingStatusApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSpec"):
    +		return &resourcev1alpha3.ResourceClaimSpecApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimStatus"):
    +		return &resourcev1alpha3.ResourceClaimStatusApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate"):
    +		return &resourcev1alpha3.ResourceClaimTemplateApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplateSpec"):
    +		return &resourcev1alpha3.ResourceClaimTemplateSpecApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourcePool"):
    +		return &resourcev1alpha3.ResourcePoolApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice"):
    +		return &resourcev1alpha3.ResourceSliceApplyConfiguration{}
    +	case v1alpha3.SchemeGroupVersion.WithKind("ResourceSliceSpec"):
    +		return &resourcev1alpha3.ResourceSliceSpecApplyConfiguration{}
    +
    +		// Group=scheduling.k8s.io, Version=v1
    +	case schedulingv1.SchemeGroupVersion.WithKind("PriorityClass"):
    +		return &applyconfigurationsschedulingv1.PriorityClassApplyConfiguration{}
    +
    +		// Group=scheduling.k8s.io, Version=v1alpha1
    +	case schedulingv1alpha1.SchemeGroupVersion.WithKind("PriorityClass"):
    +		return &applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration{}
    +
    +		// Group=scheduling.k8s.io, Version=v1beta1
    +	case schedulingv1beta1.SchemeGroupVersion.WithKind("PriorityClass"):
    +		return &applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration{}
    +
    +		// Group=storage.k8s.io, Version=v1
    +	case storagev1.SchemeGroupVersion.WithKind("CSIDriver"):
    +		return &applyconfigurationsstoragev1.CSIDriverApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("CSIDriverSpec"):
    +		return &applyconfigurationsstoragev1.CSIDriverSpecApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("CSINode"):
    +		return &applyconfigurationsstoragev1.CSINodeApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("CSINodeDriver"):
    +		return &applyconfigurationsstoragev1.CSINodeDriverApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("CSINodeSpec"):
    +		return &applyconfigurationsstoragev1.CSINodeSpecApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("CSIStorageCapacity"):
    +		return &applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("StorageClass"):
    +		return &applyconfigurationsstoragev1.StorageClassApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("TokenRequest"):
    +		return &applyconfigurationsstoragev1.TokenRequestApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("VolumeAttachment"):
    +		return &applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"):
    +		return &applyconfigurationsstoragev1.VolumeAttachmentSourceApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"):
    +		return &applyconfigurationsstoragev1.VolumeAttachmentSpecApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"):
    +		return &applyconfigurationsstoragev1.VolumeAttachmentStatusApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("VolumeError"):
    +		return &applyconfigurationsstoragev1.VolumeErrorApplyConfiguration{}
    +	case storagev1.SchemeGroupVersion.WithKind("VolumeNodeResources"):
    +		return &applyconfigurationsstoragev1.VolumeNodeResourcesApplyConfiguration{}
    +
    +		// Group=storage.k8s.io, Version=v1alpha1
    +	case storagev1alpha1.SchemeGroupVersion.WithKind("CSIStorageCapacity"):
    +		return &applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration{}
    +	case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachment"):
    +		return &applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration{}
    +	case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"):
    +		return &applyconfigurationsstoragev1alpha1.VolumeAttachmentSourceApplyConfiguration{}
    +	case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"):
    +		return &applyconfigurationsstoragev1alpha1.VolumeAttachmentSpecApplyConfiguration{}
    +	case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"):
    +		return &applyconfigurationsstoragev1alpha1.VolumeAttachmentStatusApplyConfiguration{}
    +	case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass"):
    +		return &applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration{}
    +	case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeError"):
    +		return &applyconfigurationsstoragev1alpha1.VolumeErrorApplyConfiguration{}
    +
    +		// Group=storage.k8s.io, Version=v1beta1
    +	case storagev1beta1.SchemeGroupVersion.WithKind("CSIDriver"):
    +		return &applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("CSIDriverSpec"):
    +		return &applyconfigurationsstoragev1beta1.CSIDriverSpecApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("CSINode"):
    +		return &applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("CSINodeDriver"):
    +		return &applyconfigurationsstoragev1beta1.CSINodeDriverApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("CSINodeSpec"):
    +		return &applyconfigurationsstoragev1beta1.CSINodeSpecApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("CSIStorageCapacity"):
    +		return &applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("StorageClass"):
    +		return &applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("TokenRequest"):
    +		return &applyconfigurationsstoragev1beta1.TokenRequestApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachment"):
    +		return &applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"):
    +		return &applyconfigurationsstoragev1beta1.VolumeAttachmentSourceApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"):
    +		return &applyconfigurationsstoragev1beta1.VolumeAttachmentSpecApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"):
    +		return &applyconfigurationsstoragev1beta1.VolumeAttachmentStatusApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass"):
    +		return &applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("VolumeError"):
    +		return &applyconfigurationsstoragev1beta1.VolumeErrorApplyConfiguration{}
    +	case storagev1beta1.SchemeGroupVersion.WithKind("VolumeNodeResources"):
    +		return &applyconfigurationsstoragev1beta1.VolumeNodeResourcesApplyConfiguration{}
    +
    +		// Group=storagemigration.k8s.io, Version=v1alpha1
    +	case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("GroupVersionResource"):
    +		return &applyconfigurationsstoragemigrationv1alpha1.GroupVersionResourceApplyConfiguration{}
    +	case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("MigrationCondition"):
    +		return &applyconfigurationsstoragemigrationv1alpha1.MigrationConditionApplyConfiguration{}
    +	case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration"):
    +		return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration{}
    +	case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationSpec"):
    +		return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationSpecApplyConfiguration{}
    +	case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationStatus"):
    +		return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationStatusApplyConfiguration{}
    +
    +	}
    +	return nil
    +}
    +
    +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter {
    +	return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()}
    +}
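
Reviewer note: the generated switch above maps every known GroupVersionKind to the empty apply configuration for that kind, and NewTypeConverter wires the generated parser into a converter for client-go's testing infrastructure. A minimal sketch of how this lookup is typically consumed, assuming the enclosing generated helper is applyconfigurations.ForKind (its declaration sits outside this hunk):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/applyconfigurations"
	clientscheme "k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Look up the empty apply configuration registered for a kind; the
	// switch above returns a typed zero value for every known GVK, or nil.
	cfg := applyconfigurations.ForKind(appsv1.SchemeGroupVersion.WithKind("Deployment"))
	fmt.Printf("%T\n", cfg)

	// NewTypeConverter (end of the hunk above) builds a converter from the
	// generated parser; here we only show that it can be constructed.
	tc := applyconfigurations.NewTypeConverter(clientscheme.Scheme)
	fmt.Printf("%T\n", tc)
}
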
    diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go
    index f8a78e1ef4..e5d9e7f800 100644
    --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go
    +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go
    @@ -47,7 +47,9 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*me
     		Verb:     "get",
     		Resource: schema.GroupVersionResource{Resource: "resource"},
     	}
    -	c.Invokes(action, nil)
    +	if _, err := c.Invokes(action, nil); err != nil {
    +		return nil, err
    +	}
     	for _, resourceList := range c.Resources {
     		if resourceList.GroupVersion == groupVersion {
     			return resourceList, nil
    @@ -77,7 +79,9 @@ func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav
     		Verb:     "get",
     		Resource: schema.GroupVersionResource{Resource: "resource"},
     	}
    -	c.Invokes(action, nil)
    +	if _, err = c.Invokes(action, nil); err != nil {
    +		return resultGroups, c.Resources, err
    +	}
     	return resultGroups, c.Resources, nil
     }
     
    @@ -100,7 +104,9 @@ func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) {
     		Verb:     "get",
     		Resource: schema.GroupVersionResource{Resource: "group"},
     	}
    -	c.Invokes(action, nil)
    +	if _, err := c.Invokes(action, nil); err != nil {
    +		return nil, err
    +	}
     
     	groups := map[string]*metav1.APIGroup{}
     
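
Reviewer note: because the return value of Invokes is now checked, a failing reactor surfaces as an error from the fake discovery methods instead of being silently dropped. A minimal sketch, assuming the fake is driven through k8s.io/client-go/testing reactors:

package main

import (
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	fakediscovery "k8s.io/client-go/discovery/fake"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	fd := &fakediscovery.FakeDiscovery{Fake: &clienttesting.Fake{}}
	// Register a reactor that fails for the "get group" action issued by
	// ServerGroups; with the change above its error is now returned.
	fd.PrependReactor("get", "group", func(action clienttesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("discovery is down")
	})
	if _, err := fd.ServerGroups(); err != nil {
		fmt.Println("ServerGroups returned:", err)
	}
}
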
    diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go
    index 4b54859530..326da7cbdf 100644
    --- a/vendor/k8s.io/client-go/dynamic/simple.go
    +++ b/vendor/k8s.io/client-go/dynamic/simple.go
    @@ -20,6 +20,7 @@ import (
     	"context"
     	"fmt"
     	"net/http"
    +	"time"
     
     	"k8s.io/apimachinery/pkg/api/meta"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    @@ -29,6 +30,9 @@ import (
     	"k8s.io/apimachinery/pkg/types"
     	"k8s.io/apimachinery/pkg/watch"
     	"k8s.io/client-go/rest"
    +	"k8s.io/client-go/util/consistencydetector"
    +	"k8s.io/client-go/util/watchlist"
    +	"k8s.io/klog/v2"
     )
     
     type DynamicClient struct {
    @@ -293,6 +297,24 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav
     }
     
     func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
    +	if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil {
    +		klog.Warningf("Failed preparing watchlist options for %v, falling back to the standard LIST semantics, err = %v", c.resource, watchListOptionsErr)
    +	} else if hasWatchListOptionsPrepared {
    +		result, err := c.watchList(ctx, watchListOptions)
    +		if err == nil {
    +			consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("watchlist request for %v", c.resource), c.list, opts, result)
    +			return result, nil
    +		}
    +		klog.Warningf("The watchlist request for %v ended with an error, falling back to the standard LIST semantics, err = %v", c.resource, err)
    +	}
    +	result, err := c.list(ctx, opts)
    +	if err == nil {
    +		consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("list request for %v", c.resource), c.list, opts, result)
    +	}
    +	return result, err
    +}
    +
    +func (c *dynamicResourceClient) list(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
     	if err := validateNamespaceWithOptionalName(c.namespace); err != nil {
     		return nil, err
     	}
    @@ -319,6 +341,27 @@ func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOption
     	return list, nil
     }
     
    +// watchList establishes a watch stream with the server and returns an unstructured list.
    +func (c *dynamicResourceClient) watchList(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
    +	if err := validateNamespaceWithOptionalName(c.namespace); err != nil {
    +		return nil, err
    +	}
    +
    +	var timeout time.Duration
    +	if opts.TimeoutSeconds != nil {
    +		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    +	}
    +
    +	result := &unstructured.UnstructuredList{}
    +	err := c.client.client.Get().AbsPath(c.makeURLSegments("")...).
    +		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
    +		Timeout(timeout).
    +		WatchList(ctx).
    +		Into(result)
    +
    +	return result, err
    +}
    +
     func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	opts.Watch = true
     	if err := validateNamespaceWithOptionalName(c.namespace); err != nil {
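
Reviewer note: List now tries the streaming watch-list path first and falls back to a standard LIST if preparing or executing it fails. A minimal sketch of exercising the new path, assuming the client-side gate is named WatchListClient and is read from a KUBE_FEATURE_-prefixed environment variable (neither appears in this hunk):

package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: opting in before the client reads its feature gates.
	os.Setenv("KUBE_FEATURE_WatchListClient", "true")

	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The call below goes through watchList when possible and otherwise
	// falls back to the plain list helper, exactly as in the hunk above.
	pods, err := dyn.Resource(schema.GroupVersionResource{Version: "v1", Resource: "pods"}).
		Namespace("default").
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods listed:", len(pods.Items))
}
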
    diff --git a/vendor/k8s.io/client-go/features/envvar.go b/vendor/k8s.io/client-go/features/envvar.go
    index f9edfdf0d9..8c3f887dc4 100644
    --- a/vendor/k8s.io/client-go/features/envvar.go
    +++ b/vendor/k8s.io/client-go/features/envvar.go
    @@ -47,6 +47,10 @@ var _ Gates = &envVarFeatureGates{}
     //
     // Please note that environmental variables can only be set to the boolean value.
     // Incorrect values will be ignored and logged.
    +//
    +// Features can also be set directly via the Set method.
    +// In that case, these features take precedence over
    +// features set via environmental variables.
     func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates {
     	known := map[Feature]FeatureSpec{}
     	for name, spec := range features {
    @@ -57,7 +61,8 @@ func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates
     		callSiteName: naming.GetNameFromCallsite(internalPackages...),
     		known:        known,
     	}
    -	fg.enabled.Store(map[Feature]bool{})
    +	fg.enabledViaEnvVar.Store(map[Feature]bool{})
    +	fg.enabledViaSetMethod = map[Feature]bool{}
     
     	return fg
     }
    @@ -74,17 +79,34 @@ type envVarFeatureGates struct {
     	// known holds known feature gates
     	known map[Feature]FeatureSpec
     
    -	// enabled holds a map[Feature]bool
    +	// enabledViaEnvVar holds a map[Feature]bool
     	// with values explicitly set via env var
    -	enabled atomic.Value
    +	enabledViaEnvVar atomic.Value
    +
    +	// lockEnabledViaSetMethod protects enabledViaSetMethod
    +	lockEnabledViaSetMethod sync.RWMutex
    +
    +	// enabledViaSetMethod holds values explicitly set
    +	// via Set method, features stored in this map take
    +	// precedence over features stored in enabledViaEnvVar
    +	enabledViaSetMethod map[Feature]bool
     
     	// readEnvVars holds the boolean value which
     	// indicates whether readEnvVarsOnce has been called.
     	readEnvVars atomic.Bool
     }
     
    -// Enabled returns true if the key is enabled.  If the key is not known, this call will panic.
    +// Enabled returns true if the key is enabled. If the key is not known, this call will panic.
     func (f *envVarFeatureGates) Enabled(key Feature) bool {
    +	if v, ok := f.wasFeatureEnabledViaSetMethod(key); ok {
+		// ensure that the state of all known features
+		// is loaded from environment variables
+		// on the first call to the Enabled method.
    +		if !f.hasAlreadyReadEnvVar() {
    +			_ = f.getEnabledMapFromEnvVar()
    +		}
    +		return v
    +	}
     	if v, ok := f.getEnabledMapFromEnvVar()[key]; ok {
     		return v
     	}
    @@ -94,6 +116,26 @@ func (f *envVarFeatureGates) Enabled(key Feature) bool {
     	panic(fmt.Errorf("feature %q is not registered in FeatureGates %q", key, f.callSiteName))
     }
     
    +// Set sets the given feature to the given value.
    +//
    +// Features set via this method take precedence over
    +// the features set via environment variables.
    +func (f *envVarFeatureGates) Set(featureName Feature, featureValue bool) error {
    +	feature, ok := f.known[featureName]
    +	if !ok {
    +		return fmt.Errorf("feature %q is not registered in FeatureGates %q", featureName, f.callSiteName)
    +	}
    +	if feature.LockToDefault && feature.Default != featureValue {
    +		return fmt.Errorf("cannot set feature gate %q to %v, feature is locked to %v", featureName, featureValue, feature.Default)
    +	}
    +
    +	f.lockEnabledViaSetMethod.Lock()
    +	defer f.lockEnabledViaSetMethod.Unlock()
    +	f.enabledViaSetMethod[featureName] = featureValue
    +
    +	return nil
    +}
    +
     // getEnabledMapFromEnvVar will fill the enabled map on the first call.
     // This is the only time a known feature can be set to a value
     // read from the corresponding environmental variable.
    @@ -119,7 +161,7 @@ func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool {
     				featureGatesState[feature] = boolVal
     			}
     		}
    -		f.enabled.Store(featureGatesState)
    +		f.enabledViaEnvVar.Store(featureGatesState)
     		f.readEnvVars.Store(true)
     
     		for feature, featureSpec := range f.known {
    @@ -130,7 +172,15 @@ func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool {
     			klog.V(1).InfoS("Feature gate default state", "feature", feature, "enabled", featureSpec.Default)
     		}
     	})
    -	return f.enabled.Load().(map[Feature]bool)
    +	return f.enabledViaEnvVar.Load().(map[Feature]bool)
    +}
    +
    +func (f *envVarFeatureGates) wasFeatureEnabledViaSetMethod(key Feature) (bool, bool) {
    +	f.lockEnabledViaSetMethod.RLock()
    +	defer f.lockEnabledViaSetMethod.RUnlock()
    +
    +	value, found := f.enabledViaSetMethod[key]
    +	return value, found
     }
     
     func (f *envVarFeatureGates) hasAlreadyReadEnvVar() bool {
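
Reviewer note: the precedence introduced here is values stored via Set, then values parsed from the environment, then the compiled-in default. A small self-contained sketch of just that rule (an illustration, not the package's API; the KUBE_FEATURE_ prefix is an assumption):

package main

import (
	"fmt"
	"os"
	"strconv"
	"sync"
)

// toyGates mirrors only the precedence logic of the change above:
// explicit Set values > environment values > defaults.
type toyGates struct {
	mu       sync.RWMutex
	viaSet   map[string]bool
	viaEnv   map[string]bool
	defaults map[string]bool
}

func (g *toyGates) Set(name string, value bool) {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.viaSet[name] = value
}

func (g *toyGates) Enabled(name string) bool {
	g.mu.RLock()
	v, ok := g.viaSet[name]
	g.mu.RUnlock()
	if ok {
		return v // values set programmatically win
	}
	if v, ok := g.viaEnv[name]; ok {
		return v // then values parsed from the environment
	}
	return g.defaults[name] // finally the default
}

func main() {
	os.Setenv("KUBE_FEATURE_Example", "false") // feature name is illustrative
	env := map[string]bool{}
	if v, err := strconv.ParseBool(os.Getenv("KUBE_FEATURE_Example")); err == nil {
		env["Example"] = v
	}
	g := &toyGates{viaSet: map[string]bool{}, viaEnv: env, defaults: map[string]bool{"Example": true}}
	fmt.Println(g.Enabled("Example")) // false: env var overrides the default
	g.Set("Example", true)
	fmt.Println(g.Enabled("Example")) // true: Set overrides the env var
}
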
    diff --git a/vendor/k8s.io/client-go/gentype/type.go b/vendor/k8s.io/client-go/gentype/type.go
    new file mode 100644
    index 0000000000..b5be84318d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/gentype/type.go
    @@ -0,0 +1,360 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package gentype
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +	"time"
    +
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	rest "k8s.io/client-go/rest"
    +	"k8s.io/client-go/util/consistencydetector"
    +	"k8s.io/client-go/util/watchlist"
    +	"k8s.io/klog/v2"
    +)
    +
    +// objectWithMeta matches objects implementing both runtime.Object and metav1.Object.
    +type objectWithMeta interface {
    +	runtime.Object
    +	metav1.Object
    +}
    +
    +// namedObject matches comparable objects implementing GetName(); it is intended for use with apply declarative configurations.
    +type namedObject interface {
    +	comparable
    +	GetName() *string
    +}
    +
    +// Client represents a client, optionally namespaced, with no support for lists or apply declarative configurations.
    +type Client[T objectWithMeta] struct {
    +	resource       string
    +	client         rest.Interface
    +	namespace      string // "" for non-namespaced clients
    +	newObject      func() T
    +	parameterCodec runtime.ParameterCodec
    +}
    +
    +// ClientWithList represents a client with support for lists.
    +type ClientWithList[T objectWithMeta, L runtime.Object] struct {
    +	*Client[T]
    +	alsoLister[T, L]
    +}
    +
    +// ClientWithApply represents a client with support for apply declarative configurations.
    +type ClientWithApply[T objectWithMeta, C namedObject] struct {
    +	*Client[T]
    +	alsoApplier[T, C]
    +}
    +
    +// ClientWithListAndApply represents a client with support for lists and apply declarative configurations.
    +type ClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct {
    +	*Client[T]
    +	alsoLister[T, L]
    +	alsoApplier[T, C]
    +}
    +
    +// Helper types for composition
    +type alsoLister[T objectWithMeta, L runtime.Object] struct {
    +	client  *Client[T]
    +	newList func() L
    +}
    +
    +type alsoApplier[T objectWithMeta, C namedObject] struct {
    +	client *Client[T]
    +}
    +
    +// NewClient constructs a client, namespaced or not, with no support for lists or apply.
    +// Non-namespaced clients are constructed by passing an empty namespace ("").
    +func NewClient[T objectWithMeta](
    +	resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
    +) *Client[T] {
    +	return &Client[T]{
    +		resource:       resource,
    +		client:         client,
    +		parameterCodec: parameterCodec,
    +		namespace:      namespace,
    +		newObject:      emptyObjectCreator,
    +	}
    +}
    +
    +// NewClientWithList constructs a namespaced client with support for lists.
    +func NewClientWithList[T objectWithMeta, L runtime.Object](
    +	resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
    +	emptyListCreator func() L,
    +) *ClientWithList[T, L] {
    +	typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator)
    +	return &ClientWithList[T, L]{
    +		typeClient,
    +		alsoLister[T, L]{typeClient, emptyListCreator},
    +	}
    +}
    +
    +// NewClientWithApply constructs a namespaced client with support for apply declarative configurations.
    +func NewClientWithApply[T objectWithMeta, C namedObject](
    +	resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
    +) *ClientWithApply[T, C] {
    +	typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator)
    +	return &ClientWithApply[T, C]{
    +		typeClient,
    +		alsoApplier[T, C]{typeClient},
    +	}
    +}
    +
    +// NewClientWithListAndApply constructs a client with support for lists and applying declarative configurations.
    +func NewClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject](
    +	resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
    +	emptyListCreator func() L,
    +) *ClientWithListAndApply[T, L, C] {
    +	typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator)
    +	return &ClientWithListAndApply[T, L, C]{
    +		typeClient,
    +		alsoLister[T, L]{typeClient, emptyListCreator},
    +		alsoApplier[T, C]{typeClient},
    +	}
    +}
    +
    +// GetClient returns the REST interface.
    +func (c *Client[T]) GetClient() rest.Interface {
    +	return c.client
    +}
    +
    +// GetNamespace returns the client's namespace, if any.
    +func (c *Client[T]) GetNamespace() string {
    +	return c.namespace
    +}
    +
    +// Get takes name of the resource, and returns the corresponding object, and an error if there is any.
    +func (c *Client[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) {
    +	result := c.newObject()
    +	err := c.client.Get().
    +		NamespaceIfScoped(c.namespace, c.namespace != "").
    +		Resource(c.resource).
    +		Name(name).
    +		VersionedParams(&options, c.parameterCodec).
    +		Do(ctx).
    +		Into(result)
    +	return result, err
    +}
    +
    +// List takes label and field selectors, and returns the list of resources that match those selectors.
    +func (l *alsoLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (L, error) {
    +	if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil {
    +		klog.Warningf("Failed preparing watchlist options for %s, falling back to the standard LIST semantics, err = %v", l.client.resource, watchListOptionsErr)
    +	} else if hasWatchListOptionsPrepared {
    +		result, err := l.watchList(ctx, watchListOptions)
    +		if err == nil {
    +			consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, "watchlist request for "+l.client.resource, l.list, opts, result)
    +			return result, nil
    +		}
    +		klog.Warningf("The watchlist request for %s ended with an error, falling back to the standard LIST semantics, err = %v", l.client.resource, err)
    +	}
    +	result, err := l.list(ctx, opts)
    +	if err == nil {
    +		consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, "list request for "+l.client.resource, l.list, opts, result)
    +	}
    +	return result, err
    +}
    +
    +func (l *alsoLister[T, L]) list(ctx context.Context, opts metav1.ListOptions) (L, error) {
    +	list := l.newList()
    +	var timeout time.Duration
    +	if opts.TimeoutSeconds != nil {
    +		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    +	}
    +	err := l.client.client.Get().
    +		NamespaceIfScoped(l.client.namespace, l.client.namespace != "").
    +		Resource(l.client.resource).
    +		VersionedParams(&opts, l.client.parameterCodec).
    +		Timeout(timeout).
    +		Do(ctx).
    +		Into(list)
    +	return list, err
    +}
    +
    +// watchList establishes a watch stream with the server and returns the list of resources.
    +func (l *alsoLister[T, L]) watchList(ctx context.Context, opts metav1.ListOptions) (result L, err error) {
    +	var timeout time.Duration
    +	if opts.TimeoutSeconds != nil {
    +		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    +	}
    +	result = l.newList()
    +	err = l.client.client.Get().
    +		NamespaceIfScoped(l.client.namespace, l.client.namespace != "").
    +		Resource(l.client.resource).
    +		VersionedParams(&opts, l.client.parameterCodec).
    +		Timeout(timeout).
    +		WatchList(ctx).
    +		Into(result)
    +	return
    +}
    +
    +// Watch returns a watch.Interface that watches the requested resources.
    +func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    +	var timeout time.Duration
    +	if opts.TimeoutSeconds != nil {
    +		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    +	}
    +	opts.Watch = true
    +	return c.client.Get().
    +		NamespaceIfScoped(c.namespace, c.namespace != "").
    +		Resource(c.resource).
    +		VersionedParams(&opts, c.parameterCodec).
    +		Timeout(timeout).
    +		Watch(ctx)
    +}
    +
    +// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any.
    +func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions) (T, error) {
    +	result := c.newObject()
    +	err := c.client.Post().
    +		NamespaceIfScoped(c.namespace, c.namespace != "").
    +		Resource(c.resource).
    +		VersionedParams(&opts, c.parameterCodec).
    +		Body(obj).
    +		Do(ctx).
    +		Into(result)
    +	return result, err
    +}
    +
    +// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any.
    +func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) {
    +	result := c.newObject()
    +	err := c.client.Put().
    +		NamespaceIfScoped(c.namespace, c.namespace != "").
    +		Resource(c.resource).
    +		Name(obj.GetName()).
    +		VersionedParams(&opts, c.parameterCodec).
    +		Body(obj).
    +		Do(ctx).
    +		Into(result)
    +	return result, err
    +}
    +
    +// UpdateStatus updates the status subresource of a resource. Returns the server's representation of the resource, and an error, if there is any.
    +func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) {
    +	result := c.newObject()
    +	err := c.client.Put().
    +		NamespaceIfScoped(c.namespace, c.namespace != "").
    +		Resource(c.resource).
    +		Name(obj.GetName()).
    +		SubResource("status").
    +		VersionedParams(&opts, c.parameterCodec).
    +		Body(obj).
    +		Do(ctx).
    +		Into(result)
    +	return result, err
    +}
    +
    +// Delete takes name of the resource and deletes it. Returns an error if one occurs.
    +func (c *Client[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    +	return c.client.Delete().
    +		NamespaceIfScoped(c.namespace, c.namespace != "").
    +		Resource(c.resource).
    +		Name(name).
    +		Body(&opts).
    +		Do(ctx).
    +		Error()
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    +	var timeout time.Duration
    +	if listOpts.TimeoutSeconds != nil {
    +		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    +	}
    +	return l.client.client.Delete().
    +		NamespaceIfScoped(l.client.namespace, l.client.namespace != "").
    +		Resource(l.client.resource).
    +		VersionedParams(&listOpts, l.client.parameterCodec).
    +		Timeout(timeout).
    +		Body(&opts).
    +		Do(ctx).
    +		Error()
    +}
    +
    +// Patch applies the patch and returns the patched resource.
    +func (c *Client[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) {
    +	result := c.newObject()
    +	err := c.client.Patch(pt).
    +		NamespaceIfScoped(c.namespace, c.namespace != "").
    +		Resource(c.resource).
    +		Name(name).
    +		SubResource(subresources...).
    +		VersionedParams(&opts, c.parameterCodec).
    +		Body(data).
    +		Do(ctx).
    +		Into(result)
    +	return result, err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied resource.
    +func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) {
    +	result := a.client.newObject()
    +	if obj == *new(C) {
    +		return *new(T), fmt.Errorf("object provided to Apply must not be nil")
    +	}
    +	patchOpts := opts.ToPatchOptions()
    +	data, err := json.Marshal(obj)
    +	if err != nil {
    +		return *new(T), err
    +	}
    +	if obj.GetName() == nil {
    +		return *new(T), fmt.Errorf("obj.Name must be provided to Apply")
    +	}
    +	err = a.client.client.Patch(types.ApplyPatchType).
    +		NamespaceIfScoped(a.client.namespace, a.client.namespace != "").
    +		Resource(a.client.resource).
    +		Name(*obj.GetName()).
    +		VersionedParams(&patchOpts, a.client.parameterCodec).
    +		Body(data).
    +		Do(ctx).
    +		Into(result)
    +	return result, err
    +}
    +
    +// ApplyStatus takes the given apply declarative configuration, applies it to the status subresource and returns the applied resource.
    +func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) {
    +	if obj == *new(C) {
    +		return *new(T), fmt.Errorf("object provided to Apply must not be nil")
    +	}
    +	patchOpts := opts.ToPatchOptions()
    +	data, err := json.Marshal(obj)
    +	if err != nil {
    +		return *new(T), err
    +	}
    +
    +	if obj.GetName() == nil {
    +		return *new(T), fmt.Errorf("obj.Name must be provided to Apply")
    +	}
    +
    +	result := a.client.newObject()
    +	err = a.client.client.Patch(types.ApplyPatchType).
    +		NamespaceIfScoped(a.client.namespace, a.client.namespace != "").
    +		Resource(a.client.resource).
    +		Name(*obj.GetName()).
    +		SubResource("status").
    +		VersionedParams(&patchOpts, a.client.parameterCodec).
    +		Body(data).
    +		Do(ctx).
    +		Into(result)
    +	return result, err
    +}
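As a rough sketch of how these generic helpers are meant to be consumed, a typed client can be assembled from NewClientWithList plus the scheme's ParameterCodec; Get, List, Watch and the other methods above then come for free via embedding. The wrapper type and the choice of the ConfigMap resource below are illustrative assumptions, not part of this diff.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/gentype"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

// configMaps is an illustrative wrapper around the generic client for the
// namespaced "configmaps" resource; generated typed clients follow this shape.
type configMaps struct {
	*gentype.ClientWithList[*v1.ConfigMap, *v1.ConfigMapList]
}

func newConfigMaps(c rest.Interface, namespace string) *configMaps {
	return &configMaps{
		gentype.NewClientWithList[*v1.ConfigMap, *v1.ConfigMapList](
			"configmaps",
			c,
			scheme.ParameterCodec,
			namespace,
			func() *v1.ConfigMap { return &v1.ConfigMap{} },         // emptyObjectCreator
			func() *v1.ConfigMapList { return &v1.ConfigMapList{} }, // emptyListCreator
		),
	}
}

A call such as newConfigMaps(restClient, "default").Get(ctx, "my-config", metav1.GetOptions{}) then flows through the shared request plumbing defined in type.go above.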
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go
    new file mode 100644
    index 0000000000..7cd8d72766
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package admissionregistration
    +
    +import (
    +	v1 "k8s.io/client-go/informers/admissionregistration/v1"
    +	v1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go
    new file mode 100644
    index 0000000000..08769d3cca
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
    +	MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer
    +	// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
    +	ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer
    +	// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
    +	ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer
    +	// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
    +	ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
    +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer {
    +	return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
    +func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer {
    +	return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
    +func (v *version) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer {
    +	return &validatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
    +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer {
    +	return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..b768f6f7f3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/admissionregistration/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for
    +// MutatingWebhookConfigurations.
    +type MutatingWebhookConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.MutatingWebhookConfigurationLister
    +}
    +
    +type mutatingWebhookConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1.MutatingWebhookConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *mutatingWebhookConfigurationInformer) Lister() v1.MutatingWebhookConfigurationLister {
    +	return v1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer())
    +}
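A brief sketch of the consumption path the doc comments above recommend: obtain this informer through a SharedInformerFactory rather than constructing it directly, so all consumers share one watch connection and local cache. The in-cluster config and the 10-minute resync period are assumptions for illustration.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the program runs inside a cluster
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Shared factory so every consumer of the same type reuses one informer.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Admissionregistration().V1().MutatingWebhookConfigurations().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Read from the informer's local cache instead of hitting the API server.
	cfgs, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("mutating webhook configurations in cache:", len(cfgs))
}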
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go
    new file mode 100644
    index 0000000000..eaf9414e26
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/admissionregistration/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyInformer provides access to a shared informer and lister for
    +// ValidatingAdmissionPolicies.
    +type ValidatingAdmissionPolicyInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ValidatingAdmissionPolicyLister
    +}
    +
    +type validatingAdmissionPolicyInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().ValidatingAdmissionPolicies().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().ValidatingAdmissionPolicies().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1.ValidatingAdmissionPolicy{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicy{}, f.defaultInformer)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) Lister() v1.ValidatingAdmissionPolicyLister {
    +	return v1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go
    new file mode 100644
    index 0000000000..8cd61bf28a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/admissionregistration/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for
    +// ValidatingAdmissionPolicyBindings.
    +type ValidatingAdmissionPolicyBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ValidatingAdmissionPolicyBindingLister
    +}
    +
    +type validatingAdmissionPolicyBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1.ValidatingAdmissionPolicyBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) Lister() v1.ValidatingAdmissionPolicyBindingLister {
    +	return v1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..8ddcdf2d90
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/admissionregistration/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for
    +// ValidatingWebhookConfigurations.
    +type ValidatingWebhookConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ValidatingWebhookConfigurationLister
    +}
    +
    +type validatingWebhookConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1.ValidatingWebhookConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *validatingWebhookConfigurationInformer) Lister() v1.ValidatingWebhookConfigurationLister {
    +	return v1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..738063ee72
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
    +	ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer
    +	// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
    +	ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
    +func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer {
    +	return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
    +func (v *version) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer {
    +	return &validatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    new file mode 100644
    index 0000000000..01b8a4ab8e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyInformer provides access to a shared informer and lister for
    +// ValidatingAdmissionPolicies.
    +type ValidatingAdmissionPolicyInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.ValidatingAdmissionPolicyLister
    +}
    +
    +type validatingAdmissionPolicyInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1alpha1.ValidatingAdmissionPolicy{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, f.defaultInformer)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) Lister() v1alpha1.ValidatingAdmissionPolicyLister {
    +	return v1alpha1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    new file mode 100644
    index 0000000000..bd531512b6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for
    +// ValidatingAdmissionPolicyBindings.
    +type ValidatingAdmissionPolicyBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister
    +}
    +
    +type validatingAdmissionPolicyBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister {
    +	return v1alpha1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go
    new file mode 100644
    index 0000000000..815960df59
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
    +	MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer
    +	// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
    +	ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer
    +	// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
    +	ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer
    +	// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
    +	ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
    +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer {
    +	return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
    +func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer {
    +	return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
    +func (v *version) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer {
    +	return &validatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
    +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer {
    +	return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..12c8ec1fbd
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for
    +// MutatingWebhookConfigurations.
    +type MutatingWebhookConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.MutatingWebhookConfigurationLister
    +}
    +
    +type mutatingWebhookConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1beta1.MutatingWebhookConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1beta1.MutatingWebhookConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *mutatingWebhookConfigurationInformer) Lister() v1beta1.MutatingWebhookConfigurationLister {
    +	return v1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer())
    +}
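
The two constructors above can also be used directly, outside the factory, when a short-lived tool only watches this one resource. A minimal standalone sketch under stated assumptions: in-cluster credentials, a 30s resync, and an illustrative label selector, none of which come from this vendored file.

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionv1beta1informers "k8s.io/client-go/informers/admissionregistration/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Assumption: running in-cluster; any rest.Config would work the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Standalone filtered informer; the generated doc comments above prefer the
	// shared factory so caches and watch connections are reused.
	inf := admissionv1beta1informers.NewFilteredMutatingWebhookConfigurationInformer(
		client,
		30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(o *metav1.ListOptions) {
			// Illustrative filter; any ListOptions tweak can be applied here.
			o.LabelSelector = "admissions.example.com/managed=true"
		},
	)

	stopCh := make(chan struct{})
	defer close(stopCh)

	go inf.Run(stopCh)
	if !cache.WaitForCacheSync(stopCh, inf.HasSynced) {
		panic("cache never synced")
	}
}
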
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
    new file mode 100644
    index 0000000000..d0e9cd64c8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyInformer provides access to a shared informer and lister for
    +// ValidatingAdmissionPolicies.
    +type ValidatingAdmissionPolicyInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ValidatingAdmissionPolicyLister
    +}
    +
    +type validatingAdmissionPolicyInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1beta1.ValidatingAdmissionPolicy{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, f.defaultInformer)
    +}
    +
    +func (f *validatingAdmissionPolicyInformer) Lister() v1beta1.ValidatingAdmissionPolicyLister {
    +	return v1beta1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    new file mode 100644
    index 0000000000..7641e99406
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for
    +// ValidatingAdmissionPolicyBindings.
    +type ValidatingAdmissionPolicyBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ValidatingAdmissionPolicyBindingLister
    +}
    +
    +type validatingAdmissionPolicyBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer)
    +}
    +
    +func (f *validatingAdmissionPolicyBindingInformer) Lister() v1beta1.ValidatingAdmissionPolicyBindingLister {
    +	return v1beta1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..05eb05097f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for
    +// ValidatingWebhookConfigurations.
    +type ValidatingWebhookConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ValidatingWebhookConfigurationLister
    +}
    +
    +type validatingWebhookConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&admissionregistrationv1beta1.ValidatingWebhookConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *validatingWebhookConfigurationInformer) Lister() v1beta1.ValidatingWebhookConfigurationLister {
    +	return v1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer())
    +}
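
Taken together, the four v1beta1 admissionregistration informers above are normally reached through the shared informer factory, which is the usage their generated doc comments recommend. A minimal sketch of that pattern, assuming in-cluster credentials and a 10-minute resync period (both assumptions, not part of the vendored code):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumption: running in-cluster; any rest.Config would work the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One factory shares caches and watch connections across all informers.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	// Requesting the lister registers the shared informer with the factory.
	vapLister := factory.Admissionregistration().V1beta1().ValidatingAdmissionPolicies().Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// Reads now come from the local cache, not the API server.
	policies, err := vapLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("cached ValidatingAdmissionPolicies: %d\n", len(policies))
}
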
    diff --git a/vendor/k8s.io/client-go/informers/apiserverinternal/interface.go b/vendor/k8s.io/client-go/informers/apiserverinternal/interface.go
    new file mode 100644
    index 0000000000..122c030998
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/interface.go
    @@ -0,0 +1,46 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package apiserverinternal
    +
    +import (
    +	v1alpha1 "k8s.io/client-go/informers/apiserverinternal/v1alpha1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..9778325c65
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// StorageVersions returns a StorageVersionInformer.
    +	StorageVersions() StorageVersionInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// StorageVersions returns a StorageVersionInformer.
    +func (v *version) StorageVersions() StorageVersionInformer {
    +	return &storageVersionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go
    new file mode 100644
    index 0000000000..34175b522d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/apiserverinternal/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageVersionInformer provides access to a shared informer and lister for
    +// StorageVersions.
    +type StorageVersionInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.StorageVersionLister
    +}
    +
    +type storageVersionInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewStorageVersionInformer constructs a new informer for StorageVersion type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredStorageVersionInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredStorageVersionInformer constructs a new informer for StorageVersion type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.InternalV1alpha1().StorageVersions().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.InternalV1alpha1().StorageVersions().Watch(context.TODO(), options)
    +			},
    +		},
    +		&apiserverinternalv1alpha1.StorageVersion{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *storageVersionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredStorageVersionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *storageVersionInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&apiserverinternalv1alpha1.StorageVersion{}, f.defaultInformer)
    +}
    +
    +func (f *storageVersionInformer) Lister() v1alpha1.StorageVersionLister {
    +	return v1alpha1.NewStorageVersionLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/interface.go b/vendor/k8s.io/client-go/informers/apps/interface.go
    new file mode 100644
    index 0000000000..02eefe5842
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package apps
    +
    +import (
    +	v1 "k8s.io/client-go/informers/apps/v1"
    +	v1beta1 "k8s.io/client-go/informers/apps/v1beta1"
    +	v1beta2 "k8s.io/client-go/informers/apps/v1beta2"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +	// V1beta2 provides access to shared informers for resources in V1beta2.
    +	V1beta2() v1beta2.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta2 returns a new v1beta2.Interface.
    +func (g *group) V1beta2() v1beta2.Interface {
    +	return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go
    new file mode 100644
    index 0000000000..31e2b74d0f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1 "k8s.io/api/apps/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/apps/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ControllerRevisionInformer provides access to a shared informer and lister for
    +// ControllerRevisions.
    +type ControllerRevisionInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ControllerRevisionLister
    +}
    +
    +type controllerRevisionInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().ControllerRevisions(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().ControllerRevisions(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1.ControllerRevision{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1.ControllerRevision{}, f.defaultInformer)
    +}
    +
    +func (f *controllerRevisionInformer) Lister() v1.ControllerRevisionLister {
    +	return v1.NewControllerRevisionLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go
    new file mode 100644
    index 0000000000..da7fe9509b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1 "k8s.io/api/apps/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/apps/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DaemonSetInformer provides access to a shared informer and lister for
    +// DaemonSets.
    +type DaemonSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.DaemonSetLister
    +}
    +
    +type daemonSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewDaemonSetInformer constructs a new informer for DaemonSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().DaemonSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().DaemonSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1.DaemonSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1.DaemonSet{}, f.defaultInformer)
    +}
    +
    +func (f *daemonSetInformer) Lister() v1.DaemonSetLister {
    +	return v1.NewDaemonSetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go
    new file mode 100644
    index 0000000000..bd639bb3d9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1 "k8s.io/api/apps/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/apps/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentInformer provides access to a shared informer and lister for
    +// Deployments.
    +type DeploymentInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.DeploymentLister
    +}
    +
    +type deploymentInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().Deployments(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().Deployments(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1.Deployment{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1.Deployment{}, f.defaultInformer)
    +}
    +
    +func (f *deploymentInformer) Lister() v1.DeploymentLister {
    +	return v1.NewDeploymentLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1/interface.go
    new file mode 100644
    index 0000000000..fab1e76bd9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1/interface.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ControllerRevisions returns a ControllerRevisionInformer.
    +	ControllerRevisions() ControllerRevisionInformer
    +	// DaemonSets returns a DaemonSetInformer.
    +	DaemonSets() DaemonSetInformer
    +	// Deployments returns a DeploymentInformer.
    +	Deployments() DeploymentInformer
    +	// ReplicaSets returns a ReplicaSetInformer.
    +	ReplicaSets() ReplicaSetInformer
    +	// StatefulSets returns a StatefulSetInformer.
    +	StatefulSets() StatefulSetInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ControllerRevisions returns a ControllerRevisionInformer.
    +func (v *version) ControllerRevisions() ControllerRevisionInformer {
    +	return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// DaemonSets returns a DaemonSetInformer.
    +func (v *version) DaemonSets() DaemonSetInformer {
    +	return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Deployments returns a DeploymentInformer.
    +func (v *version) Deployments() DeploymentInformer {
    +	return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ReplicaSets returns a ReplicaSetInformer.
    +func (v *version) ReplicaSets() ReplicaSetInformer {
    +	return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// StatefulSets returns a StatefulSetInformer.
    +func (v *version) StatefulSets() StatefulSetInformer {
    +	return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go
    new file mode 100644
    index 0000000000..6d81a471a4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1 "k8s.io/api/apps/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/apps/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicaSetInformer provides access to a shared informer and lister for
    +// ReplicaSets.
    +type ReplicaSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ReplicaSetLister
    +}
    +
    +type replicaSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewReplicaSetInformer constructs a new informer for ReplicaSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().ReplicaSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().ReplicaSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1.ReplicaSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1.ReplicaSet{}, f.defaultInformer)
    +}
    +
    +func (f *replicaSetInformer) Lister() v1.ReplicaSetLister {
    +	return v1.NewReplicaSetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go
    new file mode 100644
    index 0000000000..c99bbb73ed
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1 "k8s.io/api/apps/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/apps/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// StatefulSetInformer provides access to a shared informer and lister for
    +// StatefulSets.
    +type StatefulSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.StatefulSetLister
    +}
    +
    +type statefulSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewStatefulSetInformer constructs a new informer for StatefulSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().StatefulSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1().StatefulSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1.StatefulSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1.StatefulSet{}, f.defaultInformer)
    +}
    +
    +func (f *statefulSetInformer) Lister() v1.StatefulSetLister {
    +	return v1.NewStatefulSetLister(f.Informer().GetIndexer())
    +}
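
Unlike the cluster-scoped webhook-configuration informers, the apps/v1 informers above are namespaced, so the factory can be scoped with WithNamespace and WithTweakListOptions. A minimal sketch where the namespace, label selector, and resync period are illustrative assumptions rather than values taken from this diff:

package main

import (
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Assumption: running in-cluster; any rest.Config would work the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Namespace and label selector are applied once, at the factory level,
	// and inherited by every informer the factory hands out.
	factory := informers.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute,
		informers.WithNamespace("gmp-system"),
		informers.WithTweakListOptions(func(o *metav1.ListOptions) {
			o.LabelSelector = "app.kubernetes.io/part-of=example"
		}),
	)

	deployments := factory.Apps().V1().Deployments().Informer()
	if _, err := deployments.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			d := obj.(*appsv1.Deployment)
			fmt.Printf("deployment added: %s/%s\n", d.Namespace, d.Name)
		},
	}); err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	defer close(stopCh)

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	// A real program would block here (e.g. <-stopCh after wiring up signals).
}
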
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go
    new file mode 100644
    index 0000000000..cb36bd7fd8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta1 "k8s.io/api/apps/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/apps/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ControllerRevisionInformer provides access to a shared informer and lister for
    +// ControllerRevisions.
    +type ControllerRevisionInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ControllerRevisionLister
    +}
    +
    +type controllerRevisionInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta1().ControllerRevisions(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta1().ControllerRevisions(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta1.ControllerRevision{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta1.ControllerRevision{}, f.defaultInformer)
    +}
    +
    +func (f *controllerRevisionInformer) Lister() v1beta1.ControllerRevisionLister {
    +	return v1beta1.NewControllerRevisionLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go
    new file mode 100644
    index 0000000000..e02a13c2f4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta1 "k8s.io/api/apps/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/apps/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentInformer provides access to a shared informer and lister for
    +// Deployments.
    +type DeploymentInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.DeploymentLister
    +}
    +
    +type deploymentInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta1().Deployments(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta1().Deployments(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta1.Deployment{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta1.Deployment{}, f.defaultInformer)
    +}
    +
    +func (f *deploymentInformer) Lister() v1beta1.DeploymentLister {
    +	return v1beta1.NewDeploymentLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go
    new file mode 100644
    index 0000000000..326939cd12
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go
    @@ -0,0 +1,59 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ControllerRevisions returns a ControllerRevisionInformer.
    +	ControllerRevisions() ControllerRevisionInformer
    +	// Deployments returns a DeploymentInformer.
    +	Deployments() DeploymentInformer
    +	// StatefulSets returns a StatefulSetInformer.
    +	StatefulSets() StatefulSetInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ControllerRevisions returns a ControllerRevisionInformer.
    +func (v *version) ControllerRevisions() ControllerRevisionInformer {
    +	return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Deployments returns a DeploymentInformer.
    +func (v *version) Deployments() DeploymentInformer {
    +	return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// StatefulSets returns a StatefulSetInformer.
    +func (v *version) StatefulSets() StatefulSetInformer {
    +	return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go
    new file mode 100644
    index 0000000000..b845cc99c9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta1 "k8s.io/api/apps/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/apps/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// StatefulSetInformer provides access to a shared informer and lister for
    +// StatefulSets.
    +type StatefulSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.StatefulSetLister
    +}
    +
    +type statefulSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewStatefulSetInformer constructs a new informer for StatefulSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta1().StatefulSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta1().StatefulSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta1.StatefulSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta1.StatefulSet{}, f.defaultInformer)
    +}
    +
    +func (f *statefulSetInformer) Lister() v1beta1.StatefulSetLister {
    +	return v1beta1.NewStatefulSetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go
    new file mode 100644
    index 0000000000..4d0e91320b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta2 "k8s.io/api/apps/v1beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ControllerRevisionInformer provides access to a shared informer and lister for
    +// ControllerRevisions.
    +type ControllerRevisionInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta2.ControllerRevisionLister
    +}
    +
    +type controllerRevisionInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().ControllerRevisions(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().ControllerRevisions(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta2.ControllerRevision{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta2.ControllerRevision{}, f.defaultInformer)
    +}
    +
    +func (f *controllerRevisionInformer) Lister() v1beta2.ControllerRevisionLister {
    +	return v1beta2.NewControllerRevisionLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go
    new file mode 100644
    index 0000000000..280e2fe465
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta2 "k8s.io/api/apps/v1beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DaemonSetInformer provides access to a shared informer and lister for
    +// DaemonSets.
    +type DaemonSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta2.DaemonSetLister
    +}
    +
    +type daemonSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewDaemonSetInformer constructs a new informer for DaemonSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().DaemonSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().DaemonSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta2.DaemonSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta2.DaemonSet{}, f.defaultInformer)
    +}
    +
    +func (f *daemonSetInformer) Lister() v1beta2.DaemonSetLister {
    +	return v1beta2.NewDaemonSetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go
    new file mode 100644
    index 0000000000..67bdb79720
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta2 "k8s.io/api/apps/v1beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentInformer provides access to a shared informer and lister for
    +// Deployments.
    +type DeploymentInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta2.DeploymentLister
    +}
    +
    +type deploymentInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().Deployments(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().Deployments(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta2.Deployment{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta2.Deployment{}, f.defaultInformer)
    +}
    +
    +func (f *deploymentInformer) Lister() v1beta2.DeploymentLister {
    +	return v1beta2.NewDeploymentLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go
    new file mode 100644
    index 0000000000..ded89bd5be
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ControllerRevisions returns a ControllerRevisionInformer.
    +	ControllerRevisions() ControllerRevisionInformer
    +	// DaemonSets returns a DaemonSetInformer.
    +	DaemonSets() DaemonSetInformer
    +	// Deployments returns a DeploymentInformer.
    +	Deployments() DeploymentInformer
    +	// ReplicaSets returns a ReplicaSetInformer.
    +	ReplicaSets() ReplicaSetInformer
    +	// StatefulSets returns a StatefulSetInformer.
    +	StatefulSets() StatefulSetInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ControllerRevisions returns a ControllerRevisionInformer.
    +func (v *version) ControllerRevisions() ControllerRevisionInformer {
    +	return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// DaemonSets returns a DaemonSetInformer.
    +func (v *version) DaemonSets() DaemonSetInformer {
    +	return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Deployments returns a DeploymentInformer.
    +func (v *version) Deployments() DeploymentInformer {
    +	return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ReplicaSets returns a ReplicaSetInformer.
    +func (v *version) ReplicaSets() ReplicaSetInformer {
    +	return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// StatefulSets returns a StatefulSetInformer.
    +func (v *version) StatefulSets() StatefulSetInformer {
    +	return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go
    new file mode 100644
    index 0000000000..85d12bb65d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta2 "k8s.io/api/apps/v1beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicaSetInformer provides access to a shared informer and lister for
    +// ReplicaSets.
    +type ReplicaSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta2.ReplicaSetLister
    +}
    +
    +type replicaSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewReplicaSetInformer constructs a new informer for ReplicaSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().ReplicaSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().ReplicaSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta2.ReplicaSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta2.ReplicaSet{}, f.defaultInformer)
    +}
    +
    +func (f *replicaSetInformer) Lister() v1beta2.ReplicaSetLister {
    +	return v1beta2.NewReplicaSetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go
    new file mode 100644
    index 0000000000..2fab6f7b2b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	appsv1beta2 "k8s.io/api/apps/v1beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// StatefulSetInformer provides access to a shared informer and lister for
    +// StatefulSets.
    +type StatefulSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta2.StatefulSetLister
    +}
    +
    +type statefulSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewStatefulSetInformer constructs a new informer for StatefulSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().StatefulSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AppsV1beta2().StatefulSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&appsv1beta2.StatefulSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&appsv1beta2.StatefulSet{}, f.defaultInformer)
    +}
    +
    +func (f *statefulSetInformer) Lister() v1beta2.StatefulSetLister {
    +	return v1beta2.NewStatefulSetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/interface.go
    new file mode 100644
    index 0000000000..2b3b2d0e50
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/interface.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package autoscaling
    +
    +import (
    +	v1 "k8s.io/client-go/informers/autoscaling/v1"
    +	v2 "k8s.io/client-go/informers/autoscaling/v2"
    +	v2beta1 "k8s.io/client-go/informers/autoscaling/v2beta1"
    +	v2beta2 "k8s.io/client-go/informers/autoscaling/v2beta2"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V2 provides access to shared informers for resources in V2.
    +	V2() v2.Interface
    +	// V2beta1 provides access to shared informers for resources in V2beta1.
    +	V2beta1() v2beta1.Interface
    +	// V2beta2 provides access to shared informers for resources in V2beta2.
    +	V2beta2() v2beta2.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V2 returns a new v2.Interface.
    +func (g *group) V2() v2.Interface {
    +	return v2.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V2beta1 returns a new v2beta1.Interface.
    +func (g *group) V2beta1() v2beta1.Interface {
    +	return v2beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V2beta2 returns a new v2beta2.Interface.
    +func (g *group) V2beta2() v2beta2.Interface {
    +	return v2beta2.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..44f041e906
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	autoscalingv1 "k8s.io/api/autoscaling/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/autoscaling/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for
    +// HorizontalPodAutoscalers.
    +type HorizontalPodAutoscalerInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.HorizontalPodAutoscalerLister
    +}
    +
    +type horizontalPodAutoscalerInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&autoscalingv1.HorizontalPodAutoscaler{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&autoscalingv1.HorizontalPodAutoscaler{}, f.defaultInformer)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Lister() v1.HorizontalPodAutoscalerLister {
    +	return v1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go
    new file mode 100644
    index 0000000000..601d0f77f1
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +	HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
    +	return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..5ddb3b015f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	autoscalingv2 "k8s.io/api/autoscaling/v2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v2 "k8s.io/client-go/listers/autoscaling/v2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for
    +// HorizontalPodAutoscalers.
    +type HorizontalPodAutoscalerInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v2.HorizontalPodAutoscalerLister
    +}
    +
    +type horizontalPodAutoscalerInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&autoscalingv2.HorizontalPodAutoscaler{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&autoscalingv2.HorizontalPodAutoscaler{}, f.defaultInformer)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Lister() v2.HorizontalPodAutoscalerLister {
    +	return v2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v2/interface.go
    new file mode 100644
    index 0000000000..2c71908e40
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v2
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +	HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
    +	return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..6385a2a190
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v2beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for
    +// HorizontalPodAutoscalers.
    +type HorizontalPodAutoscalerInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v2beta1.HorizontalPodAutoscalerLister
    +}
    +
    +type horizontalPodAutoscalerInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&autoscalingv2beta1.HorizontalPodAutoscaler{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&autoscalingv2beta1.HorizontalPodAutoscaler{}, f.defaultInformer)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Lister() v2beta1.HorizontalPodAutoscalerLister {
    +	return v2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go
    new file mode 100644
    index 0000000000..ff5d44b09d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v2beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +	HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
    +	return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..f1ac3f0737
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v2beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v2beta2 "k8s.io/client-go/listers/autoscaling/v2beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for
    +// HorizontalPodAutoscalers.
    +type HorizontalPodAutoscalerInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v2beta2.HorizontalPodAutoscalerLister
    +}
    +
    +type horizontalPodAutoscalerInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&autoscalingv2beta2.HorizontalPodAutoscaler{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&autoscalingv2beta2.HorizontalPodAutoscaler{}, f.defaultInformer)
    +}
    +
    +func (f *horizontalPodAutoscalerInformer) Lister() v2beta2.HorizontalPodAutoscalerLister {
    +	return v2beta2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/interface.go
    new file mode 100644
    index 0000000000..e482c57925
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v2beta2
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +	HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
    +func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
    +	return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/batch/interface.go b/vendor/k8s.io/client-go/informers/batch/interface.go
    new file mode 100644
    index 0000000000..53b81c7ecc
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/batch/interface.go
    @@ -0,0 +1,54 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package batch
    +
    +import (
    +	v1 "k8s.io/client-go/informers/batch/v1"
    +	v1beta1 "k8s.io/client-go/informers/batch/v1beta1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go
    new file mode 100644
    index 0000000000..fdfb655134
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	batchv1 "k8s.io/api/batch/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/batch/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CronJobInformer provides access to a shared informer and lister for
    +// CronJobs.
    +type CronJobInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.CronJobLister
    +}
    +
    +type cronJobInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewCronJobInformer constructs a new informer for CronJob type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCronJobInformer constructs a new informer for CronJob type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.BatchV1().CronJobs(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.BatchV1().CronJobs(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&batchv1.CronJob{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cronJobInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&batchv1.CronJob{}, f.defaultInformer)
    +}
    +
    +func (f *cronJobInformer) Lister() v1.CronJobLister {
    +	return v1.NewCronJobLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/batch/v1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1/interface.go
    new file mode 100644
    index 0000000000..84567fb592
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/batch/v1/interface.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// CronJobs returns a CronJobInformer.
    +	CronJobs() CronJobInformer
    +	// Jobs returns a JobInformer.
    +	Jobs() JobInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// CronJobs returns a CronJobInformer.
    +func (v *version) CronJobs() CronJobInformer {
    +	return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Jobs returns a JobInformer.
    +func (v *version) Jobs() JobInformer {
    +	return &jobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/batch/v1/job.go b/vendor/k8s.io/client-go/informers/batch/v1/job.go
    new file mode 100644
    index 0000000000..4992f52286
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/batch/v1/job.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	batchv1 "k8s.io/api/batch/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/batch/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// JobInformer provides access to a shared informer and lister for
    +// Jobs.
    +type JobInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.JobLister
    +}
    +
    +type jobInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewJobInformer constructs a new informer for Job type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredJobInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredJobInformer constructs a new informer for Job type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.BatchV1().Jobs(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.BatchV1().Jobs(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&batchv1.Job{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *jobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *jobInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&batchv1.Job{}, f.defaultInformer)
    +}
    +
    +func (f *jobInformer) Lister() v1.JobLister {
    +	return v1.NewJobLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go
    new file mode 100644
    index 0000000000..820c93eaaa
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	batchv1beta1 "k8s.io/api/batch/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/batch/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CronJobInformer provides access to a shared informer and lister for
    +// CronJobs.
    +type CronJobInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.CronJobLister
    +}
    +
    +type cronJobInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewCronJobInformer constructs a new informer for CronJob type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCronJobInformer constructs a new informer for CronJob type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.BatchV1beta1().CronJobs(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.BatchV1beta1().CronJobs(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&batchv1beta1.CronJob{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cronJobInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&batchv1beta1.CronJob{}, f.defaultInformer)
    +}
    +
    +func (f *cronJobInformer) Lister() v1beta1.CronJobLister {
    +	return v1beta1.NewCronJobLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go
    new file mode 100644
    index 0000000000..76cae22d68
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// CronJobs returns a CronJobInformer.
    +	CronJobs() CronJobInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// CronJobs returns a CronJobInformer.
    +func (v *version) CronJobs() CronJobInformer {
    +	return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/certificates/interface.go b/vendor/k8s.io/client-go/informers/certificates/interface.go
    new file mode 100644
    index 0000000000..39a4e29111
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/certificates/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package certificates
    +
    +import (
    +	v1 "k8s.io/client-go/informers/certificates/v1"
    +	v1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/certificates/v1beta1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go
    new file mode 100644
    index 0000000000..73d33a914c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	certificatesv1 "k8s.io/api/certificates/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/certificates/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CertificateSigningRequestInformer provides access to a shared informer and lister for
    +// CertificateSigningRequests.
    +type CertificateSigningRequestInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.CertificateSigningRequestLister
    +}
    +
    +type certificateSigningRequestInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CertificatesV1().CertificateSigningRequests().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CertificatesV1().CertificateSigningRequests().Watch(context.TODO(), options)
    +			},
    +		},
    +		&certificatesv1.CertificateSigningRequest{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&certificatesv1.CertificateSigningRequest{}, f.defaultInformer)
    +}
    +
    +func (f *certificateSigningRequestInformer) Lister() v1.CertificateSigningRequestLister {
    +	return v1.NewCertificateSigningRequestLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/certificates/v1/interface.go b/vendor/k8s.io/client-go/informers/certificates/v1/interface.go
    new file mode 100644
    index 0000000000..91ccfb715d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/certificates/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// CertificateSigningRequests returns a CertificateSigningRequestInformer.
    +	CertificateSigningRequests() CertificateSigningRequestInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// CertificateSigningRequests returns a CertificateSigningRequestInformer.
    +func (v *version) CertificateSigningRequests() CertificateSigningRequestInformer {
    +	return &certificateSigningRequestInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go
    new file mode 100644
    index 0000000000..e8b3415870
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterTrustBundleInformer provides access to a shared informer and lister for
    +// ClusterTrustBundles.
    +type ClusterTrustBundleInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.ClusterTrustBundleLister
    +}
    +
    +type clusterTrustBundleInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewClusterTrustBundleInformer constructs a new informer for ClusterTrustBundle type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredClusterTrustBundleInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredClusterTrustBundleInformer constructs a new informer for ClusterTrustBundle type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CertificatesV1alpha1().ClusterTrustBundles().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CertificatesV1alpha1().ClusterTrustBundles().Watch(context.TODO(), options)
    +			},
    +		},
    +		&certificatesv1alpha1.ClusterTrustBundle{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *clusterTrustBundleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredClusterTrustBundleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *clusterTrustBundleInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&certificatesv1alpha1.ClusterTrustBundle{}, f.defaultInformer)
    +}
    +
    +func (f *clusterTrustBundleInformer) Lister() v1alpha1.ClusterTrustBundleLister {
    +	return v1alpha1.NewClusterTrustBundleLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..40ce8f42db
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ClusterTrustBundles returns a ClusterTrustBundleInformer.
    +	ClusterTrustBundles() ClusterTrustBundleInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ClusterTrustBundles returns a ClusterTrustBundleInformer.
    +func (v *version) ClusterTrustBundles() ClusterTrustBundleInformer {
    +	return &clusterTrustBundleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go
    new file mode 100644
    index 0000000000..4e167ab8b1
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/certificates/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CertificateSigningRequestInformer provides access to a shared informer and lister for
    +// CertificateSigningRequests.
    +type CertificateSigningRequestInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.CertificateSigningRequestLister
    +}
    +
    +type certificateSigningRequestInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CertificatesV1beta1().CertificateSigningRequests().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CertificatesV1beta1().CertificateSigningRequests().Watch(context.TODO(), options)
    +			},
    +		},
    +		&certificatesv1beta1.CertificateSigningRequest{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&certificatesv1beta1.CertificateSigningRequest{}, f.defaultInformer)
    +}
    +
    +func (f *certificateSigningRequestInformer) Lister() v1beta1.CertificateSigningRequestLister {
    +	return v1beta1.NewCertificateSigningRequestLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go
    new file mode 100644
    index 0000000000..258dd1d0e6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// CertificateSigningRequests returns a CertificateSigningRequestInformer.
    +	CertificateSigningRequests() CertificateSigningRequestInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// CertificateSigningRequests returns a CertificateSigningRequestInformer.
    +func (v *version) CertificateSigningRequests() CertificateSigningRequestInformer {
    +	return &certificateSigningRequestInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/coordination/interface.go b/vendor/k8s.io/client-go/informers/coordination/interface.go
    new file mode 100644
    index 0000000000..026b4d9476
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/coordination/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package coordination
    +
    +import (
    +	v1 "k8s.io/client-go/informers/coordination/v1"
    +	v1alpha1 "k8s.io/client-go/informers/coordination/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/coordination/v1beta1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/coordination/v1/interface.go b/vendor/k8s.io/client-go/informers/coordination/v1/interface.go
    new file mode 100644
    index 0000000000..05c4acbef8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/coordination/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// Leases returns a LeaseInformer.
    +	Leases() LeaseInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// Leases returns a LeaseInformer.
    +func (v *version) Leases() LeaseInformer {
    +	return &leaseInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/coordination/v1/lease.go b/vendor/k8s.io/client-go/informers/coordination/v1/lease.go
    new file mode 100644
    index 0000000000..e538923a86
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/coordination/v1/lease.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	coordinationv1 "k8s.io/api/coordination/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/coordination/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// LeaseInformer provides access to a shared informer and lister for
    +// Leases.
    +type LeaseInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.LeaseLister
    +}
    +
    +type leaseInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewLeaseInformer constructs a new informer for Lease type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredLeaseInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredLeaseInformer constructs a new informer for Lease type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoordinationV1().Leases(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoordinationV1().Leases(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&coordinationv1.Lease{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredLeaseInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *leaseInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&coordinationv1.Lease{}, f.defaultInformer)
    +}
    +
    +func (f *leaseInformer) Lister() v1.LeaseLister {
    +	return v1.NewLeaseLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..4058af2806
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// LeaseCandidates returns a LeaseCandidateInformer.
    +	LeaseCandidates() LeaseCandidateInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// LeaseCandidates returns a LeaseCandidateInformer.
    +func (v *version) LeaseCandidates() LeaseCandidateInformer {
    +	return &leaseCandidateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go
    new file mode 100644
    index 0000000000..21bc47a8e6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/coordination/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// LeaseCandidateInformer provides access to a shared informer and lister for
    +// LeaseCandidates.
    +type LeaseCandidateInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.LeaseCandidateLister
    +}
    +
    +type leaseCandidateInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewLeaseCandidateInformer constructs a new informer for LeaseCandidate type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredLeaseCandidateInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredLeaseCandidateInformer constructs a new informer for LeaseCandidate type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoordinationV1alpha1().LeaseCandidates(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoordinationV1alpha1().LeaseCandidates(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&coordinationv1alpha1.LeaseCandidate{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *leaseCandidateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredLeaseCandidateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *leaseCandidateInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&coordinationv1alpha1.LeaseCandidate{}, f.defaultInformer)
    +}
    +
    +func (f *leaseCandidateInformer) Lister() v1alpha1.LeaseCandidateLister {
    +	return v1alpha1.NewLeaseCandidateLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/coordination/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/coordination/v1beta1/interface.go
    new file mode 100644
    index 0000000000..360266206c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/coordination/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// Leases returns a LeaseInformer.
    +	Leases() LeaseInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// Leases returns a LeaseInformer.
    +func (v *version) Leases() LeaseInformer {
    +	return &leaseInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go
    new file mode 100644
    index 0000000000..5a6959c0ba
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/coordination/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// LeaseInformer provides access to a shared informer and lister for
    +// Leases.
    +type LeaseInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.LeaseLister
    +}
    +
    +type leaseInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewLeaseInformer constructs a new informer for Lease type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredLeaseInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredLeaseInformer constructs a new informer for Lease type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoordinationV1beta1().Leases(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoordinationV1beta1().Leases(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&coordinationv1beta1.Lease{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredLeaseInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *leaseInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&coordinationv1beta1.Lease{}, f.defaultInformer)
    +}
    +
    +func (f *leaseInformer) Lister() v1beta1.LeaseLister {
    +	return v1beta1.NewLeaseLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/interface.go b/vendor/k8s.io/client-go/informers/core/interface.go
    new file mode 100644
    index 0000000000..de8396b516
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/interface.go
    @@ -0,0 +1,46 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package core
    +
    +import (
    +	v1 "k8s.io/client-go/informers/core/v1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go
    new file mode 100644
    index 0000000000..ccdee535bc
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ComponentStatusInformer provides access to a shared informer and lister for
    +// ComponentStatuses.
    +type ComponentStatusInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ComponentStatusLister
    +}
    +
    +type componentStatusInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewComponentStatusInformer constructs a new informer for ComponentStatus type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredComponentStatusInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredComponentStatusInformer constructs a new informer for ComponentStatus type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ComponentStatuses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ComponentStatuses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.ComponentStatus{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *componentStatusInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *componentStatusInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.ComponentStatus{}, f.defaultInformer)
    +}
    +
    +func (f *componentStatusInformer) Lister() v1.ComponentStatusLister {
    +	return v1.NewComponentStatusLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/vendor/k8s.io/client-go/informers/core/v1/configmap.go
    new file mode 100644
    index 0000000000..6253581784
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/configmap.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ConfigMapInformer provides access to a shared informer and lister for
    +// ConfigMaps.
    +type ConfigMapInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ConfigMapLister
    +}
    +
    +type configMapInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewConfigMapInformer constructs a new informer for ConfigMap type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredConfigMapInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredConfigMapInformer constructs a new informer for ConfigMap type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ConfigMaps(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.ConfigMap{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *configMapInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredConfigMapInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *configMapInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.ConfigMap{}, f.defaultInformer)
    +}
    +
    +func (f *configMapInformer) Lister() v1.ConfigMapLister {
    +	return v1.NewConfigMapLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go
    new file mode 100644
    index 0000000000..cd0f25b7f7
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// EndpointsInformer provides access to a shared informer and lister for
    +// Endpoints.
    +type EndpointsInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.EndpointsLister
    +}
    +
    +type endpointsInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewEndpointsInformer constructs a new informer for Endpoints type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredEndpointsInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredEndpointsInformer constructs a new informer for Endpoints type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Endpoints(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Endpoints(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.Endpoints{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *endpointsInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredEndpointsInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *endpointsInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.Endpoints{}, f.defaultInformer)
    +}
    +
    +func (f *endpointsInformer) Lister() v1.EndpointsLister {
    +	return v1.NewEndpointsLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/event.go b/vendor/k8s.io/client-go/informers/core/v1/event.go
    new file mode 100644
    index 0000000000..8825e9b7a4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/event.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// EventInformer provides access to a shared informer and lister for
    +// Events.
    +type EventInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.EventLister
    +}
    +
    +type eventInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewEventInformer constructs a new informer for Event type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredEventInformer constructs a new informer for Event type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Events(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Events(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.Event{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *eventInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.Event{}, f.defaultInformer)
    +}
    +
    +func (f *eventInformer) Lister() v1.EventLister {
    +	return v1.NewEventLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/interface.go b/vendor/k8s.io/client-go/informers/core/v1/interface.go
    new file mode 100644
    index 0000000000..b2216a05c8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/interface.go
    @@ -0,0 +1,150 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ComponentStatuses returns a ComponentStatusInformer.
    +	ComponentStatuses() ComponentStatusInformer
    +	// ConfigMaps returns a ConfigMapInformer.
    +	ConfigMaps() ConfigMapInformer
    +	// Endpoints returns a EndpointsInformer.
    +	Endpoints() EndpointsInformer
    +	// Events returns a EventInformer.
    +	Events() EventInformer
    +	// LimitRanges returns a LimitRangeInformer.
    +	LimitRanges() LimitRangeInformer
    +	// Namespaces returns a NamespaceInformer.
    +	Namespaces() NamespaceInformer
    +	// Nodes returns a NodeInformer.
    +	Nodes() NodeInformer
    +	// PersistentVolumes returns a PersistentVolumeInformer.
    +	PersistentVolumes() PersistentVolumeInformer
    +	// PersistentVolumeClaims returns a PersistentVolumeClaimInformer.
    +	PersistentVolumeClaims() PersistentVolumeClaimInformer
    +	// Pods returns a PodInformer.
    +	Pods() PodInformer
    +	// PodTemplates returns a PodTemplateInformer.
    +	PodTemplates() PodTemplateInformer
    +	// ReplicationControllers returns a ReplicationControllerInformer.
    +	ReplicationControllers() ReplicationControllerInformer
    +	// ResourceQuotas returns a ResourceQuotaInformer.
    +	ResourceQuotas() ResourceQuotaInformer
    +	// Secrets returns a SecretInformer.
    +	Secrets() SecretInformer
    +	// Services returns a ServiceInformer.
    +	Services() ServiceInformer
    +	// ServiceAccounts returns a ServiceAccountInformer.
    +	ServiceAccounts() ServiceAccountInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ComponentStatuses returns a ComponentStatusInformer.
    +func (v *version) ComponentStatuses() ComponentStatusInformer {
    +	return &componentStatusInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ConfigMaps returns a ConfigMapInformer.
    +func (v *version) ConfigMaps() ConfigMapInformer {
    +	return &configMapInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Endpoints returns a EndpointsInformer.
    +func (v *version) Endpoints() EndpointsInformer {
    +	return &endpointsInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Events returns a EventInformer.
    +func (v *version) Events() EventInformer {
    +	return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// LimitRanges returns a LimitRangeInformer.
    +func (v *version) LimitRanges() LimitRangeInformer {
    +	return &limitRangeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Namespaces returns a NamespaceInformer.
    +func (v *version) Namespaces() NamespaceInformer {
    +	return &namespaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Nodes returns a NodeInformer.
    +func (v *version) Nodes() NodeInformer {
    +	return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PersistentVolumes returns a PersistentVolumeInformer.
    +func (v *version) PersistentVolumes() PersistentVolumeInformer {
    +	return &persistentVolumeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PersistentVolumeClaims returns a PersistentVolumeClaimInformer.
    +func (v *version) PersistentVolumeClaims() PersistentVolumeClaimInformer {
    +	return &persistentVolumeClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Pods returns a PodInformer.
    +func (v *version) Pods() PodInformer {
    +	return &podInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PodTemplates returns a PodTemplateInformer.
    +func (v *version) PodTemplates() PodTemplateInformer {
    +	return &podTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ReplicationControllers returns a ReplicationControllerInformer.
    +func (v *version) ReplicationControllers() ReplicationControllerInformer {
    +	return &replicationControllerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ResourceQuotas returns a ResourceQuotaInformer.
    +func (v *version) ResourceQuotas() ResourceQuotaInformer {
    +	return &resourceQuotaInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Secrets returns a SecretInformer.
    +func (v *version) Secrets() SecretInformer {
    +	return &secretInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Services returns a ServiceInformer.
    +func (v *version) Services() ServiceInformer {
    +	return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ServiceAccounts returns a ServiceAccountInformer.
    +func (v *version) ServiceAccounts() ServiceAccountInformer {
    +	return &serviceAccountInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go
    new file mode 100644
    index 0000000000..4cbfda1f7a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// LimitRangeInformer provides access to a shared informer and lister for
    +// LimitRanges.
    +type LimitRangeInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.LimitRangeLister
    +}
    +
    +type limitRangeInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewLimitRangeInformer constructs a new informer for LimitRange type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredLimitRangeInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredLimitRangeInformer constructs a new informer for LimitRange type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().LimitRanges(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().LimitRanges(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.LimitRange{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *limitRangeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredLimitRangeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *limitRangeInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.LimitRange{}, f.defaultInformer)
    +}
    +
    +func (f *limitRangeInformer) Lister() v1.LimitRangeLister {
    +	return v1.NewLimitRangeLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/vendor/k8s.io/client-go/informers/core/v1/namespace.go
    new file mode 100644
    index 0000000000..506f930a7d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/namespace.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// NamespaceInformer provides access to a shared informer and lister for
    +// Namespaces.
    +type NamespaceInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.NamespaceLister
    +}
    +
    +type namespaceInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewNamespaceInformer constructs a new informer for Namespace type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredNamespaceInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredNamespaceInformer constructs a new informer for Namespace type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Namespaces().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Namespaces().Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.Namespace{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *namespaceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.Namespace{}, f.defaultInformer)
    +}
    +
    +func (f *namespaceInformer) Lister() v1.NamespaceLister {
    +	return v1.NewNamespaceLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/node.go b/vendor/k8s.io/client-go/informers/core/v1/node.go
    new file mode 100644
    index 0000000000..9939fc2cb6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/node.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// NodeInformer provides access to a shared informer and lister for
    +// Nodes.
    +type NodeInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.NodeLister
    +}
    +
    +type nodeInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewNodeInformer constructs a new informer for Node type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredNodeInformer constructs a new informer for Node type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Nodes().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Nodes().Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.Node{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *nodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *nodeInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.Node{}, f.defaultInformer)
    +}
    +
    +func (f *nodeInformer) Lister() v1.NodeLister {
    +	return v1.NewNodeLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go
    new file mode 100644
    index 0000000000..c82445997c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PersistentVolumeInformer provides access to a shared informer and lister for
    +// PersistentVolumes.
    +type PersistentVolumeInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.PersistentVolumeLister
    +}
    +
    +type persistentVolumeInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPersistentVolumeInformer constructs a new informer for PersistentVolume type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPersistentVolumeInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPersistentVolumeInformer constructs a new informer for PersistentVolume type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().PersistentVolumes().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().PersistentVolumes().Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.PersistentVolume{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *persistentVolumeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.PersistentVolume{}, f.defaultInformer)
    +}
    +
    +func (f *persistentVolumeInformer) Lister() v1.PersistentVolumeLister {
    +	return v1.NewPersistentVolumeLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go
    new file mode 100644
    index 0000000000..7a7df1cff8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PersistentVolumeClaimInformer provides access to a shared informer and lister for
    +// PersistentVolumeClaims.
    +type PersistentVolumeClaimInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.PersistentVolumeClaimLister
    +}
    +
    +type persistentVolumeClaimInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPersistentVolumeClaimInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().PersistentVolumeClaims(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.PersistentVolumeClaim{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *persistentVolumeClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPersistentVolumeClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.PersistentVolumeClaim{}, f.defaultInformer)
    +}
    +
    +func (f *persistentVolumeClaimInformer) Lister() v1.PersistentVolumeClaimLister {
    +	return v1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/pod.go b/vendor/k8s.io/client-go/informers/core/v1/pod.go
    new file mode 100644
    index 0000000000..5c713a9b6f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/pod.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PodInformer provides access to a shared informer and lister for
    +// Pods.
    +type PodInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.PodLister
    +}
    +
    +type podInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewPodInformer constructs a new informer for Pod type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPodInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPodInformer constructs a new informer for Pod type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Pods(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Pods(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.Pod{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *podInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPodInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *podInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.Pod{}, f.defaultInformer)
    +}
    +
    +func (f *podInformer) Lister() v1.PodLister {
    +	return v1.NewPodLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go
    new file mode 100644
    index 0000000000..2a16e910db
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PodTemplateInformer provides access to a shared informer and lister for
    +// PodTemplates.
    +type PodTemplateInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.PodTemplateLister
    +}
    +
    +type podTemplateInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewPodTemplateInformer constructs a new informer for PodTemplate type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPodTemplateInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPodTemplateInformer constructs a new informer for PodTemplate type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().PodTemplates(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().PodTemplates(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.PodTemplate{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPodTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *podTemplateInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.PodTemplate{}, f.defaultInformer)
    +}
    +
    +func (f *podTemplateInformer) Lister() v1.PodTemplateLister {
    +	return v1.NewPodTemplateLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go
    new file mode 100644
    index 0000000000..930beb4cd5
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicationControllerInformer provides access to a shared informer and lister for
    +// ReplicationControllers.
    +type ReplicationControllerInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ReplicationControllerLister
    +}
    +
    +type replicationControllerInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewReplicationControllerInformer constructs a new informer for ReplicationController type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredReplicationControllerInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredReplicationControllerInformer constructs a new informer for ReplicationController type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ReplicationControllers(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ReplicationControllers(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.ReplicationController{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *replicationControllerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredReplicationControllerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.ReplicationController{}, f.defaultInformer)
    +}
    +
    +func (f *replicationControllerInformer) Lister() v1.ReplicationControllerLister {
    +	return v1.NewReplicationControllerLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go
    new file mode 100644
    index 0000000000..619262a612
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceQuotaInformer provides access to a shared informer and lister for
    +// ResourceQuotas.
    +type ResourceQuotaInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ResourceQuotaLister
    +}
    +
    +type resourceQuotaInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewResourceQuotaInformer constructs a new informer for ResourceQuota type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredResourceQuotaInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ResourceQuotas(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ResourceQuotas(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.ResourceQuota{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *resourceQuotaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredResourceQuotaInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.ResourceQuota{}, f.defaultInformer)
    +}
    +
    +func (f *resourceQuotaInformer) Lister() v1.ResourceQuotaLister {
    +	return v1.NewResourceQuotaLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/secret.go b/vendor/k8s.io/client-go/informers/core/v1/secret.go
    new file mode 100644
    index 0000000000..a6be070693
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/secret.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// SecretInformer provides access to a shared informer and lister for
    +// Secrets.
    +type SecretInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.SecretLister
    +}
    +
    +type secretInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewSecretInformer constructs a new informer for Secret type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredSecretInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredSecretInformer constructs a new informer for Secret type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Secrets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.Secret{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *secretInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredSecretInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *secretInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.Secret{}, f.defaultInformer)
    +}
    +
    +func (f *secretInformer) Lister() v1.SecretLister {
    +	return v1.NewSecretLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/service.go b/vendor/k8s.io/client-go/informers/core/v1/service.go
    new file mode 100644
    index 0000000000..3d9ecc6e95
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/service.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceInformer provides access to a shared informer and lister for
    +// Services.
    +type ServiceInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ServiceLister
    +}
    +
    +type serviceInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewServiceInformer constructs a new informer for Service type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredServiceInformer constructs a new informer for Service type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Services(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().Services(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.Service{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *serviceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *serviceInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.Service{}, f.defaultInformer)
    +}
    +
    +func (f *serviceInformer) Lister() v1.ServiceLister {
    +	return v1.NewServiceLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go
    new file mode 100644
    index 0000000000..44371c9fa4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	corev1 "k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/core/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceAccountInformer provides access to a shared informer and lister for
    +// ServiceAccounts.
    +type ServiceAccountInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ServiceAccountLister
    +}
    +
    +type serviceAccountInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewServiceAccountInformer constructs a new informer for ServiceAccount type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredServiceAccountInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredServiceAccountInformer constructs a new informer for ServiceAccount type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ServiceAccounts(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.CoreV1().ServiceAccounts(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&corev1.ServiceAccount{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *serviceAccountInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredServiceAccountInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&corev1.ServiceAccount{}, f.defaultInformer)
    +}
    +
    +func (f *serviceAccountInformer) Lister() v1.ServiceAccountLister {
    +	return v1.NewServiceAccountLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/discovery/interface.go b/vendor/k8s.io/client-go/informers/discovery/interface.go
    new file mode 100644
    index 0000000000..37da9371f6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/discovery/interface.go
    @@ -0,0 +1,54 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package discovery
    +
    +import (
    +	v1 "k8s.io/client-go/informers/discovery/v1"
    +	v1beta1 "k8s.io/client-go/informers/discovery/v1beta1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go
    new file mode 100644
    index 0000000000..6c6c3372bf
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	discoveryv1 "k8s.io/api/discovery/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/discovery/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// EndpointSliceInformer provides access to a shared informer and lister for
    +// EndpointSlices.
    +type EndpointSliceInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.EndpointSliceLister
    +}
    +
    +type endpointSliceInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewEndpointSliceInformer constructs a new informer for EndpointSlice type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredEndpointSliceInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredEndpointSliceInformer constructs a new informer for EndpointSlice type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.DiscoveryV1().EndpointSlices(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&discoveryv1.EndpointSlice{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredEndpointSliceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&discoveryv1.EndpointSlice{}, f.defaultInformer)
    +}
    +
    +func (f *endpointSliceInformer) Lister() v1.EndpointSliceLister {
    +	return v1.NewEndpointSliceLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/discovery/v1/interface.go b/vendor/k8s.io/client-go/informers/discovery/v1/interface.go
    new file mode 100644
    index 0000000000..d90c63c0a9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/discovery/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// EndpointSlices returns a EndpointSliceInformer.
    +	EndpointSlices() EndpointSliceInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// EndpointSlices returns a EndpointSliceInformer.
    +func (v *version) EndpointSlices() EndpointSliceInformer {
    +	return &endpointSliceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go
    new file mode 100644
    index 0000000000..69ae38a91a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/discovery/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// EndpointSliceInformer provides access to a shared informer and lister for
    +// EndpointSlices.
    +type EndpointSliceInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.EndpointSliceLister
    +}
    +
    +type endpointSliceInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewEndpointSliceInformer constructs a new informer for EndpointSlice type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredEndpointSliceInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredEndpointSliceInformer constructs a new informer for EndpointSlice type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&discoveryv1beta1.EndpointSlice{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredEndpointSliceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&discoveryv1beta1.EndpointSlice{}, f.defaultInformer)
    +}
    +
    +func (f *endpointSliceInformer) Lister() v1beta1.EndpointSliceLister {
    +	return v1beta1.NewEndpointSliceLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go
    new file mode 100644
    index 0000000000..4661646e01
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// EndpointSlices returns a EndpointSliceInformer.
    +	EndpointSlices() EndpointSliceInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// EndpointSlices returns a EndpointSliceInformer.
    +func (v *version) EndpointSlices() EndpointSliceInformer {
    +	return &endpointSliceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/doc.go b/vendor/k8s.io/client-go/informers/doc.go
    new file mode 100644
    index 0000000000..f37c3e4d01
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/doc.go
    @@ -0,0 +1,18 @@
    +/*
    +Copyright 2023 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Package informers provides generated informers for Kubernetes APIs.
    +package informers // import "k8s.io/client-go/informers"
    diff --git a/vendor/k8s.io/client-go/informers/events/interface.go b/vendor/k8s.io/client-go/informers/events/interface.go
    new file mode 100644
    index 0000000000..b350dde5b6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/events/interface.go
    @@ -0,0 +1,54 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package events
    +
    +import (
    +	v1 "k8s.io/client-go/informers/events/v1"
    +	v1beta1 "k8s.io/client-go/informers/events/v1beta1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/events/v1/event.go b/vendor/k8s.io/client-go/informers/events/v1/event.go
    new file mode 100644
    index 0000000000..f8d35ee15c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/events/v1/event.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	eventsv1 "k8s.io/api/events/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/events/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// EventInformer provides access to a shared informer and lister for
    +// Events.
    +type EventInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.EventLister
    +}
    +
    +type eventInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewEventInformer constructs a new informer for Event type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredEventInformer constructs a new informer for Event type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.EventsV1().Events(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.EventsV1().Events(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&eventsv1.Event{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *eventInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&eventsv1.Event{}, f.defaultInformer)
    +}
    +
    +func (f *eventInformer) Lister() v1.EventLister {
    +	return v1.NewEventLister(f.Informer().GetIndexer())
    +}
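
The constructor comments in event.go recommend the factory path over a standalone informer; the main reason to call NewFilteredEventInformer directly is to narrow the underlying list/watch with a tweakListOptions callback. A hedged sketch of both paths — the namespace, resync period, and field selector below are illustrative assumptions:

```go
package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	eventsv1informers "k8s.io/client-go/informers/events/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func buildEventInformers(client kubernetes.Interface) {
	// Preferred: one shared informer per type, deduplicated by the factory.
	factory := informers.NewSharedInformerFactory(client, 30*time.Minute)
	shared := factory.Events().V1().Events().Informer()
	_ = shared

	// Standalone: useful when the list/watch must be narrowed per consumer.
	standalone := eventsv1informers.NewFilteredEventInformer(
		client,
		"default",      // namespace (illustrative)
		30*time.Minute, // resync period
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(options *metav1.ListOptions) {
			// Illustrative filter; must be a field selector the API server
			// actually supports for this resource.
			options.FieldSelector = "type=Warning"
		},
	)
	_ = standalone
}
```

A standalone informer opens its own watch connection and cache, which is exactly the duplication the generated comments warn about; the filtered constructor is the escape hatch, not the default.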
    diff --git a/vendor/k8s.io/client-go/informers/events/v1/interface.go b/vendor/k8s.io/client-go/informers/events/v1/interface.go
    new file mode 100644
    index 0000000000..cd06e23359
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/events/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// Events returns a EventInformer.
    +	Events() EventInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// Events returns a EventInformer.
    +func (v *version) Events() EventInformer {
    +	return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go
    new file mode 100644
    index 0000000000..025f6a5cf3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	eventsv1beta1 "k8s.io/api/events/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/events/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// EventInformer provides access to a shared informer and lister for
    +// Events.
    +type EventInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.EventLister
    +}
    +
    +type eventInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewEventInformer constructs a new informer for Event type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredEventInformer constructs a new informer for Event type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.EventsV1beta1().Events(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.EventsV1beta1().Events(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&eventsv1beta1.Event{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *eventInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&eventsv1beta1.Event{}, f.defaultInformer)
    +}
    +
    +func (f *eventInformer) Lister() v1beta1.EventLister {
    +	return v1beta1.NewEventLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go
    new file mode 100644
    index 0000000000..c71888c9a4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// Events returns a EventInformer.
    +	Events() EventInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// Events returns a EventInformer.
    +func (v *version) Events() EventInformer {
    +	return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/extensions/interface.go b/vendor/k8s.io/client-go/informers/extensions/interface.go
    new file mode 100644
    index 0000000000..94a66d3853
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/extensions/interface.go
    @@ -0,0 +1,46 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package extensions
    +
    +import (
    +	v1beta1 "k8s.io/client-go/informers/extensions/v1beta1"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go
    new file mode 100644
    index 0000000000..050080a598
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DaemonSetInformer provides access to a shared informer and lister for
    +// DaemonSets.
    +type DaemonSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.DaemonSetLister
    +}
    +
    +type daemonSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewDaemonSetInformer constructs a new informer for DaemonSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().DaemonSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&extensionsv1beta1.DaemonSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&extensionsv1beta1.DaemonSet{}, f.defaultInformer)
    +}
    +
    +func (f *daemonSetInformer) Lister() v1beta1.DaemonSetLister {
    +	return v1beta1.NewDaemonSetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go
    new file mode 100644
    index 0000000000..1b16c5cc91
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentInformer provides access to a shared informer and lister for
    +// Deployments.
    +type DeploymentInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.DeploymentLister
    +}
    +
    +type deploymentInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDeploymentInformer constructs a new informer for Deployment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().Deployments(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().Deployments(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&extensionsv1beta1.Deployment{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&extensionsv1beta1.Deployment{}, f.defaultInformer)
    +}
    +
    +func (f *deploymentInformer) Lister() v1beta1.DeploymentLister {
    +	return v1beta1.NewDeploymentLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go
    new file mode 100644
    index 0000000000..f01a887617
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressInformer provides access to a shared informer and lister for
    +// Ingresses.
    +type IngressInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.IngressLister
    +}
    +
    +type ingressInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewIngressInformer constructs a new informer for Ingress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredIngressInformer constructs a new informer for Ingress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().Ingresses(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().Ingresses(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&extensionsv1beta1.Ingress{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *ingressInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&extensionsv1beta1.Ingress{}, f.defaultInformer)
    +}
    +
    +func (f *ingressInformer) Lister() v1beta1.IngressLister {
    +	return v1beta1.NewIngressLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go
    new file mode 100644
    index 0000000000..600741e3a2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// DaemonSets returns a DaemonSetInformer.
    +	DaemonSets() DaemonSetInformer
    +	// Deployments returns a DeploymentInformer.
    +	Deployments() DeploymentInformer
    +	// Ingresses returns a IngressInformer.
    +	Ingresses() IngressInformer
    +	// NetworkPolicies returns a NetworkPolicyInformer.
    +	NetworkPolicies() NetworkPolicyInformer
    +	// ReplicaSets returns a ReplicaSetInformer.
    +	ReplicaSets() ReplicaSetInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// DaemonSets returns a DaemonSetInformer.
    +func (v *version) DaemonSets() DaemonSetInformer {
    +	return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Deployments returns a DeploymentInformer.
    +func (v *version) Deployments() DeploymentInformer {
    +	return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Ingresses returns a IngressInformer.
    +func (v *version) Ingresses() IngressInformer {
    +	return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// NetworkPolicies returns a NetworkPolicyInformer.
    +func (v *version) NetworkPolicies() NetworkPolicyInformer {
    +	return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ReplicaSets returns a ReplicaSetInformer.
    +func (v *version) ReplicaSets() ReplicaSetInformer {
    +	return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go
    new file mode 100644
    index 0000000000..4a924619fb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// NetworkPolicyInformer provides access to a shared informer and lister for
    +// NetworkPolicies.
    +type NetworkPolicyInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.NetworkPolicyLister
    +}
    +
    +type networkPolicyInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&extensionsv1beta1.NetworkPolicy{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&extensionsv1beta1.NetworkPolicy{}, f.defaultInformer)
    +}
    +
    +func (f *networkPolicyInformer) Lister() v1beta1.NetworkPolicyLister {
    +	return v1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go
    new file mode 100644
    index 0000000000..f7e224bcfb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicaSetInformer provides access to a shared informer and lister for
    +// ReplicaSets.
    +type ReplicaSetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ReplicaSetLister
    +}
    +
    +type replicaSetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewReplicaSetInformer constructs a new informer for ReplicaSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().ReplicaSets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&extensionsv1beta1.ReplicaSet{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&extensionsv1beta1.ReplicaSet{}, f.defaultInformer)
    +}
    +
    +func (f *replicaSetInformer) Lister() v1beta1.ReplicaSetLister {
    +	return v1beta1.NewReplicaSetLister(f.Informer().GetIndexer())
    +}
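
Each generated informer in this extensions/v1beta1 package pairs a SharedIndexInformer with a lister built on the same namespace index, as the Informer() and Lister() methods above show. A sketch of typical consumption, using the ReplicaSet informer as the example — the handler body and namespace are illustrative, and the extensions/v1beta1 APIs themselves have long been deprecated in favor of apps/v1:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	extensionsv1beta1informers "k8s.io/client-go/informers/extensions/v1beta1"
	"k8s.io/client-go/tools/cache"
)

// consumeReplicaSets shows the Informer()/Lister() pairing that every generated
// informer follows: event handlers go on the shared informer, reads go through
// the lister, and both are backed by the same indexed local store.
func consumeReplicaSets(rsInformer extensionsv1beta1informers.ReplicaSetInformer) error {
	_, err := rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("replicaset added") // illustrative handler
		},
	})
	if err != nil {
		return err
	}

	// The lister reads from the informer's local cache; no API call happens here.
	replicaSets, err := rsInformer.Lister().ReplicaSets("default").List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("cached replicasets: %d\n", len(replicaSets))
	return nil
}
```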
    diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go
    new file mode 100644
    index 0000000000..86c24551ef
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/factory.go
    @@ -0,0 +1,376 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package informers
    +
    +import (
    +	reflect "reflect"
    +	sync "sync"
    +	time "time"
    +
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	schema "k8s.io/apimachinery/pkg/runtime/schema"
    +	admissionregistration "k8s.io/client-go/informers/admissionregistration"
    +	apiserverinternal "k8s.io/client-go/informers/apiserverinternal"
    +	apps "k8s.io/client-go/informers/apps"
    +	autoscaling "k8s.io/client-go/informers/autoscaling"
    +	batch "k8s.io/client-go/informers/batch"
    +	certificates "k8s.io/client-go/informers/certificates"
    +	coordination "k8s.io/client-go/informers/coordination"
    +	core "k8s.io/client-go/informers/core"
    +	discovery "k8s.io/client-go/informers/discovery"
    +	events "k8s.io/client-go/informers/events"
    +	extensions "k8s.io/client-go/informers/extensions"
    +	flowcontrol "k8s.io/client-go/informers/flowcontrol"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	networking "k8s.io/client-go/informers/networking"
    +	node "k8s.io/client-go/informers/node"
    +	policy "k8s.io/client-go/informers/policy"
    +	rbac "k8s.io/client-go/informers/rbac"
    +	resource "k8s.io/client-go/informers/resource"
    +	scheduling "k8s.io/client-go/informers/scheduling"
    +	storage "k8s.io/client-go/informers/storage"
    +	storagemigration "k8s.io/client-go/informers/storagemigration"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// SharedInformerOption defines the functional option type for SharedInformerFactory.
    +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
    +
    +type sharedInformerFactory struct {
    +	client           kubernetes.Interface
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	lock             sync.Mutex
    +	defaultResync    time.Duration
    +	customResync     map[reflect.Type]time.Duration
    +	transform        cache.TransformFunc
    +
    +	informers map[reflect.Type]cache.SharedIndexInformer
    +	// startedInformers is used for tracking which informers have been started.
    +	// This allows Start() to be called multiple times safely.
    +	startedInformers map[reflect.Type]bool
    +	// wg tracks how many goroutines were started.
    +	wg sync.WaitGroup
    +	// shuttingDown is true when Shutdown has been called. It may still be running
    +	// because it needs to wait for goroutines.
    +	shuttingDown bool
    +}
    +
    +// WithCustomResyncConfig sets a custom resync period for the specified informer types.
    +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
    +	return func(factory *sharedInformerFactory) *sharedInformerFactory {
    +		for k, v := range resyncConfig {
    +			factory.customResync[reflect.TypeOf(k)] = v
    +		}
    +		return factory
    +	}
    +}
    +
    +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
    +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
    +	return func(factory *sharedInformerFactory) *sharedInformerFactory {
    +		factory.tweakListOptions = tweakListOptions
    +		return factory
    +	}
    +}
    +
    +// WithNamespace limits the SharedInformerFactory to the specified namespace.
    +func WithNamespace(namespace string) SharedInformerOption {
    +	return func(factory *sharedInformerFactory) *sharedInformerFactory {
    +		factory.namespace = namespace
    +		return factory
    +	}
    +}
    +
    +// WithTransform sets a transform on all informers.
    +func WithTransform(transform cache.TransformFunc) SharedInformerOption {
    +	return func(factory *sharedInformerFactory) *sharedInformerFactory {
    +		factory.transform = transform
    +		return factory
    +	}
    +}
    +
    +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
    +func NewSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration) SharedInformerFactory {
    +	return NewSharedInformerFactoryWithOptions(client, defaultResync)
    +}
    +
    +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
    +// Listers obtained via this SharedInformerFactory will be subject to the same filters
    +// as specified here.
    +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
    +func NewFilteredSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
    +	return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
    +}
    +
    +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
    +func NewSharedInformerFactoryWithOptions(client kubernetes.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
    +	factory := &sharedInformerFactory{
    +		client:           client,
    +		namespace:        v1.NamespaceAll,
    +		defaultResync:    defaultResync,
    +		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
    +		startedInformers: make(map[reflect.Type]bool),
    +		customResync:     make(map[reflect.Type]time.Duration),
    +	}
    +
    +	// Apply all options
    +	for _, opt := range options {
    +		factory = opt(factory)
    +	}
    +
    +	return factory
    +}
    +
    +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
    +	f.lock.Lock()
    +	defer f.lock.Unlock()
    +
    +	if f.shuttingDown {
    +		return
    +	}
    +
    +	for informerType, informer := range f.informers {
    +		if !f.startedInformers[informerType] {
    +			f.wg.Add(1)
    +			// We need a new variable in each loop iteration,
    +			// otherwise the goroutine would use the loop variable
    +			// and that keeps changing.
    +			informer := informer
    +			go func() {
    +				defer f.wg.Done()
    +				informer.Run(stopCh)
    +			}()
    +			f.startedInformers[informerType] = true
    +		}
    +	}
    +}
    +
    +func (f *sharedInformerFactory) Shutdown() {
    +	f.lock.Lock()
    +	f.shuttingDown = true
    +	f.lock.Unlock()
    +
    +	// Will return immediately if there is nothing to wait for.
    +	f.wg.Wait()
    +}
    +
    +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
    +	informers := func() map[reflect.Type]cache.SharedIndexInformer {
    +		f.lock.Lock()
    +		defer f.lock.Unlock()
    +
    +		informers := map[reflect.Type]cache.SharedIndexInformer{}
    +		for informerType, informer := range f.informers {
    +			if f.startedInformers[informerType] {
    +				informers[informerType] = informer
    +			}
    +		}
    +		return informers
    +	}()
    +
    +	res := map[reflect.Type]bool{}
    +	for informType, informer := range informers {
    +		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
    +	}
    +	return res
    +}
    +
    +// InformerFor returns the SharedIndexInformer for obj using an internal
    +// client.
    +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
    +	f.lock.Lock()
    +	defer f.lock.Unlock()
    +
    +	informerType := reflect.TypeOf(obj)
    +	informer, exists := f.informers[informerType]
    +	if exists {
    +		return informer
    +	}
    +
    +	resyncPeriod, exists := f.customResync[informerType]
    +	if !exists {
    +		resyncPeriod = f.defaultResync
    +	}
    +
    +	informer = newFunc(f.client, resyncPeriod)
    +	informer.SetTransform(f.transform)
    +	f.informers[informerType] = informer
    +
    +	return informer
    +}
    +
    +// SharedInformerFactory provides shared informers for resources in all known
    +// API group versions.
    +//
    +// It is typically used like this:
    +//
    +//	ctx, cancel := context.WithCancel(context.Background())
    +//	defer cancel()
    +//	factory := NewSharedInformerFactory(client, resyncPeriod)
    +//	defer factory.Shutdown()    // Returns immediately if nothing was started.
    +//	genericInformer := factory.ForResource(resource)
    +//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
    +//	factory.Start(ctx.Done())          // Start processing these informers.
    +//	synced := factory.WaitForCacheSync(ctx.Done())
    +//	for v, ok := range synced {
    +//	    if !ok {
    +//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
    +//	        return
    +//	    }
    +//	}
    +//
    +//	// Informers can also be created after Start, but then
    +//	// Start must be called again:
    +//	anotherGenericInformer := factory.ForResource(resource)
    +//	factory.Start(ctx.Done())
    +type SharedInformerFactory interface {
    +	internalinterfaces.SharedInformerFactory
    +
    +	// Start initializes all requested informers. They are handled in goroutines
    +	// which run until the stop channel gets closed.
    +	// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
    +	Start(stopCh <-chan struct{})
    +
    +	// Shutdown marks a factory as shutting down. At that point no new
    +	// informers can be started anymore and Start will return without
    +	// doing anything.
    +	//
    +	// In addition, Shutdown blocks until all goroutines have terminated. For that
    +	// to happen, the close channel(s) that they were started with must be closed,
    +	// either before Shutdown gets called or while it is waiting.
    +	//
    +	// Shutdown may be called multiple times, even concurrently. All such calls will
    +	// block until all goroutines have terminated.
    +	Shutdown()
    +
    +	// WaitForCacheSync blocks until all started informers' caches were synced
    +	// or the stop channel gets closed.
    +	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
    +
    +	// ForResource gives generic access to a shared informer of the matching type.
    +	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
    +
    +	// InformerFor returns the SharedIndexInformer for obj using an internal
    +	// client.
    +	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
    +
    +	Admissionregistration() admissionregistration.Interface
    +	Internal() apiserverinternal.Interface
    +	Apps() apps.Interface
    +	Autoscaling() autoscaling.Interface
    +	Batch() batch.Interface
    +	Certificates() certificates.Interface
    +	Coordination() coordination.Interface
    +	Core() core.Interface
    +	Discovery() discovery.Interface
    +	Events() events.Interface
    +	Extensions() extensions.Interface
    +	Flowcontrol() flowcontrol.Interface
    +	Networking() networking.Interface
    +	Node() node.Interface
    +	Policy() policy.Interface
    +	Rbac() rbac.Interface
    +	Resource() resource.Interface
    +	Scheduling() scheduling.Interface
    +	Storage() storage.Interface
    +	Storagemigration() storagemigration.Interface
    +}
    +
    +func (f *sharedInformerFactory) Admissionregistration() admissionregistration.Interface {
    +	return admissionregistration.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Internal() apiserverinternal.Interface {
    +	return apiserverinternal.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Apps() apps.Interface {
    +	return apps.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface {
    +	return autoscaling.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Batch() batch.Interface {
    +	return batch.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Certificates() certificates.Interface {
    +	return certificates.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Coordination() coordination.Interface {
    +	return coordination.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Core() core.Interface {
    +	return core.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Discovery() discovery.Interface {
    +	return discovery.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Events() events.Interface {
    +	return events.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Extensions() extensions.Interface {
    +	return extensions.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Flowcontrol() flowcontrol.Interface {
    +	return flowcontrol.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Networking() networking.Interface {
    +	return networking.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Node() node.Interface {
    +	return node.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Policy() policy.Interface {
    +	return policy.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Rbac() rbac.Interface {
    +	return rbac.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Resource() resource.Interface {
    +	return resource.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Scheduling() scheduling.Interface {
    +	return scheduling.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Storage() storage.Interface {
    +	return storage.New(f, f.namespace, f.tweakListOptions)
    +}
    +
    +func (f *sharedInformerFactory) Storagemigration() storagemigration.Interface {
    +	return storagemigration.New(f, f.namespace, f.tweakListOptions)
    +}
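
factory.go is the hub that the per-group interfaces above plug into: the functional options configure it, InformerFor deduplicates informers by type, and Start/WaitForCacheSync/Shutdown manage their lifecycle. A minimal end-to-end sketch of that lifecycle, exercising the options defined in this file — the kubeconfig handling, namespace, and resync values are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative client setup from a local kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactoryWithOptions(client, 10*time.Minute,
		informers.WithNamespace("monitoring"), // limit all informers to one namespace (assumed name)
		informers.WithCustomResyncConfig(map[metav1.Object]time.Duration{
			&appsv1.Deployment{}: time.Minute, // faster resync for one type
		}),
		informers.WithTransform(func(obj interface{}) (interface{}, error) {
			// Illustrative transform: drop managedFields to shrink the cache.
			if acc, err := meta.Accessor(obj); err == nil {
				acc.SetManagedFields(nil)
			}
			return obj, nil
		}),
	)

	// Requesting a typed informer registers it with the factory; Start launches it.
	deployments := factory.Apps().V1().Deployments()
	_ = deployments.Informer()

	ctx, cancel := context.WithCancel(context.Background())
	factory.Start(ctx.Done())

	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			fmt.Fprintf(os.Stderr, "cache failed to sync: %v\n", typ)
		}
	}

	// ... read through deployments.Lister() against the synced cache ...

	// Shut down: close the stop channel first, then wait for the goroutines.
	cancel()
	factory.Shutdown()
}
```

Closing the stop channel before calling Shutdown matters: Shutdown only waits for the informer goroutines, and those goroutines only exit once the channel passed to Start is closed.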
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go
    new file mode 100644
    index 0000000000..48dd9a8a11
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package flowcontrol
    +
    +import (
    +	v1 "k8s.io/client-go/informers/flowcontrol/v1"
    +	v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1"
    +	v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2"
    +	v1beta3 "k8s.io/client-go/informers/flowcontrol/v1beta3"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +	// V1beta2 provides access to shared informers for resources in V1beta2.
    +	V1beta2() v1beta2.Interface
    +	// V1beta3 provides access to shared informers for resources in V1beta3.
    +	V1beta3() v1beta3.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta2 returns a new v1beta2.Interface.
    +func (g *group) V1beta2() v1beta2.Interface {
    +	return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta3 returns a new v1beta3.Interface.
    +func (g *group) V1beta3() v1beta3.Interface {
    +	return v1beta3.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go
    new file mode 100644
    index 0000000000..30c41b189b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/flowcontrol/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaInformer provides access to a shared informer and lister for
    +// FlowSchemas.
    +type FlowSchemaInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.FlowSchemaLister
    +}
    +
    +type flowSchemaInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1().FlowSchemas().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1().FlowSchemas().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1.FlowSchema{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1.FlowSchema{}, f.defaultInformer)
    +}
    +
    +func (f *flowSchemaInformer) Lister() v1.FlowSchemaLister {
    +	return v1.NewFlowSchemaLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go
    new file mode 100644
    index 0000000000..3de934900f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// FlowSchemas returns a FlowSchemaInformer.
    +	FlowSchemas() FlowSchemaInformer
    +	// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +	PriorityLevelConfigurations() PriorityLevelConfigurationInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// FlowSchemas returns a FlowSchemaInformer.
    +func (v *version) FlowSchemas() FlowSchemaInformer {
    +	return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer {
    +	return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..7092c25725
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/flowcontrol/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for
    +// PriorityLevelConfigurations.
    +type PriorityLevelConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.PriorityLevelConfigurationLister
    +}
    +
    +type priorityLevelConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1().PriorityLevelConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1().PriorityLevelConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1.PriorityLevelConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1.PriorityLevelConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Lister() v1.PriorityLevelConfigurationLister {
    +	return v1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer())
    +}
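Note (illustrative only, not part of the vendored patch): the flowcontrol v1 informers added above are consumed through the shared factory in the usual client-go pattern: touch the informer, start the factory, wait for the cache to sync, then read from the lister. A minimal sketch:

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/labels"
		"k8s.io/client-go/informers"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/cache"
	)

	func listFlowSchemas(client kubernetes.Interface, stopCh <-chan struct{}) error {
		factory := informers.NewSharedInformerFactory(client, 30*time.Second)

		// Request the informer before Start so the factory knows to run it.
		fsInformer := factory.Flowcontrol().V1().FlowSchemas()
		informer := fsInformer.Informer()

		factory.Start(stopCh)
		if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
			return fmt.Errorf("flowschema cache never synced")
		}

		// The lister serves reads from the informer's local cache.
		schemas, err := fsInformer.Lister().List(labels.Everything())
		if err != nil {
			return err
		}
		for _, fs := range schemas {
			fmt.Println(fs.Name)
		}
		return nil
	}
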
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go
    new file mode 100644
    index 0000000000..13f4ff0933
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaInformer provides access to a shared informer and lister for
    +// FlowSchemas.
    +type FlowSchemaInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.FlowSchemaLister
    +}
    +
    +type flowSchemaInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta1().FlowSchemas().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta1().FlowSchemas().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1beta1.FlowSchema{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1beta1.FlowSchema{}, f.defaultInformer)
    +}
    +
    +func (f *flowSchemaInformer) Lister() v1beta1.FlowSchemaLister {
    +	return v1beta1.NewFlowSchemaLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go
    new file mode 100644
    index 0000000000..50329bb0ac
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// FlowSchemas returns a FlowSchemaInformer.
    +	FlowSchemas() FlowSchemaInformer
    +	// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +	PriorityLevelConfigurations() PriorityLevelConfigurationInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// FlowSchemas returns a FlowSchemaInformer.
    +func (v *version) FlowSchemas() FlowSchemaInformer {
    +	return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer {
    +	return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..fa4835906a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for
    +// PriorityLevelConfigurations.
    +type PriorityLevelConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.PriorityLevelConfigurationLister
    +}
    +
    +type priorityLevelConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta1().PriorityLevelConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta1().PriorityLevelConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1beta1.PriorityLevelConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1beta1.PriorityLevelConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Lister() v1beta1.PriorityLevelConfigurationLister {
    +	return v1beta1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go
    new file mode 100644
    index 0000000000..6f6abecea8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaInformer provides access to a shared informer and lister for
    +// FlowSchemas.
    +type FlowSchemaInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta2.FlowSchemaLister
    +}
    +
    +type flowSchemaInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta2().FlowSchemas().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta2().FlowSchemas().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1beta2.FlowSchema{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1beta2.FlowSchema{}, f.defaultInformer)
    +}
    +
    +func (f *flowSchemaInformer) Lister() v1beta2.FlowSchemaLister {
    +	return v1beta2.NewFlowSchemaLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/interface.go
    new file mode 100644
    index 0000000000..142d552896
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/interface.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// FlowSchemas returns a FlowSchemaInformer.
    +	FlowSchemas() FlowSchemaInformer
    +	// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +	PriorityLevelConfigurations() PriorityLevelConfigurationInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// FlowSchemas returns a FlowSchemaInformer.
    +func (v *version) FlowSchemas() FlowSchemaInformer {
    +	return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer {
    +	return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..306a901851
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for
    +// PriorityLevelConfigurations.
    +type PriorityLevelConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta2.PriorityLevelConfigurationLister
    +}
    +
    +type priorityLevelConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta2().PriorityLevelConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta2().PriorityLevelConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1beta2.PriorityLevelConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1beta2.PriorityLevelConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Lister() v1beta2.PriorityLevelConfigurationLister {
    +	return v1beta2.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go
    new file mode 100644
    index 0000000000..56d8c8b112
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta3
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaInformer provides access to a shared informer and lister for
    +// FlowSchemas.
    +type FlowSchemaInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta3.FlowSchemaLister
    +}
    +
    +type flowSchemaInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta3().FlowSchemas().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta3().FlowSchemas().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1beta3.FlowSchema{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1beta3.FlowSchema{}, f.defaultInformer)
    +}
    +
    +func (f *flowSchemaInformer) Lister() v1beta3.FlowSchemaLister {
    +	return v1beta3.NewFlowSchemaLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go
    new file mode 100644
    index 0000000000..54c5414a2b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta3
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// FlowSchemas returns a FlowSchemaInformer.
    +	FlowSchemas() FlowSchemaInformer
    +	// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +	PriorityLevelConfigurations() PriorityLevelConfigurationInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// FlowSchemas returns a FlowSchemaInformer.
    +func (v *version) FlowSchemas() FlowSchemaInformer {
    +	return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer.
    +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer {
    +	return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..71f8d5b07f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta3
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for
    +// PriorityLevelConfigurations.
    +type PriorityLevelConfigurationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta3.PriorityLevelConfigurationLister
    +}
    +
    +type priorityLevelConfigurationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta3().PriorityLevelConfigurations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.FlowcontrolV1beta3().PriorityLevelConfigurations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&flowcontrolv1beta3.PriorityLevelConfiguration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&flowcontrolv1beta3.PriorityLevelConfiguration{}, f.defaultInformer)
    +}
    +
    +func (f *priorityLevelConfigurationInformer) Lister() v1beta3.PriorityLevelConfigurationLister {
    +	return v1beta3.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer())
    +}
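Note (illustrative only, not part of the vendored patch): the generic.go file added below wires each GroupVersionResource to its typed informer via ForResource, so callers can look informers up by resource instead of by the typed accessor chain. A minimal sketch, assuming the standard lowercase-plural resource name "flowschemas":

	package main

	import (
		"time"

		flowcontrolv1 "k8s.io/api/flowcontrol/v1"
		"k8s.io/apimachinery/pkg/labels"
		"k8s.io/client-go/informers"
		"k8s.io/client-go/kubernetes"
	)

	func genericFlowSchemas(client kubernetes.Interface) error {
		factory := informers.NewSharedInformerFactory(client, time.Minute)

		// Look the informer up by resource rather than via Flowcontrol().V1().
		gvr := flowcontrolv1.SchemeGroupVersion.WithResource("flowschemas")
		generic, err := factory.ForResource(gvr)
		if err != nil {
			return err
		}

		stopCh := make(chan struct{})
		defer close(stopCh)
		factory.Start(stopCh)
		factory.WaitForCacheSync(stopCh)

		// The generic lister returns runtime.Object values, not typed ones.
		objs, err := generic.Lister().List(labels.Everything())
		if err != nil {
			return err
		}
		_ = objs
		return nil
	}
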
    diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go
    new file mode 100644
    index 0000000000..39a9d3bf4f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/generic.go
    @@ -0,0 +1,439 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package informers
    +
    +import (
    +	"fmt"
    +
    +	v1 "k8s.io/api/admissionregistration/v1"
    +	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    +	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
    +	appsv1 "k8s.io/api/apps/v1"
    +	appsv1beta1 "k8s.io/api/apps/v1beta1"
    +	v1beta2 "k8s.io/api/apps/v1beta2"
    +	autoscalingv1 "k8s.io/api/autoscaling/v1"
    +	v2 "k8s.io/api/autoscaling/v2"
    +	v2beta1 "k8s.io/api/autoscaling/v2beta1"
    +	v2beta2 "k8s.io/api/autoscaling/v2beta2"
    +	batchv1 "k8s.io/api/batch/v1"
    +	batchv1beta1 "k8s.io/api/batch/v1beta1"
    +	certificatesv1 "k8s.io/api/certificates/v1"
    +	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
    +	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
    +	coordinationv1 "k8s.io/api/coordination/v1"
    +	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
    +	corev1 "k8s.io/api/core/v1"
    +	discoveryv1 "k8s.io/api/discovery/v1"
    +	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
    +	eventsv1 "k8s.io/api/events/v1"
    +	eventsv1beta1 "k8s.io/api/events/v1beta1"
    +	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    +	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
    +	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
    +	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
    +	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
    +	networkingv1 "k8s.io/api/networking/v1"
    +	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	nodev1 "k8s.io/api/node/v1"
    +	nodev1alpha1 "k8s.io/api/node/v1alpha1"
    +	nodev1beta1 "k8s.io/api/node/v1beta1"
    +	policyv1 "k8s.io/api/policy/v1"
    +	policyv1beta1 "k8s.io/api/policy/v1beta1"
    +	rbacv1 "k8s.io/api/rbac/v1"
    +	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	schedulingv1 "k8s.io/api/scheduling/v1"
    +	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
    +	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
    +	storagev1 "k8s.io/api/storage/v1"
    +	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
    +	schema "k8s.io/apimachinery/pkg/runtime/schema"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
    +// sharedInformers based on type
    +type GenericInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() cache.GenericLister
    +}
    +
    +type genericInformer struct {
    +	informer cache.SharedIndexInformer
    +	resource schema.GroupResource
    +}
    +
    +// Informer returns the SharedIndexInformer.
    +func (f *genericInformer) Informer() cache.SharedIndexInformer {
    +	return f.informer
    +}
    +
    +// Lister returns the GenericLister.
    +func (f *genericInformer) Lister() cache.GenericLister {
    +	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
    +}
    +
    +// ForResource gives generic access to a shared informer of the matching type
    +// TODO extend this to unknown resources with a client pool
    +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
    +	switch resource {
    +	// Group=admissionregistration.k8s.io, Version=v1
    +	case v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().MutatingWebhookConfigurations().Informer()}, nil
    +	case v1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingAdmissionPolicies().Informer()}, nil
    +	case v1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingAdmissionPolicyBindings().Informer()}, nil
    +	case v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil
    +
    +		// Group=admissionregistration.k8s.io, Version=v1alpha1
    +	case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()}, nil
    +	case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicyBindings().Informer()}, nil
    +
    +		// Group=admissionregistration.k8s.io, Version=v1beta1
    +	case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil
    +	case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicies().Informer()}, nil
    +	case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicyBindings().Informer()}, nil
    +	case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil
    +
    +		// Group=apps, Version=v1
    +	case appsv1.SchemeGroupVersion.WithResource("controllerrevisions"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ControllerRevisions().Informer()}, nil
    +	case appsv1.SchemeGroupVersion.WithResource("daemonsets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DaemonSets().Informer()}, nil
    +	case appsv1.SchemeGroupVersion.WithResource("deployments"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().Deployments().Informer()}, nil
    +	case appsv1.SchemeGroupVersion.WithResource("replicasets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ReplicaSets().Informer()}, nil
    +	case appsv1.SchemeGroupVersion.WithResource("statefulsets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().StatefulSets().Informer()}, nil
    +
    +		// Group=apps, Version=v1beta1
    +	case appsv1beta1.SchemeGroupVersion.WithResource("controllerrevisions"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().ControllerRevisions().Informer()}, nil
    +	case appsv1beta1.SchemeGroupVersion.WithResource("deployments"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().Deployments().Informer()}, nil
    +	case appsv1beta1.SchemeGroupVersion.WithResource("statefulsets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().StatefulSets().Informer()}, nil
    +
    +		// Group=apps, Version=v1beta2
    +	case v1beta2.SchemeGroupVersion.WithResource("controllerrevisions"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().ControllerRevisions().Informer()}, nil
    +	case v1beta2.SchemeGroupVersion.WithResource("daemonsets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().DaemonSets().Informer()}, nil
    +	case v1beta2.SchemeGroupVersion.WithResource("deployments"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().Deployments().Informer()}, nil
    +	case v1beta2.SchemeGroupVersion.WithResource("replicasets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().ReplicaSets().Informer()}, nil
    +	case v1beta2.SchemeGroupVersion.WithResource("statefulsets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil
    +
    +		// Group=autoscaling, Version=v1
    +	case autoscalingv1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil
    +
    +		// Group=autoscaling, Version=v2
    +	case v2.SchemeGroupVersion.WithResource("horizontalpodautoscalers"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2().HorizontalPodAutoscalers().Informer()}, nil
    +
    +		// Group=autoscaling, Version=v2beta1
    +	case v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().HorizontalPodAutoscalers().Informer()}, nil
    +
    +		// Group=autoscaling, Version=v2beta2
    +	case v2beta2.SchemeGroupVersion.WithResource("horizontalpodautoscalers"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta2().HorizontalPodAutoscalers().Informer()}, nil
    +
    +		// Group=batch, Version=v1
    +	case batchv1.SchemeGroupVersion.WithResource("cronjobs"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1().CronJobs().Informer()}, nil
    +	case batchv1.SchemeGroupVersion.WithResource("jobs"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1().Jobs().Informer()}, nil
    +
    +		// Group=batch, Version=v1beta1
    +	case batchv1beta1.SchemeGroupVersion.WithResource("cronjobs"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1beta1().CronJobs().Informer()}, nil
    +
    +		// Group=certificates.k8s.io, Version=v1
    +	case certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1().CertificateSigningRequests().Informer()}, nil
    +
    +		// Group=certificates.k8s.io, Version=v1alpha1
    +	case certificatesv1alpha1.SchemeGroupVersion.WithResource("clustertrustbundles"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1alpha1().ClusterTrustBundles().Informer()}, nil
    +
    +		// Group=certificates.k8s.io, Version=v1beta1
    +	case certificatesv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil
    +
    +		// Group=coordination.k8s.io, Version=v1
    +	case coordinationv1.SchemeGroupVersion.WithResource("leases"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1().Leases().Informer()}, nil
    +
    +		// Group=coordination.k8s.io, Version=v1alpha1
    +	case coordinationv1alpha1.SchemeGroupVersion.WithResource("leasecandidates"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1alpha1().LeaseCandidates().Informer()}, nil
    +
    +		// Group=coordination.k8s.io, Version=v1beta1
    +	case coordinationv1beta1.SchemeGroupVersion.WithResource("leases"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1beta1().Leases().Informer()}, nil
    +
    +		// Group=core, Version=v1
    +	case corev1.SchemeGroupVersion.WithResource("componentstatuses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ComponentStatuses().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("configmaps"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ConfigMaps().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("endpoints"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Endpoints().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("events"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Events().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("limitranges"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().LimitRanges().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("namespaces"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Namespaces().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("nodes"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Nodes().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("persistentvolumes"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PersistentVolumes().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PersistentVolumeClaims().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("pods"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Pods().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("podtemplates"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PodTemplates().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("replicationcontrollers"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ReplicationControllers().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("resourcequotas"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ResourceQuotas().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("secrets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Secrets().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("services"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Services().Informer()}, nil
    +	case corev1.SchemeGroupVersion.WithResource("serviceaccounts"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ServiceAccounts().Informer()}, nil
    +
    +		// Group=discovery.k8s.io, Version=v1
    +	case discoveryv1.SchemeGroupVersion.WithResource("endpointslices"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1().EndpointSlices().Informer()}, nil
    +
    +		// Group=discovery.k8s.io, Version=v1beta1
    +	case discoveryv1beta1.SchemeGroupVersion.WithResource("endpointslices"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1beta1().EndpointSlices().Informer()}, nil
    +
    +		// Group=events.k8s.io, Version=v1
    +	case eventsv1.SchemeGroupVersion.WithResource("events"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1().Events().Informer()}, nil
    +
    +		// Group=events.k8s.io, Version=v1beta1
    +	case eventsv1beta1.SchemeGroupVersion.WithResource("events"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil
    +
    +		// Group=extensions, Version=v1beta1
    +	case extensionsv1beta1.SchemeGroupVersion.WithResource("daemonsets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().DaemonSets().Informer()}, nil
    +	case extensionsv1beta1.SchemeGroupVersion.WithResource("deployments"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Deployments().Informer()}, nil
    +	case extensionsv1beta1.SchemeGroupVersion.WithResource("ingresses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil
    +	case extensionsv1beta1.SchemeGroupVersion.WithResource("networkpolicies"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().NetworkPolicies().Informer()}, nil
    +	case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1
    +	case flowcontrolv1.SchemeGroupVersion.WithResource("flowschemas"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1().FlowSchemas().Informer()}, nil
    +	case flowcontrolv1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1().PriorityLevelConfigurations().Informer()}, nil
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1beta1
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithResource("flowschemas"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().FlowSchemas().Informer()}, nil
    +	case flowcontrolv1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().PriorityLevelConfigurations().Informer()}, nil
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1beta2
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithResource("flowschemas"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta2().FlowSchemas().Informer()}, nil
    +	case flowcontrolv1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta2().PriorityLevelConfigurations().Informer()}, nil
    +
    +		// Group=flowcontrol.apiserver.k8s.io, Version=v1beta3
    +	case v1beta3.SchemeGroupVersion.WithResource("flowschemas"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta3().FlowSchemas().Informer()}, nil
    +	case v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta3().PriorityLevelConfigurations().Informer()}, nil
    +
    +		// Group=internal.apiserver.k8s.io, Version=v1alpha1
    +	case apiserverinternalv1alpha1.SchemeGroupVersion.WithResource("storageversions"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Internal().V1alpha1().StorageVersions().Informer()}, nil
    +
    +		// Group=networking.k8s.io, Version=v1
    +	case networkingv1.SchemeGroupVersion.WithResource("ingresses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().Ingresses().Informer()}, nil
    +	case networkingv1.SchemeGroupVersion.WithResource("ingressclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().IngressClasses().Informer()}, nil
    +	case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil
    +
    +		// Group=networking.k8s.io, Version=v1alpha1
    +	case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil
    +	case networkingv1alpha1.SchemeGroupVersion.WithResource("servicecidrs"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServiceCIDRs().Informer()}, nil
    +
    +		// Group=networking.k8s.io, Version=v1beta1
    +	case networkingv1beta1.SchemeGroupVersion.WithResource("ipaddresses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IPAddresses().Informer()}, nil
    +	case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil
    +	case networkingv1beta1.SchemeGroupVersion.WithResource("ingressclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IngressClasses().Informer()}, nil
    +	case networkingv1beta1.SchemeGroupVersion.WithResource("servicecidrs"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().ServiceCIDRs().Informer()}, nil
    +
    +		// Group=node.k8s.io, Version=v1
    +	case nodev1.SchemeGroupVersion.WithResource("runtimeclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1().RuntimeClasses().Informer()}, nil
    +
    +		// Group=node.k8s.io, Version=v1alpha1
    +	case nodev1alpha1.SchemeGroupVersion.WithResource("runtimeclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1alpha1().RuntimeClasses().Informer()}, nil
    +
    +		// Group=node.k8s.io, Version=v1beta1
    +	case nodev1beta1.SchemeGroupVersion.WithResource("runtimeclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1beta1().RuntimeClasses().Informer()}, nil
    +
    +		// Group=policy, Version=v1
    +	case policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1().PodDisruptionBudgets().Informer()}, nil
    +
    +		// Group=policy, Version=v1beta1
    +	case policyv1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil
    +
    +		// Group=rbac.authorization.k8s.io, Version=v1
    +	case rbacv1.SchemeGroupVersion.WithResource("clusterroles"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().ClusterRoles().Informer()}, nil
    +	case rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().ClusterRoleBindings().Informer()}, nil
    +	case rbacv1.SchemeGroupVersion.WithResource("roles"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().Roles().Informer()}, nil
    +	case rbacv1.SchemeGroupVersion.WithResource("rolebindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().RoleBindings().Informer()}, nil
    +
    +		// Group=rbac.authorization.k8s.io, Version=v1alpha1
    +	case rbacv1alpha1.SchemeGroupVersion.WithResource("clusterroles"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().ClusterRoles().Informer()}, nil
    +	case rbacv1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().ClusterRoleBindings().Informer()}, nil
    +	case rbacv1alpha1.SchemeGroupVersion.WithResource("roles"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().Roles().Informer()}, nil
    +	case rbacv1alpha1.SchemeGroupVersion.WithResource("rolebindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().RoleBindings().Informer()}, nil
    +
    +		// Group=rbac.authorization.k8s.io, Version=v1beta1
    +	case rbacv1beta1.SchemeGroupVersion.WithResource("clusterroles"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().ClusterRoles().Informer()}, nil
    +	case rbacv1beta1.SchemeGroupVersion.WithResource("clusterrolebindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().ClusterRoleBindings().Informer()}, nil
    +	case rbacv1beta1.SchemeGroupVersion.WithResource("roles"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().Roles().Informer()}, nil
    +	case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil
    +
    +		// Group=resource.k8s.io, Version=v1alpha3
    +	case v1alpha3.SchemeGroupVersion.WithResource("deviceclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().DeviceClasses().Informer()}, nil
    +	case v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().PodSchedulingContexts().Informer()}, nil
    +	case v1alpha3.SchemeGroupVersion.WithResource("resourceclaims"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaims().Informer()}, nil
    +	case v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaimTemplates().Informer()}, nil
    +	case v1alpha3.SchemeGroupVersion.WithResource("resourceslices"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceSlices().Informer()}, nil
    +
    +		// Group=scheduling.k8s.io, Version=v1
    +	case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1().PriorityClasses().Informer()}, nil
    +
    +		// Group=scheduling.k8s.io, Version=v1alpha1
    +	case schedulingv1alpha1.SchemeGroupVersion.WithResource("priorityclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PriorityClasses().Informer()}, nil
    +
    +		// Group=scheduling.k8s.io, Version=v1beta1
    +	case schedulingv1beta1.SchemeGroupVersion.WithResource("priorityclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1beta1().PriorityClasses().Informer()}, nil
    +
    +		// Group=storage.k8s.io, Version=v1
    +	case storagev1.SchemeGroupVersion.WithResource("csidrivers"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSIDrivers().Informer()}, nil
    +	case storagev1.SchemeGroupVersion.WithResource("csinodes"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSINodes().Informer()}, nil
    +	case storagev1.SchemeGroupVersion.WithResource("csistoragecapacities"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSIStorageCapacities().Informer()}, nil
    +	case storagev1.SchemeGroupVersion.WithResource("storageclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil
    +	case storagev1.SchemeGroupVersion.WithResource("volumeattachments"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().VolumeAttachments().Informer()}, nil
    +
    +		// Group=storage.k8s.io, Version=v1alpha1
    +	case storagev1alpha1.SchemeGroupVersion.WithResource("csistoragecapacities"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().CSIStorageCapacities().Informer()}, nil
    +	case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattachments"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil
    +	case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttributesClasses().Informer()}, nil
    +
    +		// Group=storage.k8s.io, Version=v1beta1
    +	case storagev1beta1.SchemeGroupVersion.WithResource("csidrivers"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().CSIDrivers().Informer()}, nil
    +	case storagev1beta1.SchemeGroupVersion.WithResource("csinodes"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().CSINodes().Informer()}, nil
    +	case storagev1beta1.SchemeGroupVersion.WithResource("csistoragecapacities"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().CSIStorageCapacities().Informer()}, nil
    +	case storagev1beta1.SchemeGroupVersion.WithResource("storageclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil
    +	case storagev1beta1.SchemeGroupVersion.WithResource("volumeattachments"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttachments().Informer()}, nil
    +	case storagev1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttributesClasses().Informer()}, nil
    +
    +		// Group=storagemigration.k8s.io, Version=v1alpha1
    +	case storagemigrationv1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations"):
    +		return &genericInformer{resource: resource.GroupResource(), informer: f.Storagemigration().V1alpha1().StorageVersionMigrations().Informer()}, nil
    +
    +	}
    +
    +	return nil, fmt.Errorf("no informer found for %v", resource)
    +}
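
Not part of the vendored diff above: a minimal sketch of how the generated ForResource switch is typically consumed through the client-go SharedInformerFactory. The in-cluster config, the ten-minute resync period, and the choice of apps/v1 Deployments are illustrative assumptions, not anything prescribed by this change.

package main

import (
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the program runs inside a cluster; use clientcmd instead for
	// out-of-cluster development.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	// ForResource dispatches through the generated switch above and returns a
	// GenericInformer backed by the shared apps/v1 Deployments informer.
	gi, err := factory.ForResource(appsv1.SchemeGroupVersion.WithResource("deployments"))
	if err != nil {
		panic(err)
	}

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// The generic lister returns runtime.Objects from the shared cache.
	objs, err := gi.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("deployments in cache:", len(objs))
}

Unknown group/version/resource combinations fall through to the final return and surface as the "no informer found" error shown at the end of the switch.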
    diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go
    new file mode 100644
    index 0000000000..b00ed70cfd
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go
    @@ -0,0 +1,40 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package internalinterfaces
    +
    +import (
    +	time "time"
    +
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// NewInformerFunc takes kubernetes.Interface and time.Duration to return a SharedIndexInformer.
    +type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer
    +
    +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
    +type SharedInformerFactory interface {
    +	Start(stopCh <-chan struct{})
    +	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
    +}
    +
    +// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
    +type TweakListOptionsFunc func(*v1.ListOptions)
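
Not part of the vendored diff: a small sketch of how TweakListOptionsFunc is usually supplied via the factory options so that it applies to every informer the factory builds. The namespace and label selector values are illustrative assumptions.

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newFilteredFactory restricts every informer built by the returned factory
// to one namespace and one label selector.
func newFilteredFactory(client kubernetes.Interface) informers.SharedInformerFactory {
	return informers.NewSharedInformerFactoryWithOptions(
		client,
		30*time.Minute, // resync period handed to every NewInformerFunc
		informers.WithNamespace("monitoring"), // illustrative namespace
		informers.WithTweakListOptions(func(o *metav1.ListOptions) {
			// Applied to every List and Watch issued by informers from this factory.
			o.LabelSelector = "app.kubernetes.io/managed-by=prometheus-engine"
		}),
	)
}

The SharedInformerFactory interface declared here is the internal, cycle-free view of that factory: the per-resource informers below only need Start and InformerFor.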
    diff --git a/vendor/k8s.io/client-go/informers/networking/interface.go b/vendor/k8s.io/client-go/informers/networking/interface.go
    new file mode 100644
    index 0000000000..1c775c465b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package networking
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1 "k8s.io/client-go/informers/networking/v1"
    +	v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/networking/v1beta1"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1/ingress.go b/vendor/k8s.io/client-go/informers/networking/v1/ingress.go
    new file mode 100644
    index 0000000000..06c317ad31
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1/ingress.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1 "k8s.io/api/networking/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/networking/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressInformer provides access to a shared informer and lister for
    +// Ingresses.
    +type IngressInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.IngressLister
    +}
    +
    +type ingressInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewIngressInformer constructs a new informer for Ingress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredIngressInformer constructs a new informer for Ingress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1().Ingresses(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1().Ingresses(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1.Ingress{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *ingressInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1.Ingress{}, f.defaultInformer)
    +}
    +
    +func (f *ingressInformer) Lister() v1.IngressLister {
    +	return v1.NewIngressLister(f.Informer().GetIndexer())
    +}
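
Not part of the vendored diff: a minimal sketch of the usual consumption pattern for the typed Ingress informer above, combining an event handler with the backing lister. The kubeconfig path, resync period, and printed output are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	ingresses := factory.Networking().V1().Ingresses()

	// Informer() registers the shared informer with the factory; the handler
	// fires for every object delivered by the initial list and the watch.
	if _, err := ingresses.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			ing := obj.(*networkingv1.Ingress)
			fmt.Printf("observed ingress %s/%s\n", ing.Namespace, ing.Name)
		},
	}); err != nil {
		panic(err)
	}

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// The lister reads from the same shared cache as the handler above.
	all, err := ingresses.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("ingresses in cache:", len(all))
}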
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go
    new file mode 100644
    index 0000000000..15514745bf
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1 "k8s.io/api/networking/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/networking/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressClassInformer provides access to a shared informer and lister for
    +// IngressClasses.
    +type IngressClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.IngressClassLister
    +}
    +
    +type ingressClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewIngressClassInformer constructs a new informer for IngressClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredIngressClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredIngressClassInformer constructs a new informer for IngressClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1().IngressClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1().IngressClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1.IngressClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *ingressClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredIngressClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *ingressClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1.IngressClass{}, f.defaultInformer)
    +}
    +
    +func (f *ingressClassInformer) Lister() v1.IngressClassLister {
    +	return v1.NewIngressClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1/interface.go
    new file mode 100644
    index 0000000000..a48d92c4ef
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1/interface.go
    @@ -0,0 +1,59 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// Ingresses returns a IngressInformer.
    +	Ingresses() IngressInformer
    +	// IngressClasses returns a IngressClassInformer.
    +	IngressClasses() IngressClassInformer
    +	// NetworkPolicies returns a NetworkPolicyInformer.
    +	NetworkPolicies() NetworkPolicyInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// Ingresses returns a IngressInformer.
    +func (v *version) Ingresses() IngressInformer {
    +	return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// IngressClasses returns a IngressClassInformer.
    +func (v *version) IngressClasses() IngressClassInformer {
    +	return &ingressClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// NetworkPolicies returns a NetworkPolicyInformer.
    +func (v *version) NetworkPolicies() NetworkPolicyInformer {
    +	return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go
    new file mode 100644
    index 0000000000..a75c9ac21f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1 "k8s.io/api/networking/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/networking/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// NetworkPolicyInformer provides access to a shared informer and lister for
    +// NetworkPolicies.
    +type NetworkPolicyInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.NetworkPolicyLister
    +}
    +
    +type networkPolicyInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1().NetworkPolicies(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1().NetworkPolicies(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1.NetworkPolicy{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1.NetworkPolicy{}, f.defaultInformer)
    +}
    +
    +func (f *networkPolicyInformer) Lister() v1.NetworkPolicyLister {
    +	return v1.NewNetworkPolicyLister(f.Informer().GetIndexer())
    +}
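
Not part of the vendored diff: a short sketch of the namespaced lister accessor that the NetworkPolicy informer exposes, since it is the piece that relies on the NamespaceIndex configured in defaultInformer above. The "default" namespace and resync period are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	// Lister() registers the shared informer with the factory before Start.
	npLister := factory.Networking().V1().NetworkPolicies().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Namespaced listers narrow the shared cache via the namespace index
	// (cache.NamespaceIndex) wired up in defaultInformer above.
	policies, err := npLister.NetworkPolicies("default").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("network policies in default:", len(policies))
}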
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..ae9883b55f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go
    @@ -0,0 +1,52 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// IPAddresses returns a IPAddressInformer.
    +	IPAddresses() IPAddressInformer
    +	// ServiceCIDRs returns a ServiceCIDRInformer.
    +	ServiceCIDRs() ServiceCIDRInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// IPAddresses returns a IPAddressInformer.
    +func (v *version) IPAddresses() IPAddressInformer {
    +	return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ServiceCIDRs returns a ServiceCIDRInformer.
    +func (v *version) ServiceCIDRs() ServiceCIDRInformer {
    +	return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go
    new file mode 100644
    index 0000000000..a1083dbf0a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// IPAddressInformer provides access to a shared informer and lister for
    +// IPAddresses.
    +type IPAddressInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.IPAddressLister
    +}
    +
    +type iPAddressInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewIPAddressInformer constructs a new informer for IPAddress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredIPAddressInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredIPAddressInformer constructs a new informer for IPAddress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1alpha1().IPAddresses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1alpha1().IPAddresses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1alpha1.IPAddress{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredIPAddressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *iPAddressInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1alpha1.IPAddress{}, f.defaultInformer)
    +}
    +
    +func (f *iPAddressInformer) Lister() v1alpha1.IPAddressLister {
    +	return v1alpha1.NewIPAddressLister(f.Informer().GetIndexer())
    +}
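
Not part of the vendored diff: a sketch of using the standalone NewIPAddressInformer constructor directly, without the shared factory. This is only reasonable for a one-off watcher; the generated doc comment above is right that the factory is the better default because informers for the same type are then shared. It also assumes the cluster actually serves networking.k8s.io/v1alpha1 IPAddress objects (an alpha API behind a feature gate), which may not hold.

package main

import (
	"fmt"
	"time"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	informersv1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Standalone informer: the caller owns the indexers and the Run loop.
	inf := informersv1alpha1.NewIPAddressInformer(client, 30*time.Minute, cache.Indexers{})

	if _, err := inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			ip := obj.(*networkingv1alpha1.IPAddress)
			fmt.Println("observed IPAddress", ip.Name) // cluster-scoped: no namespace
		},
	}); err != nil {
		panic(err)
	}

	stop := make(chan struct{})
	defer close(stop)
	go inf.Run(stop)
	cache.WaitForCacheSync(stop, inf.HasSynced)

	select {} // block forever; a real controller would wire this into its run loop
}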
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go
    new file mode 100644
    index 0000000000..57e6021431
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceCIDRInformer provides access to a shared informer and lister for
    +// ServiceCIDRs.
    +type ServiceCIDRInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.ServiceCIDRLister
    +}
    +
    +type serviceCIDRInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1alpha1().ServiceCIDRs().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1alpha1().ServiceCIDRs().Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1alpha1.ServiceCIDR{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1alpha1.ServiceCIDR{}, f.defaultInformer)
    +}
    +
    +func (f *serviceCIDRInformer) Lister() v1alpha1.ServiceCIDRLister {
    +	return v1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go
    new file mode 100644
    index 0000000000..8800d6c9cd
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/networking/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressInformer provides access to a shared informer and lister for
    +// Ingresses.
    +type IngressInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.IngressLister
    +}
    +
    +type ingressInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewIngressInformer constructs a new informer for Ingress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredIngressInformer constructs a new informer for Ingress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().Ingresses(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().Ingresses(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1beta1.Ingress{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *ingressInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1beta1.Ingress{}, f.defaultInformer)
    +}
    +
    +func (f *ingressInformer) Lister() v1beta1.IngressLister {
    +	return v1beta1.NewIngressLister(f.Informer().GetIndexer())
    +}
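
The generated doc comments above recommend obtaining informers through a shared informer factory rather than constructing standalone ones, so that watches and caches are shared. As a hedged illustration only (not part of this vendored change), the sketch below shows one plausible way a consumer could reach the new networking/v1beta1 Ingress informer via `informers.NewSharedInformerFactory`; the in-cluster config, the 10-minute resync period, and the `main` wrapper are assumptions made for the example.

```go
// Minimal sketch: list Ingresses from the shared informer cache.
// Assumes the process runs in-cluster; error handling kept deliberately terse.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One factory shares caches and watch connections across all informers it hands out.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	ingressLister := factory.Networking().V1beta1().Ingresses().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Reads come from the local cache, not from the API server.
	ingresses, err := ingressLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d Ingresses in the cache\n", len(ingresses))
}
```

Because the factory deduplicates informers by object type, repeated calls to `Ingresses()` elsewhere in the same process reuse the same underlying watch.
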
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go
    new file mode 100644
    index 0000000000..17864299bc
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/networking/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressClassInformer provides access to a shared informer and lister for
    +// IngressClasses.
    +type IngressClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.IngressClassLister
    +}
    +
    +type ingressClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewIngressClassInformer constructs a new informer for IngressClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredIngressClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredIngressClassInformer constructs a new informer for IngressClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().IngressClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().IngressClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1beta1.IngressClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *ingressClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredIngressClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *ingressClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1beta1.IngressClass{}, f.defaultInformer)
    +}
    +
    +func (f *ingressClassInformer) Lister() v1beta1.IngressClassLister {
    +	return v1beta1.NewIngressClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go
    new file mode 100644
    index 0000000000..974a8fd5bf
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// IPAddresses returns a IPAddressInformer.
    +	IPAddresses() IPAddressInformer
    +	// Ingresses returns a IngressInformer.
    +	Ingresses() IngressInformer
    +	// IngressClasses returns a IngressClassInformer.
    +	IngressClasses() IngressClassInformer
    +	// ServiceCIDRs returns a ServiceCIDRInformer.
    +	ServiceCIDRs() ServiceCIDRInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// IPAddresses returns a IPAddressInformer.
    +func (v *version) IPAddresses() IPAddressInformer {
    +	return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Ingresses returns a IngressInformer.
    +func (v *version) Ingresses() IngressInformer {
    +	return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// IngressClasses returns a IngressClassInformer.
    +func (v *version) IngressClasses() IngressClassInformer {
    +	return &ingressClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ServiceCIDRs returns a ServiceCIDRInformer.
    +func (v *version) ServiceCIDRs() ServiceCIDRInformer {
    +	return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go
    new file mode 100644
    index 0000000000..2a2dfa2907
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/networking/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// IPAddressInformer provides access to a shared informer and lister for
    +// IPAddresses.
    +type IPAddressInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.IPAddressLister
    +}
    +
    +type iPAddressInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewIPAddressInformer constructs a new informer for IPAddress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredIPAddressInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredIPAddressInformer constructs a new informer for IPAddress type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().IPAddresses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().IPAddresses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1beta1.IPAddress{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredIPAddressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *iPAddressInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1beta1.IPAddress{}, f.defaultInformer)
    +}
    +
    +func (f *iPAddressInformer) Lister() v1beta1.IPAddressLister {
    +	return v1beta1.NewIPAddressLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go
    new file mode 100644
    index 0000000000..d5a9ce0146
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/networking/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceCIDRInformer provides access to a shared informer and lister for
    +// ServiceCIDRs.
    +type ServiceCIDRInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ServiceCIDRLister
    +}
    +
    +type serviceCIDRInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().ServiceCIDRs().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NetworkingV1beta1().ServiceCIDRs().Watch(context.TODO(), options)
    +			},
    +		},
    +		&networkingv1beta1.ServiceCIDR{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&networkingv1beta1.ServiceCIDR{}, f.defaultInformer)
    +}
    +
    +func (f *serviceCIDRInformer) Lister() v1beta1.ServiceCIDRLister {
    +	return v1beta1.NewServiceCIDRLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/node/interface.go b/vendor/k8s.io/client-go/informers/node/interface.go
    new file mode 100644
    index 0000000000..61ed5af76a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/node/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package node
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1 "k8s.io/client-go/informers/node/v1"
    +	v1alpha1 "k8s.io/client-go/informers/node/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/node/v1beta1"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/node/v1/interface.go b/vendor/k8s.io/client-go/informers/node/v1/interface.go
    new file mode 100644
    index 0000000000..913fec4aca
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/node/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// RuntimeClasses returns a RuntimeClassInformer.
    +	RuntimeClasses() RuntimeClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// RuntimeClasses returns a RuntimeClassInformer.
    +func (v *version) RuntimeClasses() RuntimeClassInformer {
    +	return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go
    new file mode 100644
    index 0000000000..293f4e2e2b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	nodev1 "k8s.io/api/node/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/node/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RuntimeClassInformer provides access to a shared informer and lister for
    +// RuntimeClasses.
    +type RuntimeClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.RuntimeClassLister
    +}
    +
    +type runtimeClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NodeV1().RuntimeClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NodeV1().RuntimeClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&nodev1.RuntimeClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&nodev1.RuntimeClass{}, f.defaultInformer)
    +}
    +
    +func (f *runtimeClassInformer) Lister() v1.RuntimeClassLister {
    +	return v1.NewRuntimeClassLister(f.Informer().GetIndexer())
    +}
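
For cluster-scoped types such as RuntimeClass, consumers typically attach event handlers to the shared informer rather than polling the lister. The sketch below is an assumption-laden illustration (again not part of this change): the in-cluster config, the 30-second resync, and the blocking `select {}` stand in for whatever lifecycle management a real controller would use.

```go
// Minimal sketch: react to RuntimeClass add/delete events via the shared informer.
package main

import (
	"fmt"
	"time"

	nodev1 "k8s.io/api/node/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	rcInformer := factory.Node().V1().RuntimeClasses().Informer()

	// Handlers fire from the informer's processing loop once the cache has synced.
	rcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			rc := obj.(*nodev1.RuntimeClass)
			fmt.Printf("RuntimeClass added: %s (handler: %s)\n", rc.Name, rc.Handler)
		},
		DeleteFunc: func(obj interface{}) {
			if rc, ok := obj.(*nodev1.RuntimeClass); ok {
				fmt.Printf("RuntimeClass deleted: %s\n", rc.Name)
			}
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	select {} // block forever; a real controller would tie this to a context
}
```
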
    diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..c56442957e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// RuntimeClasses returns a RuntimeClassInformer.
    +	RuntimeClasses() RuntimeClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// RuntimeClasses returns a RuntimeClassInformer.
    +func (v *version) RuntimeClasses() RuntimeClassInformer {
    +	return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go
    new file mode 100644
    index 0000000000..d314a9573c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	nodev1alpha1 "k8s.io/api/node/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/node/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RuntimeClassInformer provides access to a shared informer and lister for
    +// RuntimeClasses.
    +type RuntimeClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.RuntimeClassLister
    +}
    +
    +type runtimeClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NodeV1alpha1().RuntimeClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NodeV1alpha1().RuntimeClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&nodev1alpha1.RuntimeClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&nodev1alpha1.RuntimeClass{}, f.defaultInformer)
    +}
    +
    +func (f *runtimeClassInformer) Lister() v1alpha1.RuntimeClassLister {
    +	return v1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go
    new file mode 100644
    index 0000000000..44a1defb6b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// RuntimeClasses returns a RuntimeClassInformer.
    +	RuntimeClasses() RuntimeClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// RuntimeClasses returns a RuntimeClassInformer.
    +func (v *version) RuntimeClasses() RuntimeClassInformer {
    +	return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go
    new file mode 100644
    index 0000000000..07619b2306
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	nodev1beta1 "k8s.io/api/node/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/node/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RuntimeClassInformer provides access to a shared informer and lister for
    +// RuntimeClasses.
    +type RuntimeClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.RuntimeClassLister
    +}
    +
    +type runtimeClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NodeV1beta1().RuntimeClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.NodeV1beta1().RuntimeClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&nodev1beta1.RuntimeClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&nodev1beta1.RuntimeClass{}, f.defaultInformer)
    +}
    +
    +func (f *runtimeClassInformer) Lister() v1beta1.RuntimeClassLister {
    +	return v1beta1.NewRuntimeClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/policy/interface.go b/vendor/k8s.io/client-go/informers/policy/interface.go
    new file mode 100644
    index 0000000000..889cb8152c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/policy/interface.go
    @@ -0,0 +1,54 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package policy
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1 "k8s.io/client-go/informers/policy/v1"
    +	v1beta1 "k8s.io/client-go/informers/policy/v1beta1"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/policy/v1/interface.go b/vendor/k8s.io/client-go/informers/policy/v1/interface.go
    new file mode 100644
    index 0000000000..2c42e1993c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/policy/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// PodDisruptionBudgets returns a PodDisruptionBudgetInformer.
    +	PodDisruptionBudgets() PodDisruptionBudgetInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// PodDisruptionBudgets returns a PodDisruptionBudgetInformer.
    +func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer {
    +	return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go
    new file mode 100644
    index 0000000000..436598512a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	policyv1 "k8s.io/api/policy/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/policy/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PodDisruptionBudgetInformer provides access to a shared informer and lister for
    +// PodDisruptionBudgets.
    +type PodDisruptionBudgetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.PodDisruptionBudgetLister
    +}
    +
    +type podDisruptionBudgetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPodDisruptionBudgetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.PolicyV1().PodDisruptionBudgets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.PolicyV1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&policyv1.PodDisruptionBudget{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPodDisruptionBudgetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&policyv1.PodDisruptionBudget{}, f.defaultInformer)
    +}
    +
    +func (f *podDisruptionBudgetInformer) Lister() v1.PodDisruptionBudgetLister {
    +	return v1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer())
    +}
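
Namespaced informers such as the PodDisruptionBudget one above expose a lister that can be scoped per namespace out of the same shared cache. The following is a speculative usage sketch under stated assumptions (kubeconfig at the default home path, the "default" namespace, a 10-minute resync), not an implementation shipped by this change.

```go
// Minimal sketch: read PodDisruptionBudgets for one namespace from the shared cache.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig exists at the conventional ~/.kube/config location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	pdbLister := factory.Policy().V1().PodDisruptionBudgets().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// The namespaced lister restricts reads to a single namespace within the shared cache.
	pdbs, err := pdbLister.PodDisruptionBudgets("default").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, pdb := range pdbs {
		fmt.Printf("PDB %s: disruptionsAllowed=%d\n", pdb.Name, pdb.Status.DisruptionsAllowed)
	}
}
```
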
    diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go
    new file mode 100644
    index 0000000000..055c8adc55
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// PodDisruptionBudgets returns a PodDisruptionBudgetInformer.
    +	PodDisruptionBudgets() PodDisruptionBudgetInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// PodDisruptionBudgets returns a PodDisruptionBudgetInformer.
    +func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer {
    +	return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go
    new file mode 100644
    index 0000000000..4530343ecc
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	policyv1beta1 "k8s.io/api/policy/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/policy/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PodDisruptionBudgetInformer provides access to a shared informer and lister for
    +// PodDisruptionBudgets.
    +type PodDisruptionBudgetInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.PodDisruptionBudgetLister
    +}
    +
    +type podDisruptionBudgetInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPodDisruptionBudgetInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&policyv1beta1.PodDisruptionBudget{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPodDisruptionBudgetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&policyv1beta1.PodDisruptionBudget{}, f.defaultInformer)
    +}
    +
    +func (f *podDisruptionBudgetInformer) Lister() v1beta1.PodDisruptionBudgetLister {
    +	return v1beta1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/interface.go b/vendor/k8s.io/client-go/informers/rbac/interface.go
    new file mode 100644
    index 0000000000..228811f8a2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package rbac
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1 "k8s.io/client-go/informers/rbac/v1"
    +	v1alpha1 "k8s.io/client-go/informers/rbac/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/rbac/v1beta1"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go
    new file mode 100644
    index 0000000000..0572be264b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1 "k8s.io/api/rbac/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/rbac/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleInformer provides access to a shared informer and lister for
    +// ClusterRoles.
    +type ClusterRoleInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ClusterRoleLister
    +}
    +
    +type clusterRoleInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewClusterRoleInformer constructs a new informer for ClusterRole type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().ClusterRoles().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().ClusterRoles().Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1.ClusterRole{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1.ClusterRole{}, f.defaultInformer)
    +}
    +
    +func (f *clusterRoleInformer) Lister() v1.ClusterRoleLister {
    +	return v1.NewClusterRoleLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go
    new file mode 100644
    index 0000000000..51026c0558
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1 "k8s.io/api/rbac/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/rbac/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleBindingInformer provides access to a shared informer and lister for
    +// ClusterRoleBindings.
    +type ClusterRoleBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.ClusterRoleBindingLister
    +}
    +
    +type clusterRoleBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().ClusterRoleBindings().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().ClusterRoleBindings().Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1.ClusterRoleBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1.ClusterRoleBinding{}, f.defaultInformer)
    +}
    +
    +func (f *clusterRoleBindingInformer) Lister() v1.ClusterRoleBindingLister {
    +	return v1.NewClusterRoleBindingLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go
    new file mode 100644
    index 0000000000..7f99c9454b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ClusterRoles returns a ClusterRoleInformer.
    +	ClusterRoles() ClusterRoleInformer
    +	// ClusterRoleBindings returns a ClusterRoleBindingInformer.
    +	ClusterRoleBindings() ClusterRoleBindingInformer
    +	// Roles returns a RoleInformer.
    +	Roles() RoleInformer
    +	// RoleBindings returns a RoleBindingInformer.
    +	RoleBindings() RoleBindingInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ClusterRoles returns a ClusterRoleInformer.
    +func (v *version) ClusterRoles() ClusterRoleInformer {
    +	return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ClusterRoleBindings returns a ClusterRoleBindingInformer.
    +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer {
    +	return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Roles returns a RoleInformer.
    +func (v *version) Roles() RoleInformer {
    +	return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// RoleBindings returns a RoleBindingInformer.
    +func (v *version) RoleBindings() RoleBindingInformer {
    +	return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1/role.go
    new file mode 100644
    index 0000000000..986a5f29f4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1/role.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1 "k8s.io/api/rbac/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/rbac/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleInformer provides access to a shared informer and lister for
    +// Roles.
    +type RoleInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.RoleLister
    +}
    +
    +type roleInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewRoleInformer constructs a new informer for Role type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRoleInformer constructs a new informer for Role type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().Roles(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().Roles(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1.Role{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *roleInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1.Role{}, f.defaultInformer)
    +}
    +
    +func (f *roleInformer) Lister() v1.RoleLister {
    +	return v1.NewRoleLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go
    new file mode 100644
    index 0000000000..0264049fb0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1 "k8s.io/api/rbac/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/rbac/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleBindingInformer provides access to a shared informer and lister for
    +// RoleBindings.
    +type RoleBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.RoleBindingLister
    +}
    +
    +type roleBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewRoleBindingInformer constructs a new informer for RoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().RoleBindings(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1().RoleBindings(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1.RoleBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1.RoleBinding{}, f.defaultInformer)
    +}
    +
    +func (f *roleBindingInformer) Lister() v1.RoleBindingLister {
    +	return v1.NewRoleBindingLister(f.Informer().GetIndexer())
    +}
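
In normal use the rbac/v1 informers above are obtained through client-go's shared informer factory, which hands every consumer the same underlying SharedIndexInformer via InformerFor (see the defaultInformer methods) and therefore keeps one list/watch per resource type. A minimal sketch, assuming in-cluster credentials and a 10-minute resync period:

```go
package main

import (
	"fmt"
	"time"

	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One factory per process: every consumer of ClusterRoles shares a single list/watch.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	crInformer := factory.Rbac().V1().ClusterRoles().Informer()
	crInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("observed ClusterRole:", obj.(*rbacv1.ClusterRole).Name)
		},
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	fmt.Println("ClusterRoles in cache:", len(crInformer.GetStore().List()))
}
```

Registering the handler before Start means the initial Add events for existing ClusterRoles are delivered during the first sync.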
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go
    new file mode 100644
    index 0000000000..70d9885f0a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleInformer provides access to a shared informer and lister for
    +// ClusterRoles.
    +type ClusterRoleInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.ClusterRoleLister
    +}
    +
    +type clusterRoleInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewClusterRoleInformer constructs a new informer for ClusterRole type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().ClusterRoles().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().ClusterRoles().Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1alpha1.ClusterRole{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1alpha1.ClusterRole{}, f.defaultInformer)
    +}
    +
    +func (f *clusterRoleInformer) Lister() v1alpha1.ClusterRoleLister {
    +	return v1alpha1.NewClusterRoleLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go
    new file mode 100644
    index 0000000000..8c18f67928
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleBindingInformer provides access to a shared informer and lister for
    +// ClusterRoleBindings.
    +type ClusterRoleBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.ClusterRoleBindingLister
    +}
    +
    +type clusterRoleBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().ClusterRoleBindings().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().ClusterRoleBindings().Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1alpha1.ClusterRoleBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1alpha1.ClusterRoleBinding{}, f.defaultInformer)
    +}
    +
    +func (f *clusterRoleBindingInformer) Lister() v1alpha1.ClusterRoleBindingLister {
    +	return v1alpha1.NewClusterRoleBindingLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..d27c79987f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ClusterRoles returns a ClusterRoleInformer.
    +	ClusterRoles() ClusterRoleInformer
    +	// ClusterRoleBindings returns a ClusterRoleBindingInformer.
    +	ClusterRoleBindings() ClusterRoleBindingInformer
    +	// Roles returns a RoleInformer.
    +	Roles() RoleInformer
    +	// RoleBindings returns a RoleBindingInformer.
    +	RoleBindings() RoleBindingInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ClusterRoles returns a ClusterRoleInformer.
    +func (v *version) ClusterRoles() ClusterRoleInformer {
    +	return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ClusterRoleBindings returns a ClusterRoleBindingInformer.
    +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer {
    +	return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Roles returns a RoleInformer.
    +func (v *version) Roles() RoleInformer {
    +	return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// RoleBindings returns a RoleBindingInformer.
    +func (v *version) RoleBindings() RoleBindingInformer {
    +	return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go
    new file mode 100644
    index 0000000000..7dc4551d92
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleInformer provides access to a shared informer and lister for
    +// Roles.
    +type RoleInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.RoleLister
    +}
    +
    +type roleInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewRoleInformer constructs a new informer for Role type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRoleInformer constructs a new informer for Role type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().Roles(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().Roles(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1alpha1.Role{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *roleInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1alpha1.Role{}, f.defaultInformer)
    +}
    +
    +func (f *roleInformer) Lister() v1alpha1.RoleLister {
    +	return v1alpha1.NewRoleLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go
    new file mode 100644
    index 0000000000..d49ec8b362
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleBindingInformer provides access to a shared informer and lister for
    +// RoleBindings.
    +type RoleBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.RoleBindingLister
    +}
    +
    +type roleBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewRoleBindingInformer constructs a new informer for RoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().RoleBindings(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1alpha1().RoleBindings(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1alpha1.RoleBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1alpha1.RoleBinding{}, f.defaultInformer)
    +}
    +
    +func (f *roleBindingInformer) Lister() v1alpha1.RoleBindingLister {
    +	return v1alpha1.NewRoleBindingLister(f.Informer().GetIndexer())
    +}
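
Once a factory-managed informer has synced, the Lister paired with it serves reads from the local index rather than the API server. The pattern is the same for every group version added in this patch; the sketch below (illustrative only) uses rbac/v1 Roles because the v1alpha1 RBAC group is generally no longer served by current clusters, and the namespace and resync values are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Minute)
	roleLister := factory.Rbac().V1().Roles().Lister() // registers the Role informer with the factory

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// Served from the informer's local index; no API-server round trip.
	roles, err := roleLister.Roles("kube-system").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, r := range roles {
		fmt.Println(r.Namespace, r.Name)
	}
}
```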
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go
    new file mode 100644
    index 0000000000..e50e1d3935
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/rbac/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleInformer provides access to a shared informer and lister for
    +// ClusterRoles.
    +type ClusterRoleInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ClusterRoleLister
    +}
    +
    +type clusterRoleInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewClusterRoleInformer constructs a new informer for ClusterRole type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().ClusterRoles().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().ClusterRoles().Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1beta1.ClusterRole{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1beta1.ClusterRole{}, f.defaultInformer)
    +}
    +
    +func (f *clusterRoleInformer) Lister() v1beta1.ClusterRoleLister {
    +	return v1beta1.NewClusterRoleLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go
    new file mode 100644
    index 0000000000..a7ea4cd38d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/rbac/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleBindingInformer provides access to a shared informer and lister for
    +// ClusterRoleBindings.
    +type ClusterRoleBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.ClusterRoleBindingLister
    +}
    +
    +type clusterRoleBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().ClusterRoleBindings().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().ClusterRoleBindings().Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1beta1.ClusterRoleBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1beta1.ClusterRoleBinding{}, f.defaultInformer)
    +}
    +
    +func (f *clusterRoleBindingInformer) Lister() v1beta1.ClusterRoleBindingLister {
    +	return v1beta1.NewClusterRoleBindingLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go
    new file mode 100644
    index 0000000000..04add43afa
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// ClusterRoles returns a ClusterRoleInformer.
    +	ClusterRoles() ClusterRoleInformer
    +	// ClusterRoleBindings returns a ClusterRoleBindingInformer.
    +	ClusterRoleBindings() ClusterRoleBindingInformer
    +	// Roles returns a RoleInformer.
    +	Roles() RoleInformer
    +	// RoleBindings returns a RoleBindingInformer.
    +	RoleBindings() RoleBindingInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// ClusterRoles returns a ClusterRoleInformer.
    +func (v *version) ClusterRoles() ClusterRoleInformer {
    +	return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ClusterRoleBindings returns a ClusterRoleBindingInformer.
    +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer {
    +	return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// Roles returns a RoleInformer.
    +func (v *version) Roles() RoleInformer {
    +	return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// RoleBindings returns a RoleBindingInformer.
    +func (v *version) RoleBindings() RoleBindingInformer {
    +	return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go
    new file mode 100644
    index 0000000000..e56961e81e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/rbac/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleInformer provides access to a shared informer and lister for
    +// Roles.
    +type RoleInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.RoleLister
    +}
    +
    +type roleInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewRoleInformer constructs a new informer for Role type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRoleInformer constructs a new informer for Role type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().Roles(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().Roles(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1beta1.Role{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *roleInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1beta1.Role{}, f.defaultInformer)
    +}
    +
    +func (f *roleInformer) Lister() v1beta1.RoleLister {
    +	return v1beta1.NewRoleLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go
    new file mode 100644
    index 0000000000..d893882db3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/rbac/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleBindingInformer provides access to a shared informer and lister for
    +// RoleBindings.
    +type RoleBindingInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.RoleBindingLister
    +}
    +
    +type roleBindingInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewRoleBindingInformer constructs a new informer for RoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().RoleBindings(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.RbacV1beta1().RoleBindings(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&rbacv1beta1.RoleBinding{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&rbacv1beta1.RoleBinding{}, f.defaultInformer)
    +}
    +
    +func (f *roleBindingInformer) Lister() v1beta1.RoleBindingLister {
    +	return v1beta1.NewRoleBindingLister(f.Informer().GetIndexer())
    +}
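The two RBAC v1beta1 informers above follow the standard informer-gen pattern: an Informer() backed by the shared factory cache plus a Lister() over its indexer. As a rough illustration of how such generated informers are typically consumed downstream via the shared informer factory (the kubeconfig path, 10-minute resync, and "default" namespace below are assumptions for the sketch, not part of the vendored code):

package main

import (
	"fmt"
	"time"

	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed for the example: kubeconfig at the default home location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	defer close(stopCh)

	// One factory shares caches and watch connections across all informers.
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	rbInformer := factory.Rbac().V1beta1().RoleBindings()

	rbInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			rb := obj.(*rbacv1beta1.RoleBinding)
			fmt.Printf("observed RoleBinding %s/%s\n", rb.Namespace, rb.Name)
		},
	})

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// Listers read from the local cache, not the API server.
	bindings, err := rbInformer.Lister().RoleBindings("default").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached RoleBindings in default:", len(bindings))
}

After WaitForCacheSync, lister reads are served from the in-memory store, so hot paths avoid extra API-server round trips.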
    diff --git a/vendor/k8s.io/client-go/informers/resource/interface.go b/vendor/k8s.io/client-go/informers/resource/interface.go
    new file mode 100644
    index 0000000000..170d29d808
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/resource/interface.go
    @@ -0,0 +1,46 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package resource
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1alpha3 "k8s.io/client-go/informers/resource/v1alpha3"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1alpha3 provides access to shared informers for resources in V1alpha3.
    +	V1alpha3() v1alpha3.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1alpha3 returns a new v1alpha3.Interface.
    +func (g *group) V1alpha3() v1alpha3.Interface {
    +	return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go
    new file mode 100644
    index 0000000000..c0bcbd1905
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// DeviceClassInformer provides access to a shared informer and lister for
    +// DeviceClasses.
    +type DeviceClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha3.DeviceClassLister
    +}
    +
    +type deviceClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewDeviceClassInformer constructs a new informer for DeviceClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredDeviceClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredDeviceClassInformer constructs a new informer for DeviceClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().DeviceClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().DeviceClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&resourcev1alpha3.DeviceClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *deviceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredDeviceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *deviceClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&resourcev1alpha3.DeviceClass{}, f.defaultInformer)
    +}
    +
    +func (f *deviceClassInformer) Lister() v1alpha3.DeviceClassLister {
    +	return v1alpha3.NewDeviceClassLister(f.Informer().GetIndexer())
    +}
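Besides the factory path, the generated NewFiltered* constructors accept a TweakListOptionsFunc, which lets a standalone informer narrow its list/watch server-side. A minimal sketch, assuming a preconstructed clientset and a purely illustrative vendor label:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers/resource/v1alpha3"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newVendorDeviceClassInformer builds a standalone DeviceClass informer that
// only sees objects carrying an (example) vendor label. The clientset, label
// value, and 30-minute resync are assumptions for the sketch.
func newVendorDeviceClassInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
	return v1alpha3.NewFilteredDeviceClassInformer(
		clientset,
		30*time.Minute,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(options *metav1.ListOptions) {
			// Server-side filtering applied to both List and Watch calls.
			options.LabelSelector = "example.com/vendor=acme"
		},
	)
}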
    diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go
    new file mode 100644
    index 0000000000..481a7de451
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// DeviceClasses returns a DeviceClassInformer.
    +	DeviceClasses() DeviceClassInformer
    +	// PodSchedulingContexts returns a PodSchedulingContextInformer.
    +	PodSchedulingContexts() PodSchedulingContextInformer
    +	// ResourceClaims returns a ResourceClaimInformer.
    +	ResourceClaims() ResourceClaimInformer
    +	// ResourceClaimTemplates returns a ResourceClaimTemplateInformer.
    +	ResourceClaimTemplates() ResourceClaimTemplateInformer
    +	// ResourceSlices returns a ResourceSliceInformer.
    +	ResourceSlices() ResourceSliceInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// DeviceClasses returns a DeviceClassInformer.
    +func (v *version) DeviceClasses() DeviceClassInformer {
    +	return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// PodSchedulingContexts returns a PodSchedulingContextInformer.
    +func (v *version) PodSchedulingContexts() PodSchedulingContextInformer {
    +	return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ResourceClaims returns a ResourceClaimInformer.
    +func (v *version) ResourceClaims() ResourceClaimInformer {
    +	return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ResourceClaimTemplates returns a ResourceClaimTemplateInformer.
    +func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer {
    +	return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// ResourceSlices returns a ResourceSliceInformer.
    +func (v *version) ResourceSlices() ResourceSliceInformer {
    +	return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go
    new file mode 100644
    index 0000000000..62fb3614fc
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PodSchedulingContextInformer provides access to a shared informer and lister for
    +// PodSchedulingContexts.
    +type PodSchedulingContextInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha3.PodSchedulingContextLister
    +}
    +
    +type podSchedulingContextInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPodSchedulingContextInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().PodSchedulingContexts(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().PodSchedulingContexts(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&resourcev1alpha3.PodSchedulingContext{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPodSchedulingContextInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&resourcev1alpha3.PodSchedulingContext{}, f.defaultInformer)
    +}
    +
    +func (f *podSchedulingContextInformer) Lister() v1alpha3.PodSchedulingContextLister {
    +	return v1alpha3.NewPodSchedulingContextLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go
    new file mode 100644
    index 0000000000..fa644579b1
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceClaimInformer provides access to a shared informer and lister for
    +// ResourceClaims.
    +type ResourceClaimInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha3.ResourceClaimLister
    +}
    +
    +type resourceClaimInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewResourceClaimInformer constructs a new informer for ResourceClaim type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredResourceClaimInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredResourceClaimInformer constructs a new informer for ResourceClaim type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().ResourceClaims(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().ResourceClaims(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&resourcev1alpha3.ResourceClaim{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredResourceClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&resourcev1alpha3.ResourceClaim{}, f.defaultInformer)
    +}
    +
    +func (f *resourceClaimInformer) Lister() v1alpha3.ResourceClaimLister {
    +	return v1alpha3.NewResourceClaimLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go
    new file mode 100644
    index 0000000000..294755661c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceClaimTemplateInformer provides access to a shared informer and lister for
    +// ResourceClaimTemplates.
    +type ResourceClaimTemplateInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha3.ResourceClaimTemplateLister
    +}
    +
    +type resourceClaimTemplateInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredResourceClaimTemplateInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&resourcev1alpha3.ResourceClaimTemplate{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredResourceClaimTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&resourcev1alpha3.ResourceClaimTemplate{}, f.defaultInformer)
    +}
    +
    +func (f *resourceClaimTemplateInformer) Lister() v1alpha3.ResourceClaimTemplateLister {
    +	return v1alpha3.NewResourceClaimTemplateLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go
    new file mode 100644
    index 0000000000..108083530c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceSliceInformer provides access to a shared informer and lister for
    +// ResourceSlices.
    +type ResourceSliceInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha3.ResourceSliceLister
    +}
    +
    +type resourceSliceInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewResourceSliceInformer constructs a new informer for ResourceSlice type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredResourceSliceInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredResourceSliceInformer constructs a new informer for ResourceSlice type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().ResourceSlices().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.ResourceV1alpha3().ResourceSlices().Watch(context.TODO(), options)
    +			},
    +		},
    +		&resourcev1alpha3.ResourceSlice{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&resourcev1alpha3.ResourceSlice{}, f.defaultInformer)
    +}
    +
    +func (f *resourceSliceInformer) Lister() v1alpha3.ResourceSliceLister {
    +	return v1alpha3.NewResourceSliceLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/scheduling/interface.go b/vendor/k8s.io/client-go/informers/scheduling/interface.go
    new file mode 100644
    index 0000000000..659089b531
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/scheduling/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package scheduling
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1 "k8s.io/client-go/informers/scheduling/v1"
    +	v1alpha1 "k8s.io/client-go/informers/scheduling/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/scheduling/v1beta1"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go
    new file mode 100644
    index 0000000000..fd7931f34a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// PriorityClasses returns a PriorityClassInformer.
    +	PriorityClasses() PriorityClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// PriorityClasses returns a PriorityClassInformer.
    +func (v *version) PriorityClasses() PriorityClassInformer {
    +	return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go
    new file mode 100644
    index 0000000000..730616b4a5
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	schedulingv1 "k8s.io/api/scheduling/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/scheduling/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityClassInformer provides access to a shared informer and lister for
    +// PriorityClasses.
    +type PriorityClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.PriorityClassLister
    +}
    +
    +type priorityClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPriorityClassInformer constructs a new informer for PriorityClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.SchedulingV1().PriorityClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.SchedulingV1().PriorityClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&schedulingv1.PriorityClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *priorityClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&schedulingv1.PriorityClass{}, f.defaultInformer)
    +}
    +
    +func (f *priorityClassInformer) Lister() v1.PriorityClassLister {
    +	return v1.NewPriorityClassLister(f.Informer().GetIndexer())
    +}
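PriorityClass is cluster-scoped, so its generated informer struct carries no namespace field and its lister exposes Get and List directly rather than a per-namespace sub-lister. A small sketch, assuming a shared informer factory that has already been started and synced:

package example

import (
	"fmt"

	"k8s.io/client-go/informers"
)

// printDefaultPriority looks up a built-in PriorityClass from the shared cache.
// It assumes the factory has already been started and its caches have synced.
func printDefaultPriority(factory informers.SharedInformerFactory) error {
	pcLister := factory.Scheduling().V1().PriorityClasses().Lister()

	// Cluster-scoped listers take only the object name.
	pc, err := pcLister.Get("system-cluster-critical")
	if err != nil {
		return err
	}
	fmt.Printf("PriorityClass %s: value=%d globalDefault=%t\n", pc.Name, pc.Value, pc.GlobalDefault)
	return nil
}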
    diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..cd908d14e6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// PriorityClasses returns a PriorityClassInformer.
    +	PriorityClasses() PriorityClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// PriorityClasses returns a PriorityClassInformer.
    +func (v *version) PriorityClasses() PriorityClassInformer {
    +	return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go
    new file mode 100644
    index 0000000000..f82b664369
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityClassInformer provides access to a shared informer and lister for
    +// PriorityClasses.
    +type PriorityClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.PriorityClassLister
    +}
    +
    +type priorityClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPriorityClassInformer constructs a new informer for PriorityClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.SchedulingV1alpha1().PriorityClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.SchedulingV1alpha1().PriorityClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&schedulingv1alpha1.PriorityClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *priorityClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&schedulingv1alpha1.PriorityClass{}, f.defaultInformer)
    +}
    +
    +func (f *priorityClassInformer) Lister() v1alpha1.PriorityClassLister {
    +	return v1alpha1.NewPriorityClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1beta1/interface.go
    new file mode 100644
    index 0000000000..52840a9cee
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/scheduling/v1beta1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// PriorityClasses returns a PriorityClassInformer.
    +	PriorityClasses() PriorityClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// PriorityClasses returns a PriorityClassInformer.
    +func (v *version) PriorityClasses() PriorityClassInformer {
    +	return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go
    new file mode 100644
    index 0000000000..fc7848891e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/scheduling/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityClassInformer provides access to a shared informer and lister for
    +// PriorityClasses.
    +type PriorityClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.PriorityClassLister
    +}
    +
    +type priorityClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewPriorityClassInformer constructs a new informer for PriorityClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.SchedulingV1beta1().PriorityClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.SchedulingV1beta1().PriorityClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&schedulingv1beta1.PriorityClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *priorityClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&schedulingv1beta1.PriorityClass{}, f.defaultInformer)
    +}
    +
    +func (f *priorityClassInformer) Lister() v1beta1.PriorityClassLister {
    +	return v1beta1.NewPriorityClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/interface.go b/vendor/k8s.io/client-go/informers/storage/interface.go
    new file mode 100644
    index 0000000000..8245aa60c9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/interface.go
    @@ -0,0 +1,62 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package storage
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1 "k8s.io/client-go/informers/storage/v1"
    +	v1alpha1 "k8s.io/client-go/informers/storage/v1alpha1"
    +	v1beta1 "k8s.io/client-go/informers/storage/v1beta1"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1 provides access to shared informers for resources in V1.
    +	V1() v1.Interface
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +	// V1beta1 provides access to shared informers for resources in V1beta1.
    +	V1beta1() v1beta1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1 returns a new v1.Interface.
    +func (g *group) V1() v1.Interface {
    +	return v1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    +
    +// V1beta1 returns a new v1beta1.Interface.
    +func (g *group) V1beta1() v1beta1.Interface {
    +	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go b/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go
    new file mode 100644
    index 0000000000..6fd1e678d9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1 "k8s.io/api/storage/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/storage/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIDriverInformer provides access to a shared informer and lister for
    +// CSIDrivers.
    +type CSIDriverInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.CSIDriverLister
    +}
    +
    +type cSIDriverInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewCSIDriverInformer constructs a new informer for CSIDriver type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCSIDriverInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCSIDriverInformer constructs a new informer for CSIDriver type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().CSIDrivers().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().CSIDrivers().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1.CSIDriver{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCSIDriverInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1.CSIDriver{}, f.defaultInformer)
    +}
    +
    +func (f *cSIDriverInformer) Lister() v1.CSIDriverLister {
    +	return v1.NewCSIDriverLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1/csinode.go b/vendor/k8s.io/client-go/informers/storage/v1/csinode.go
    new file mode 100644
    index 0000000000..96416967fb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1/csinode.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1 "k8s.io/api/storage/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/storage/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CSINodeInformer provides access to a shared informer and lister for
    +// CSINodes.
    +type CSINodeInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.CSINodeLister
    +}
    +
    +type cSINodeInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewCSINodeInformer constructs a new informer for CSINode type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCSINodeInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCSINodeInformer constructs a new informer for CSINode type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().CSINodes().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().CSINodes().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1.CSINode{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCSINodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cSINodeInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1.CSINode{}, f.defaultInformer)
    +}
    +
    +func (f *cSINodeInformer) Lister() v1.CSINodeLister {
    +	return v1.NewCSINodeLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go
    new file mode 100644
    index 0000000000..9b9095f3ae
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1 "k8s.io/api/storage/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/storage/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIStorageCapacityInformer provides access to a shared informer and lister for
    +// CSIStorageCapacities.
    +type CSIStorageCapacityInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.CSIStorageCapacityLister
    +}
    +
    +type cSIStorageCapacityInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewCSIStorageCapacityInformer constructs a new informer for CSIStorageCapacity type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCSIStorageCapacityInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCSIStorageCapacityInformer constructs a new informer for CSIStorageCapacity type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().CSIStorageCapacities(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().CSIStorageCapacities(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1.CSIStorageCapacity{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCSIStorageCapacityInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1.CSIStorageCapacity{}, f.defaultInformer)
    +}
    +
    +func (f *cSIStorageCapacityInformer) Lister() v1.CSIStorageCapacityLister {
    +	return v1.NewCSIStorageCapacityLister(f.Informer().GetIndexer())
    +}
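
CSIStorageCapacity is the only namespaced resource in this vendored group, and each generated constructor above also has a `NewFiltered...` variant taking a `tweakListOptions` callback. As a minimal sketch (namespace, resync period, and label selector are illustrative assumptions, not taken from this change), the same scoping is normally applied through the shared factory options rather than by calling the filtered constructor directly:

```go
package storagescope

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	storagev1informers "k8s.io/client-go/informers/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// NewScopedCapacityInformer returns a CSIStorageCapacity informer whose List and
// Watch calls are restricted to one namespace and one label selector. The factory
// options correspond to the namespace and tweakListOptions parameters of the
// generated NewFilteredCSIStorageCapacityInformer above.
func NewScopedCapacityInformer(client kubernetes.Interface, namespace, selector string) (informers.SharedInformerFactory, storagev1informers.CSIStorageCapacityInformer) {
	factory := informers.NewSharedInformerFactoryWithOptions(
		client,
		30*time.Minute,
		// Only informers for namespaced resources, such as CSIStorageCapacity, are affected.
		informers.WithNamespace(namespace),
		// Applied to every List/Watch issued by the factory's informers.
		informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = selector
		}),
	)
	return factory, factory.Storage().V1().CSIStorageCapacities()
}
```

The returned factory still needs `Start` and `WaitForCacheSync` before the informer's lister reflects the cluster; a fuller sketch of that lifecycle follows after the remaining storage/v1 files below.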
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1/interface.go
    new file mode 100644
    index 0000000000..4f017b0864
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1/interface.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// CSIDrivers returns a CSIDriverInformer.
    +	CSIDrivers() CSIDriverInformer
    +	// CSINodes returns a CSINodeInformer.
    +	CSINodes() CSINodeInformer
    +	// CSIStorageCapacities returns a CSIStorageCapacityInformer.
    +	CSIStorageCapacities() CSIStorageCapacityInformer
    +	// StorageClasses returns a StorageClassInformer.
    +	StorageClasses() StorageClassInformer
    +	// VolumeAttachments returns a VolumeAttachmentInformer.
    +	VolumeAttachments() VolumeAttachmentInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// CSIDrivers returns a CSIDriverInformer.
    +func (v *version) CSIDrivers() CSIDriverInformer {
    +	return &cSIDriverInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// CSINodes returns a CSINodeInformer.
    +func (v *version) CSINodes() CSINodeInformer {
    +	return &cSINodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// CSIStorageCapacities returns a CSIStorageCapacityInformer.
    +func (v *version) CSIStorageCapacities() CSIStorageCapacityInformer {
    +	return &cSIStorageCapacityInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// StorageClasses returns a StorageClassInformer.
    +func (v *version) StorageClasses() StorageClassInformer {
    +	return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// VolumeAttachments returns a VolumeAttachmentInformer.
    +func (v *version) VolumeAttachments() VolumeAttachmentInformer {
    +	return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go
    new file mode 100644
    index 0000000000..8cde79d9a3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1 "k8s.io/api/storage/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/storage/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageClassInformer provides access to a shared informer and lister for
    +// StorageClasses.
    +type StorageClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.StorageClassLister
    +}
    +
    +type storageClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewStorageClassInformer constructs a new informer for StorageClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().StorageClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().StorageClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1.StorageClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *storageClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1.StorageClass{}, f.defaultInformer)
    +}
    +
    +func (f *storageClassInformer) Lister() v1.StorageClassLister {
    +	return v1.NewStorageClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go
    new file mode 100644
    index 0000000000..be605ff48c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1 "k8s.io/api/storage/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1 "k8s.io/client-go/listers/storage/v1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttachmentInformer provides access to a shared informer and lister for
    +// VolumeAttachments.
    +type VolumeAttachmentInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1.VolumeAttachmentLister
    +}
    +
    +type volumeAttachmentInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().VolumeAttachments().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1().VolumeAttachments().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1.VolumeAttachment{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1.VolumeAttachment{}, f.defaultInformer)
    +}
    +
    +func (f *volumeAttachmentInformer) Lister() v1.VolumeAttachmentLister {
    +	return v1.NewVolumeAttachmentLister(f.Informer().GetIndexer())
    +}
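
The generated constructors above repeatedly note that informers should be obtained through a shared informer factory rather than built standalone. A self-contained sketch of that pattern for the storage/v1 informers added here (the kubeconfig path, resync period, and printed fields are illustrative assumptions, not part of this change):

```go
package main

import (
	"context"
	"fmt"
	"time"

	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative client setup from the default kubeconfig; any rest.Config works.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One factory shares caches, watches, and connections across every informer it hands out.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	// A storage/v1 informer generated in this vendor drop.
	scInformer := factory.Storage().V1().StorageClasses()
	scInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("storage class added:", obj.(*storagev1.StorageClass).Name)
		},
	})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start the requested informers and block until their caches have synced.
	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())

	// The lister reads from the synced local cache, not from the API server.
	classes, err := scInformer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached storage classes:", len(classes))
}
```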
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go
    new file mode 100644
    index 0000000000..e59dfab2d1
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIStorageCapacityInformer provides access to a shared informer and lister for
    +// CSIStorageCapacities.
    +type CSIStorageCapacityInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.CSIStorageCapacityLister
    +}
    +
    +type cSIStorageCapacityInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewCSIStorageCapacityInformer constructs a new informer for CSIStorageCapacity type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCSIStorageCapacityInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCSIStorageCapacityInformer constructs a new informer for CSIStorageCapacity type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1alpha1().CSIStorageCapacities(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1alpha1().CSIStorageCapacities(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1alpha1.CSIStorageCapacity{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCSIStorageCapacityInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1alpha1.CSIStorageCapacity{}, f.defaultInformer)
    +}
    +
    +func (f *cSIStorageCapacityInformer) Lister() v1alpha1.CSIStorageCapacityLister {
    +	return v1alpha1.NewCSIStorageCapacityLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..bda3b1add9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go
    @@ -0,0 +1,59 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// CSIStorageCapacities returns a CSIStorageCapacityInformer.
    +	CSIStorageCapacities() CSIStorageCapacityInformer
    +	// VolumeAttachments returns a VolumeAttachmentInformer.
    +	VolumeAttachments() VolumeAttachmentInformer
    +	// VolumeAttributesClasses returns a VolumeAttributesClassInformer.
    +	VolumeAttributesClasses() VolumeAttributesClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// CSIStorageCapacities returns a CSIStorageCapacityInformer.
    +func (v *version) CSIStorageCapacities() CSIStorageCapacityInformer {
    +	return &cSIStorageCapacityInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// VolumeAttachments returns a VolumeAttachmentInformer.
    +func (v *version) VolumeAttachments() VolumeAttachmentInformer {
    +	return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// VolumeAttributesClasses returns a VolumeAttributesClassInformer.
    +func (v *version) VolumeAttributesClasses() VolumeAttributesClassInformer {
    +	return &volumeAttributesClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go
    new file mode 100644
    index 0000000000..445496dade
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttachmentInformer provides access to a shared informer and lister for
    +// VolumeAttachments.
    +type VolumeAttachmentInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.VolumeAttachmentLister
    +}
    +
    +type volumeAttachmentInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1alpha1().VolumeAttachments().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1alpha1().VolumeAttachments().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1alpha1.VolumeAttachment{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1alpha1.VolumeAttachment{}, f.defaultInformer)
    +}
    +
    +func (f *volumeAttachmentInformer) Lister() v1alpha1.VolumeAttachmentLister {
    +	return v1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go
    new file mode 100644
    index 0000000000..5e62e2f423
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttributesClassInformer provides access to a shared informer and lister for
    +// VolumeAttributesClasses.
    +type VolumeAttributesClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.VolumeAttributesClassLister
    +}
    +
    +type volumeAttributesClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1alpha1().VolumeAttributesClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1alpha1().VolumeAttributesClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1alpha1.VolumeAttributesClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1alpha1.VolumeAttributesClass{}, f.defaultInformer)
    +}
    +
    +func (f *volumeAttributesClassInformer) Lister() v1alpha1.VolumeAttributesClassLister {
    +	return v1alpha1.NewVolumeAttributesClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go
    new file mode 100644
    index 0000000000..f138a915b8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIDriverInformer provides access to a shared informer and lister for
    +// CSIDrivers.
    +type CSIDriverInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.CSIDriverLister
    +}
    +
    +type cSIDriverInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewCSIDriverInformer constructs a new informer for CSIDriver type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCSIDriverInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCSIDriverInformer constructs a new informer for CSIDriver type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().CSIDrivers().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().CSIDrivers().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1beta1.CSIDriver{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCSIDriverInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1beta1.CSIDriver{}, f.defaultInformer)
    +}
    +
    +func (f *cSIDriverInformer) Lister() v1beta1.CSIDriverLister {
    +	return v1beta1.NewCSIDriverLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go
    new file mode 100644
    index 0000000000..6ba63172a3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CSINodeInformer provides access to a shared informer and lister for
    +// CSINodes.
    +type CSINodeInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.CSINodeLister
    +}
    +
    +type cSINodeInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewCSINodeInformer constructs a new informer for CSINode type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCSINodeInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCSINodeInformer constructs a new informer for CSINode type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().CSINodes().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().CSINodes().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1beta1.CSINode{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCSINodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cSINodeInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1beta1.CSINode{}, f.defaultInformer)
    +}
    +
    +func (f *cSINodeInformer) Lister() v1beta1.CSINodeLister {
    +	return v1beta1.NewCSINodeLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go
    new file mode 100644
    index 0000000000..8f0cc46687
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go
    @@ -0,0 +1,90 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIStorageCapacityInformer provides access to a shared informer and lister for
    +// CSIStorageCapacities.
    +type CSIStorageCapacityInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.CSIStorageCapacityLister
    +}
    +
    +type cSIStorageCapacityInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +	namespace        string
    +}
    +
    +// NewCSIStorageCapacityInformer constructs a new informer for CSIStorageCapacity type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredCSIStorageCapacityInformer(client, namespace, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredCSIStorageCapacityInformer constructs a new informer for CSIStorageCapacity type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().CSIStorageCapacities(namespace).List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().CSIStorageCapacities(namespace).Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1beta1.CSIStorageCapacity{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredCSIStorageCapacityInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1beta1.CSIStorageCapacity{}, f.defaultInformer)
    +}
    +
    +func (f *cSIStorageCapacityInformer) Lister() v1beta1.CSIStorageCapacityLister {
    +	return v1beta1.NewCSIStorageCapacityLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go
    new file mode 100644
    index 0000000000..7433951855
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go
    @@ -0,0 +1,80 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// CSIDrivers returns a CSIDriverInformer.
    +	CSIDrivers() CSIDriverInformer
    +	// CSINodes returns a CSINodeInformer.
    +	CSINodes() CSINodeInformer
    +	// CSIStorageCapacities returns a CSIStorageCapacityInformer.
    +	CSIStorageCapacities() CSIStorageCapacityInformer
    +	// StorageClasses returns a StorageClassInformer.
    +	StorageClasses() StorageClassInformer
    +	// VolumeAttachments returns a VolumeAttachmentInformer.
    +	VolumeAttachments() VolumeAttachmentInformer
    +	// VolumeAttributesClasses returns a VolumeAttributesClassInformer.
    +	VolumeAttributesClasses() VolumeAttributesClassInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// CSIDrivers returns a CSIDriverInformer.
    +func (v *version) CSIDrivers() CSIDriverInformer {
    +	return &cSIDriverInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// CSINodes returns a CSINodeInformer.
    +func (v *version) CSINodes() CSINodeInformer {
    +	return &cSINodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// CSIStorageCapacities returns a CSIStorageCapacityInformer.
    +func (v *version) CSIStorageCapacities() CSIStorageCapacityInformer {
    +	return &cSIStorageCapacityInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// StorageClasses returns a StorageClassInformer.
    +func (v *version) StorageClasses() StorageClassInformer {
    +	return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// VolumeAttachments returns a VolumeAttachmentInformer.
    +func (v *version) VolumeAttachments() VolumeAttachmentInformer {
    +	return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    +
    +// VolumeAttributesClasses returns a VolumeAttributesClassInformer.
    +func (v *version) VolumeAttributesClasses() VolumeAttributesClassInformer {
    +	return &volumeAttributesClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go
    new file mode 100644
    index 0000000000..a6582bf3d6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageClassInformer provides access to a shared informer and lister for
    +// StorageClasses.
    +type StorageClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.StorageClassLister
    +}
    +
    +type storageClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewStorageClassInformer constructs a new informer for StorageClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().StorageClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().StorageClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1beta1.StorageClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *storageClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1beta1.StorageClass{}, f.defaultInformer)
    +}
    +
    +func (f *storageClassInformer) Lister() v1beta1.StorageClassLister {
    +	return v1beta1.NewStorageClassLister(f.Informer().GetIndexer())
    +}
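
Note (illustrative, outside the diff): when an independent informer is genuinely needed, the NewFiltered* constructors above accept a TweakListOptionsFunc. A hedged sketch follows; the label selector is purely hypothetical, and sharing a factory informer remains the preferred pattern.

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storageinformers "k8s.io/client-go/informers/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchFastStorageClasses wires up a standalone StorageClass informer restricted
// to a hypothetical label selector.
func watchFastStorageClasses(client kubernetes.Interface, stop <-chan struct{}) {
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "example.com/tier=fast" // illustrative selector only
	}
	inf := storageinformers.NewFilteredStorageClassInformer(
		client, 30*time.Minute, cache.Indexers{}, tweak)
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* react to matching StorageClasses */ },
	})
	inf.Run(stop)
}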
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go
    new file mode 100644
    index 0000000000..e894246349
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttachmentInformer provides access to a shared informer and lister for
    +// VolumeAttachments.
    +type VolumeAttachmentInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.VolumeAttachmentLister
    +}
    +
    +type volumeAttachmentInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().VolumeAttachments().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().VolumeAttachments().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1beta1.VolumeAttachment{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1beta1.VolumeAttachment{}, f.defaultInformer)
    +}
    +
    +func (f *volumeAttachmentInformer) Lister() v1beta1.VolumeAttachmentLister {
    +	return v1beta1.NewVolumeAttachmentLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go
    new file mode 100644
    index 0000000000..ede90ce43c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagev1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttributesClassInformer provides access to a shared informer and lister for
    +// VolumeAttributesClasses.
    +type VolumeAttributesClassInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1beta1.VolumeAttributesClassLister
    +}
    +
    +type volumeAttributesClassInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().VolumeAttributesClasses().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StorageV1beta1().VolumeAttributesClasses().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagev1beta1.VolumeAttributesClass{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagev1beta1.VolumeAttributesClass{}, f.defaultInformer)
    +}
    +
    +func (f *volumeAttributesClassInformer) Lister() v1beta1.VolumeAttributesClassLister {
    +	return v1beta1.NewVolumeAttributesClassLister(f.Informer().GetIndexer())
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storagemigration/interface.go b/vendor/k8s.io/client-go/informers/storagemigration/interface.go
    new file mode 100644
    index 0000000000..1f7030fea8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storagemigration/interface.go
    @@ -0,0 +1,46 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package storagemigration
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	v1alpha1 "k8s.io/client-go/informers/storagemigration/v1alpha1"
    +)
    +
    +// Interface provides access to each of this group's versions.
    +type Interface interface {
    +	// V1alpha1 provides access to shared informers for resources in V1alpha1.
    +	V1alpha1() v1alpha1.Interface
    +}
    +
    +type group struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// V1alpha1 returns a new v1alpha1.Interface.
    +func (g *group) V1alpha1() v1alpha1.Interface {
    +	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go
    new file mode 100644
    index 0000000000..60724e7a28
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go
    @@ -0,0 +1,45 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +)
    +
    +// Interface provides access to all the informers in this group version.
    +type Interface interface {
    +	// StorageVersionMigrations returns a StorageVersionMigrationInformer.
    +	StorageVersionMigrations() StorageVersionMigrationInformer
    +}
    +
    +type version struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	namespace        string
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// New returns a new Interface.
    +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    +	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
    +}
    +
    +// StorageVersionMigrations returns a StorageVersionMigrationInformer.
    +func (v *version) StorageVersionMigrations() StorageVersionMigrationInformer {
    +	return &storageVersionMigrationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
    +}
    diff --git a/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go
    new file mode 100644
    index 0000000000..70e7c72797
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go
    @@ -0,0 +1,89 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by informer-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +	time "time"
    +
    +	storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	runtime "k8s.io/apimachinery/pkg/runtime"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
    +	kubernetes "k8s.io/client-go/kubernetes"
    +	v1alpha1 "k8s.io/client-go/listers/storagemigration/v1alpha1"
    +	cache "k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageVersionMigrationInformer provides access to a shared informer and lister for
    +// StorageVersionMigrations.
    +type StorageVersionMigrationInformer interface {
    +	Informer() cache.SharedIndexInformer
    +	Lister() v1alpha1.StorageVersionMigrationLister
    +}
    +
    +type storageVersionMigrationInformer struct {
    +	factory          internalinterfaces.SharedInformerFactory
    +	tweakListOptions internalinterfaces.TweakListOptionsFunc
    +}
    +
    +// NewStorageVersionMigrationInformer constructs a new informer for StorageVersionMigration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewStorageVersionMigrationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    +	return NewFilteredStorageVersionMigrationInformer(client, resyncPeriod, indexers, nil)
    +}
    +
    +// NewFilteredStorageVersionMigrationInformer constructs a new informer for StorageVersionMigration type.
    +// Always prefer using an informer factory to get a shared informer instead of getting an independent
    +// one. This reduces memory footprint and number of connections to the server.
    +func NewFilteredStorageVersionMigrationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    +	return cache.NewSharedIndexInformer(
    +		&cache.ListWatch{
    +			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StoragemigrationV1alpha1().StorageVersionMigrations().List(context.TODO(), options)
    +			},
    +			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
    +				if tweakListOptions != nil {
    +					tweakListOptions(&options)
    +				}
    +				return client.StoragemigrationV1alpha1().StorageVersionMigrations().Watch(context.TODO(), options)
    +			},
    +		},
    +		&storagemigrationv1alpha1.StorageVersionMigration{},
    +		resyncPeriod,
    +		indexers,
    +	)
    +}
    +
    +func (f *storageVersionMigrationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    +	return NewFilteredStorageVersionMigrationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
    +}
    +
    +func (f *storageVersionMigrationInformer) Informer() cache.SharedIndexInformer {
    +	return f.factory.InformerFor(&storagemigrationv1alpha1.StorageVersionMigration{}, f.defaultInformer)
    +}
    +
    +func (f *storageVersionMigrationInformer) Lister() v1alpha1.StorageVersionMigrationLister {
    +	return v1alpha1.NewStorageVersionMigrationLister(f.Informer().GetIndexer())
    +}
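
Note (illustrative, outside the diff): a hedged sketch of consuming the new storagemigration informer directly via the constructor added above; the cache wiring mirrors the other generated informers.

package main

import (
	"fmt"

	migrationinformers "k8s.io/client-go/informers/storagemigration/v1alpha1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// trackMigrations lists StorageVersionMigration objects from a locally synced cache.
func trackMigrations(client kubernetes.Interface, stop <-chan struct{}) {
	inf := migrationinformers.NewStorageVersionMigrationInformer(client, 0, cache.Indexers{})
	go inf.Run(stop)
	if !cache.WaitForCacheSync(stop, inf.HasSynced) {
		return
	}
	for _, key := range inf.GetStore().ListKeys() {
		fmt.Println("observed StorageVersionMigration:", key)
	}
}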
    diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go
    index eaa206ff65..9cddb0bbed 100644
    --- a/vendor/k8s.io/client-go/kubernetes/clientset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go
    @@ -45,6 +45,7 @@ import (
     	certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1"
     	certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
     	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
    +	coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
     	coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
     	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
     	discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1"
    @@ -67,7 +68,7 @@ import (
     	rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
     	rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
     	rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
    -	resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2"
    +	resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3"
     	schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1"
     	schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
     	schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1"
    @@ -102,6 +103,7 @@ type Interface interface {
     	CertificatesV1() certificatesv1.CertificatesV1Interface
     	CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface
     	CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface
    +	CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface
     	CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface
     	CoordinationV1() coordinationv1.CoordinationV1Interface
     	CoreV1() corev1.CoreV1Interface
    @@ -125,7 +127,7 @@ type Interface interface {
     	RbacV1() rbacv1.RbacV1Interface
     	RbacV1beta1() rbacv1beta1.RbacV1beta1Interface
     	RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface
    -	ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface
    +	ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface
     	SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface
     	SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface
     	SchedulingV1() schedulingv1.SchedulingV1Interface
    @@ -159,6 +161,7 @@ type Clientset struct {
     	certificatesV1                *certificatesv1.CertificatesV1Client
     	certificatesV1beta1           *certificatesv1beta1.CertificatesV1beta1Client
     	certificatesV1alpha1          *certificatesv1alpha1.CertificatesV1alpha1Client
    +	coordinationV1alpha1          *coordinationv1alpha1.CoordinationV1alpha1Client
     	coordinationV1beta1           *coordinationv1beta1.CoordinationV1beta1Client
     	coordinationV1                *coordinationv1.CoordinationV1Client
     	coreV1                        *corev1.CoreV1Client
    @@ -182,7 +185,7 @@ type Clientset struct {
     	rbacV1                        *rbacv1.RbacV1Client
     	rbacV1beta1                   *rbacv1beta1.RbacV1beta1Client
     	rbacV1alpha1                  *rbacv1alpha1.RbacV1alpha1Client
    -	resourceV1alpha2              *resourcev1alpha2.ResourceV1alpha2Client
    +	resourceV1alpha3              *resourcev1alpha3.ResourceV1alpha3Client
     	schedulingV1alpha1            *schedulingv1alpha1.SchedulingV1alpha1Client
     	schedulingV1beta1             *schedulingv1beta1.SchedulingV1beta1Client
     	schedulingV1                  *schedulingv1.SchedulingV1Client
    @@ -297,6 +300,11 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al
     	return c.certificatesV1alpha1
     }
     
    +// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client
    +func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface {
    +	return c.coordinationV1alpha1
    +}
    +
     // CoordinationV1beta1 retrieves the CoordinationV1beta1Client
     func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface {
     	return c.coordinationV1beta1
    @@ -412,9 +420,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface {
     	return c.rbacV1alpha1
     }
     
    -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client
    -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface {
    -	return c.resourceV1alpha2
    +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client
    +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface {
    +	return c.resourceV1alpha3
     }
     
     // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client
    @@ -580,6 +588,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
     	if err != nil {
     		return nil, err
     	}
    +	cs.coordinationV1alpha1, err = coordinationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
    +	if err != nil {
    +		return nil, err
    +	}
     	cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
     	if err != nil {
     		return nil, err
    @@ -672,7 +684,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
     	if err != nil {
     		return nil, err
     	}
    -	cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient)
    +	cs.resourceV1alpha3, err = resourcev1alpha3.NewForConfigAndClient(&configShallowCopy, httpClient)
     	if err != nil {
     		return nil, err
     	}
    @@ -746,6 +758,7 @@ func New(c rest.Interface) *Clientset {
     	cs.certificatesV1 = certificatesv1.New(c)
     	cs.certificatesV1beta1 = certificatesv1beta1.New(c)
     	cs.certificatesV1alpha1 = certificatesv1alpha1.New(c)
    +	cs.coordinationV1alpha1 = coordinationv1alpha1.New(c)
     	cs.coordinationV1beta1 = coordinationv1beta1.New(c)
     	cs.coordinationV1 = coordinationv1.New(c)
     	cs.coreV1 = corev1.New(c)
    @@ -769,7 +782,7 @@ func New(c rest.Interface) *Clientset {
     	cs.rbacV1 = rbacv1.New(c)
     	cs.rbacV1beta1 = rbacv1beta1.New(c)
     	cs.rbacV1alpha1 = rbacv1alpha1.New(c)
    -	cs.resourceV1alpha2 = resourcev1alpha2.New(c)
    +	cs.resourceV1alpha3 = resourcev1alpha3.New(c)
     	cs.schedulingV1alpha1 = schedulingv1alpha1.New(c)
     	cs.schedulingV1beta1 = schedulingv1beta1.New(c)
     	cs.schedulingV1 = schedulingv1.New(c)
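
Note (illustrative, outside the diff): the clientset changes above add CoordinationV1alpha1 and replace ResourceV1alpha2 with ResourceV1alpha3, so downstream callers of the old group must migrate. A hedged sketch, assuming the upstream 1.31 API surface (LeaseCandidates under coordination/v1alpha1, ResourceClaims under resource/v1alpha3):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAlphaObjects touches the two group clients that changed in this update.
func listAlphaObjects(ctx context.Context, cs kubernetes.Interface) error {
	// Assumes coordination.k8s.io/v1alpha1 serves LeaseCandidates, as in upstream 1.31.
	lcs, err := cs.CoordinationV1alpha1().LeaseCandidates("kube-system").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	// Assumes resource.k8s.io/v1alpha3 serves ResourceClaims, as in upstream 1.31.
	claims, err := cs.ResourceV1alpha3().ResourceClaims("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(lcs.Items), "lease candidates,", len(claims.Items), "resource claims")
	return nil
}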
    diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
    index a62b8f7c45..132f917abe 100644
    --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
    +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
    @@ -21,6 +21,7 @@ package fake
     import (
     	"k8s.io/apimachinery/pkg/runtime"
     	"k8s.io/apimachinery/pkg/watch"
    +	applyconfigurations "k8s.io/client-go/applyconfigurations"
     	"k8s.io/client-go/discovery"
     	fakediscovery "k8s.io/client-go/discovery/fake"
     	clientset "k8s.io/client-go/kubernetes"
    @@ -68,6 +69,8 @@ import (
     	fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake"
     	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
     	fakecoordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1/fake"
    +	coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
    +	fakecoordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake"
     	coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
     	fakecoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake"
     	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    @@ -112,8 +115,8 @@ import (
     	fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake"
     	rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
     	fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake"
    -	resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2"
    -	fakeresourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake"
    +	resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3"
    +	fakeresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake"
     	schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1"
     	fakeschedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1/fake"
     	schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
    @@ -133,8 +136,12 @@ import (
     
     // NewSimpleClientset returns a clientset that will respond with the provided objects.
     // It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
    -// without applying any validations and/or defaults. It shouldn't be considered a replacement
    +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
     // for a real clientset and is mostly useful in simple unit tests.
    +//
    +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
    +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
    +// via --with-applyconfig).
     func NewSimpleClientset(objects ...runtime.Object) *Clientset {
     	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
     	for _, obj := range objects {
    @@ -176,6 +183,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker {
     	return c.tracker
     }
     
    +// NewClientset returns a clientset that will respond with the provided objects.
    +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
    +// without applying any validations and/or defaults. It shouldn't be considered a replacement
    +// for a real clientset and is mostly useful in simple unit tests.
    +func NewClientset(objects ...runtime.Object) *Clientset {
    +	o := testing.NewFieldManagedObjectTracker(
    +		scheme,
    +		codecs.UniversalDecoder(),
    +		applyconfigurations.NewTypeConverter(scheme),
    +	)
    +	for _, obj := range objects {
    +		if err := o.Add(obj); err != nil {
    +			panic(err)
    +		}
    +	}
    +
    +	cs := &Clientset{tracker: o}
    +	cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
    +	cs.AddReactor("*", "*", testing.ObjectReaction(o))
    +	cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
    +		gvr := action.GetResource()
    +		ns := action.GetNamespace()
    +		watch, err := o.Watch(gvr, ns)
    +		if err != nil {
    +			return false, nil, err
    +		}
    +		return true, watch, nil
    +	})
    +
    +	return cs
    +}
    +
     var (
     	_ clientset.Interface = &Clientset{}
     	_ testing.FakeClient  = &Clientset{}
    @@ -286,6 +325,11 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al
     	return &fakecertificatesv1alpha1.FakeCertificatesV1alpha1{Fake: &c.Fake}
     }
     
    +// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client
    +func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface {
    +	return &fakecoordinationv1alpha1.FakeCoordinationV1alpha1{Fake: &c.Fake}
    +}
    +
     // CoordinationV1beta1 retrieves the CoordinationV1beta1Client
     func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface {
     	return &fakecoordinationv1beta1.FakeCoordinationV1beta1{Fake: &c.Fake}
    @@ -401,9 +445,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface {
     	return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake}
     }
     
    -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client
    -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface {
    -	return &fakeresourcev1alpha2.FakeResourceV1alpha2{Fake: &c.Fake}
    +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client
    +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface {
    +	return &fakeresourcev1alpha3.FakeResourceV1alpha3{Fake: &c.Fake}
     }
     
     // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client
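
Note (illustrative, outside the diff): a hedged sketch of a unit test using the new fake.NewClientset constructor, which backs the fake with the field-managed object tracker so server-side apply behaves more realistically than with the deprecated NewSimpleClientset. Names are arbitrary.

package example_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestApplyConfigMapWithFieldManagement(t *testing.T) {
	// NewClientset wires in the field-managed tracker added in this update.
	cs := fake.NewClientset()

	cm := applycorev1.ConfigMap("demo", "default").WithData(map[string]string{"key": "value"})
	got, err := cs.CoreV1().ConfigMaps("default").Apply(
		context.Background(), cm, metav1.ApplyOptions{FieldManager: "unit-test"})
	if err != nil {
		t.Fatal(err)
	}
	if got.Data["key"] != "value" {
		t.Fatalf("unexpected data after apply: %v", got.Data)
	}
}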
    diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go
    index 339983fe0a..157abae5ff 100644
    --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go
    +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go
    @@ -41,6 +41,7 @@ import (
     	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
     	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
     	coordinationv1 "k8s.io/api/coordination/v1"
    +	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
     	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
     	corev1 "k8s.io/api/core/v1"
     	discoveryv1 "k8s.io/api/discovery/v1"
    @@ -63,7 +64,7 @@ import (
     	rbacv1 "k8s.io/api/rbac/v1"
     	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
     	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
     	schedulingv1 "k8s.io/api/scheduling/v1"
     	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
     	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
    @@ -103,6 +104,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
     	certificatesv1.AddToScheme,
     	certificatesv1beta1.AddToScheme,
     	certificatesv1alpha1.AddToScheme,
    +	coordinationv1alpha1.AddToScheme,
     	coordinationv1beta1.AddToScheme,
     	coordinationv1.AddToScheme,
     	corev1.AddToScheme,
    @@ -126,7 +128,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
     	rbacv1.AddToScheme,
     	rbacv1beta1.AddToScheme,
     	rbacv1alpha1.AddToScheme,
    -	resourcev1alpha2.AddToScheme,
    +	resourcev1alpha3.AddToScheme,
     	schedulingv1alpha1.AddToScheme,
     	schedulingv1beta1.AddToScheme,
     	schedulingv1.AddToScheme,
    diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go
    index 8ebfb7cea5..5262b0f046 100644
    --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go
    +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go
    @@ -41,6 +41,7 @@ import (
     	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
     	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
     	coordinationv1 "k8s.io/api/coordination/v1"
    +	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
     	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
     	corev1 "k8s.io/api/core/v1"
     	discoveryv1 "k8s.io/api/discovery/v1"
    @@ -63,7 +64,7 @@ import (
     	rbacv1 "k8s.io/api/rbac/v1"
     	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
     	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    -	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    +	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
     	schedulingv1 "k8s.io/api/scheduling/v1"
     	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
     	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
    @@ -103,6 +104,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
     	certificatesv1.AddToScheme,
     	certificatesv1beta1.AddToScheme,
     	certificatesv1alpha1.AddToScheme,
    +	coordinationv1alpha1.AddToScheme,
     	coordinationv1beta1.AddToScheme,
     	coordinationv1.AddToScheme,
     	corev1.AddToScheme,
    @@ -126,7 +128,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
     	rbacv1.AddToScheme,
     	rbacv1beta1.AddToScheme,
     	rbacv1alpha1.AddToScheme,
    -	resourcev1alpha2.AddToScheme,
    +	resourcev1alpha3.AddToScheme,
     	schedulingv1alpha1.AddToScheme,
     	schedulingv1beta1.AddToScheme,
     	schedulingv1.AddToScheme,
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go
    index b88598b715..2d371e6fc7 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go
    @@ -43,20 +43,22 @@ var mutatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("Mutating
     
     // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
     func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.MutatingWebhookConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
     func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) {
    +	emptyResult := &v1.MutatingWebhookConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1.MutatingWebhookConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav
     // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
     func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts))
     }
     
     // Create takes the representation of a mutatingWebhookConfiguration and creates it.  Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.MutatingWebhookConfiguration), err
     }
     
     // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.MutatingWebhookConfiguration), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name str
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.MutatingWebhookConfigurationList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context
     
     // Patch applies the patch and returns the patched mutatingWebhookConfiguration.
     func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.MutatingWebhookConfiguration), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingW
     	if name == nil {
     		return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.MutatingWebhookConfiguration), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go
    index c947e6572f..d6c7bec898 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go
    @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1.SchemeGroupVersion.WithKind("Validating
     
     // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any.
     func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicy), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors.
     func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicyList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1.ValidatingAdmissionPolicyList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts metav1.
     // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies.
     func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts))
     }
     
     // Create takes the representation of a validatingAdmissionPolicy and creates it.  Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicy), err
     }
     
     // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicy), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) {
    +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicy), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context,
     
     // Patch applies the patch and returns the patched validatingAdmissionPolicy.
     func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicy), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicy), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicy), err
     }
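
Note (illustrative, outside the diff): the regenerated fakes now pass the caller's options into the recorded actions and return a typed empty object, not nil, alongside any error. A hedged sketch of a test that relies on this behavior:

package example_test

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeGetReturnsEmptyObjectOnError(t *testing.T) {
	cs := fake.NewClientset()
	policy, err := cs.AdmissionregistrationV1().ValidatingAdmissionPolicies().
		Get(context.Background(), "does-not-exist", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	// With the regenerated fakes the returned pointer is an empty object, not nil.
	if policy == nil {
		t.Fatal("expected a non-nil empty ValidatingAdmissionPolicy alongside the error")
	}
}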
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go
    index 9ace735930..5b6719be0a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go
    @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1.SchemeGroupVersion.WithKind("Vali
     
     // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicyBinding), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors.
     func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicyBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1.ValidatingAdmissionPolicyBindingList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts m
     // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings.
     func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts))
     }
     
     // Create takes the representation of a validatingAdmissionPolicyBinding and creates it.  Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicyBinding), err
     }
     
     // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicyBinding), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyBindingList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con
     
     // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding.
     func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicyBinding), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingAdmissionPolicyBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go
    index a6951c736e..ff7fc43013 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go
    @@ -43,20 +43,22 @@ var validatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("Valida
     
     // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
     func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingWebhookConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
     func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) {
    +	emptyResult := &v1.ValidatingWebhookConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1.ValidatingWebhookConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts met
     // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
     func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts))
     }
     
     // Create takes the representation of a validatingWebhookConfiguration and creates it.  Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingWebhookConfiguration), err
     }
     
     // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingWebhookConfiguration), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name s
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ValidatingWebhookConfigurationList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte
     
     // Patch applies the patch and returns the patched validatingWebhookConfiguration.
     func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingWebhookConfiguration), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validat
     	if name == nil {
     		return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ValidatingWebhookConfiguration), err
     }
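
For orientation, a minimal test-style sketch (hypothetical object name "demo", using only the public fake clientset and testing APIs) of how these fake clients are typically driven; with the `*WithOptions` action constructors used above, the actions recorded by the fake now also carry the caller's Get/List/Patch options rather than dropping them:

```go
package main

import (
	"context"
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// A fake clientset pre-loaded with one object (hypothetical name "demo").
	clientset := fake.NewSimpleClientset(&admissionregistrationv1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
	})

	// Call the typed fake client exactly as production code would.
	_, err := clientset.AdmissionregistrationV1().
		ValidatingWebhookConfigurations().
		Get(context.Background(), "demo", metav1.GetOptions{ResourceVersion: "0"})
	if err != nil {
		panic(err)
	}

	// Every call is recorded as a testing.Action on the embedded Fake;
	// tests can inspect the verb and resource (and, with the WithOptions
	// constructors, the options that were passed in).
	for _, action := range clientset.Actions() {
		fmt.Println(action.GetVerb(), action.GetResource().Resource)
	}
}
```
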
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
    index edbc826d19..e863766c60 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/admissionregistration/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface.
    @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface {
     
     // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface
     type mutatingWebhookConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]
     }
     
     // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations
     func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations {
     	return &mutatingWebhookConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration](
    +			"mutatingwebhookconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.MutatingWebhookConfiguration { return &v1.MutatingWebhookConfiguration{} },
    +			func() *v1.MutatingWebhookConfigurationList { return &v1.MutatingWebhookConfigurationList{} }),
     	}
     }
    -
    -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
    -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) {
    -	result = &v1.MutatingWebhookConfiguration{}
    -	err = c.client.Get().
    -		Resource("mutatingwebhookconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
    -func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.MutatingWebhookConfigurationList{}
    -	err = c.client.Get().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
    -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a mutatingWebhookConfiguration and creates it.  Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
    -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) {
    -	result = &v1.MutatingWebhookConfiguration{}
    -	err = c.client.Post().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(mutatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
    -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) {
    -	result = &v1.MutatingWebhookConfiguration{}
    -	err = c.client.Put().
    -		Resource("mutatingwebhookconfigurations").
    -		Name(mutatingWebhookConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(mutatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs.
    -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("mutatingwebhookconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched mutatingWebhookConfiguration.
    -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) {
    -	result = &v1.MutatingWebhookConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("mutatingwebhookconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration.
    -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) {
    -	if mutatingWebhookConfiguration == nil {
    -		return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(mutatingWebhookConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := mutatingWebhookConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1.MutatingWebhookConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("mutatingwebhookconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
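
For reference, a minimal usage sketch (assuming a kubeconfig at the default path; the clientcmd/kubernetes packages below are standard client-go imports, not part of this file) showing that callers keep using the same typed interface, whose Get/List/Watch/Create/Update/Delete/Patch/Apply methods are now served by the embedded gentype client rather than hand-written REST calls:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at the default location; any rest.Config works.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Call sites are unchanged: the typed MutatingWebhookConfigurations client
	// delegates to the embedded generic gentype.ClientWithListAndApply.
	cfgs, err := clientset.AdmissionregistrationV1().
		MutatingWebhookConfigurations().
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range cfgs.Items {
		fmt.Println(c.Name)
	}
}
```
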
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go
    index 0b0b05acd4..1b20e69606 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/admissionregistration/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface.
    @@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface {
     type ValidatingAdmissionPolicyInterface interface {
     	Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicy, error)
     	Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error)
     	Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error)
     	ValidatingAdmissionPolicyExpansion
     }
     
     // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface
     type validatingAdmissionPolicies struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]
     }
     
     // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies
     func newValidatingAdmissionPolicies(c *AdmissionregistrationV1Client) *validatingAdmissionPolicies {
     	return &validatingAdmissionPolicies{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration](
    +			"validatingadmissionpolicies",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.ValidatingAdmissionPolicy { return &v1.ValidatingAdmissionPolicy{} },
    +			func() *v1.ValidatingAdmissionPolicyList { return &v1.ValidatingAdmissionPolicyList{} }),
     	}
     }
    -
    -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any.
    -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1.ValidatingAdmissionPolicy{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors.
    -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ValidatingAdmissionPolicyList{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies.
    -func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingAdmissionPolicy and creates it.  Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
    -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1.ValidatingAdmissionPolicy{}
    -	err = c.client.Post().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
    -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1.ValidatingAdmissionPolicy{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicies").
    -		Name(validatingAdmissionPolicy.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1.ValidatingAdmissionPolicy{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicies").
    -		Name(validatingAdmissionPolicy.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs.
    -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingAdmissionPolicy.
    -func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy.
    -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    -	if validatingAdmissionPolicy == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingAdmissionPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
    -	}
    -	result = &v1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicies").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) {
    -	if validatingAdmissionPolicy == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := validatingAdmissionPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicies").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go
    index 83a8ef163d..44694b2329 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/admissionregistration/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface.
    @@ -55,143 +52,18 @@ type ValidatingAdmissionPolicyBindingInterface interface {
     
     // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
     type validatingAdmissionPolicyBindings struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]
     }
     
     // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
     func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1Client) *validatingAdmissionPolicyBindings {
     	return &validatingAdmissionPolicyBindings{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration](
    +			"validatingadmissionpolicybindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.ValidatingAdmissionPolicyBinding { return &v1.ValidatingAdmissionPolicyBinding{} },
    +			func() *v1.ValidatingAdmissionPolicyBindingList { return &v1.ValidatingAdmissionPolicyBindingList{} }),
     	}
     }
    -
    -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any.
    -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors.
    -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ValidatingAdmissionPolicyBindingList{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings.
    -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it.  Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
    -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Post().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicyBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
    -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(validatingAdmissionPolicyBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicyBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs.
    -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding.
    -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding.
    -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) {
    -	if validatingAdmissionPolicyBinding == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicyBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingAdmissionPolicyBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply")
    -	}
    -	result = &v1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicybindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
    index 065e3c8341..11b4ac0591 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/admissionregistration/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface.
    @@ -55,143 +52,18 @@ type ValidatingWebhookConfigurationInterface interface {
     
     // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface
     type validatingWebhookConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]
     }
     
     // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations
     func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations {
     	return &validatingWebhookConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration](
    +			"validatingwebhookconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.ValidatingWebhookConfiguration { return &v1.ValidatingWebhookConfiguration{} },
    +			func() *v1.ValidatingWebhookConfigurationList { return &v1.ValidatingWebhookConfigurationList{} }),
     	}
     }
    -
    -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
    -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1.ValidatingWebhookConfiguration{}
    -	err = c.client.Get().
    -		Resource("validatingwebhookconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
    -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ValidatingWebhookConfigurationList{}
    -	err = c.client.Get().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
    -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingWebhookConfiguration and creates it.  Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
    -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1.ValidatingWebhookConfiguration{}
    -	err = c.client.Post().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
    -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1.ValidatingWebhookConfiguration{}
    -	err = c.client.Put().
    -		Resource("validatingwebhookconfigurations").
    -		Name(validatingWebhookConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs.
    -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingwebhookconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingWebhookConfiguration.
    -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1.ValidatingWebhookConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingwebhookconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration.
    -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
    -	if validatingWebhookConfiguration == nil {
    -		return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingWebhookConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingWebhookConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1.ValidatingWebhookConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingwebhookconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go
    index f4358ce46c..ef4d843e00 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go
    @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("Vali
     
     // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any.
     func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1alpha1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicy), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors.
     func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicyList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1alpha1.ValidatingAdmissionPolicyList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.List
     // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies.
     func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts))
     }
     
     // Create takes the representation of a validatingAdmissionPolicy and creates it.  Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicy), err
     }
     
     // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicy), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) {
    +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicy), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context,
     
     // Patch applies the patch and returns the patched validatingAdmissionPolicy.
     func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicy), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicy), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicy), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go
    index c520655f9d..f7cc966fb1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go
    @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1alpha1.SchemeGroupVersion.WithKind
     
     // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1alpha1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors.
     func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicyBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1alpha1.ValidatingAdmissionPolicyBindingList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v
     // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings.
     func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts))
     }
     
     // Create takes the representation of a validatingAdmissionPolicyBinding and creates it.  Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err
     }
     
     // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyBindingList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con
     
     // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding.
     func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    index 1d994b5abf..c2b7c825cb 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface.
    @@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface {
     type ValidatingAdmissionPolicyInterface interface {
     	Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error)
     	Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error)
     	Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error)
     	ValidatingAdmissionPolicyExpansion
     }
     
     // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface
     type validatingAdmissionPolicies struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]
     }
     
     // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies
     func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies {
     	return &validatingAdmissionPolicies{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration](
    +			"validatingadmissionpolicies",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.ValidatingAdmissionPolicy { return &v1alpha1.ValidatingAdmissionPolicy{} },
    +			func() *v1alpha1.ValidatingAdmissionPolicyList { return &v1alpha1.ValidatingAdmissionPolicyList{} }),
     	}
     }
    -
    -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any.
    -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicy{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors.
    -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.ValidatingAdmissionPolicyList{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies.
    -func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingAdmissionPolicy and creates it.  Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
    -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicy{}
    -	err = c.client.Post().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
    -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicy{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicies").
    -		Name(validatingAdmissionPolicy.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicy{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicies").
    -		Name(validatingAdmissionPolicy.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs.
    -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingAdmissionPolicy.
    -func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy.
    -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    -	if validatingAdmissionPolicy == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingAdmissionPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicies").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) {
    -	if validatingAdmissionPolicy == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := validatingAdmissionPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
    -	}
    -
    -	result = &v1alpha1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicies").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    index 39823ca82b..d8d0796ead 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface.
    @@ -55,143 +52,20 @@ type ValidatingAdmissionPolicyBindingInterface interface {
     
     // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
     type validatingAdmissionPolicyBindings struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]
     }
     
     // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
     func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings {
     	return &validatingAdmissionPolicyBindings{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration](
    +			"validatingadmissionpolicybindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.ValidatingAdmissionPolicyBinding { return &v1alpha1.ValidatingAdmissionPolicyBinding{} },
    +			func() *v1alpha1.ValidatingAdmissionPolicyBindingList {
    +				return &v1alpha1.ValidatingAdmissionPolicyBindingList{}
    +			}),
     	}
     }
    -
    -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any.
    -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors.
    -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.ValidatingAdmissionPolicyBindingList{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings.
    -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it.  Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
    -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Post().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicyBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
    -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(validatingAdmissionPolicyBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicyBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs.
    -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding.
    -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1alpha1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding.
    -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) {
    -	if validatingAdmissionPolicyBinding == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicyBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingAdmissionPolicyBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicybindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go
    index 9d85aff37f..7671549323 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go
    @@ -43,20 +43,22 @@ var mutatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("Mut
     
     // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
     func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.MutatingWebhookConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
     func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) {
    +	emptyResult := &v1beta1.MutatingWebhookConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1beta1.MutatingWebhookConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.Li
     // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
     func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts))
     }
     
     // Create takes the representation of a mutatingWebhookConfiguration and creates it.  Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.MutatingWebhookConfiguration), err
     }
     
     // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.MutatingWebhookConfiguration), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name str
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context
     
     // Patch applies the patch and returns the patched mutatingWebhookConfiguration.
     func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.MutatingWebhookConfiguration), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingW
     	if name == nil {
     		return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.MutatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.MutatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.MutatingWebhookConfiguration), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go
    index 90cb4ff6ca..e30891c779 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go
    @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("Valid
     
     // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any.
     func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1beta1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicy), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors.
     func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicyList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1beta1.ValidatingAdmissionPolicyList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.List
     // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies.
     func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts))
     }
     
     // Create takes the representation of a validatingAdmissionPolicy and creates it.  Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicy), err
     }
     
     // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicy), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) {
    +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicy), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context,
     
     // Patch applies the patch and returns the patched validatingAdmissionPolicy.
     func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicy), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicy), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.ValidatingAdmissionPolicy{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicy), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go
    index f771f81f30..207db37529 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go
    @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1beta1.SchemeGroupVersion.WithKind(
     
     // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1beta1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors.
     func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicyBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1beta1.ValidatingAdmissionPolicyBindingList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v
     // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings.
     func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts))
     }
     
     // Create takes the representation of a validatingAdmissionPolicyBinding and creates it.  Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err
     }
     
     // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
     func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyBindingList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con
     
     // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding.
     func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid
     	if name == nil {
     		return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicyBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go
    index 41e3a7c1ee..f78a31ee0e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go
    @@ -43,20 +43,22 @@ var validatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("V
     
     // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
     func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingWebhookConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
     func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) {
    +	emptyResult := &v1beta1.ValidatingWebhookConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1beta1.ValidatingWebhookConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.
     // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
     func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts))
     }
     
     // Create takes the representation of a validatingWebhookConfiguration and creates it.  Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingWebhookConfiguration), err
     }
     
     // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
     func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingWebhookConfiguration), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name s
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte
     
     // Patch applies the patch and returns the patched validatingWebhookConfiguration.
     func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    +	emptyResult := &v1beta1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingWebhookConfiguration), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validat
     	if name == nil {
     		return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ValidatingWebhookConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingWebhookConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ValidatingWebhookConfiguration), err
     }
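
The fake clients above now build their recorded actions with the *WithOptions helpers, so the options a test passes in travel with the action, and a failed call returns a non-nil empty object together with the error instead of nil. A minimal test-style sketch of that behavior (assuming only the public k8s.io/client-go/kubernetes/fake clientset; the test name is illustrative):

package example

import (
	"context"
	"testing"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Sketch: Get on a missing object still fails with NotFound, but the typed
// fake now hands back a non-nil empty object, and the recorded action was
// constructed with the caller's GetOptions.
func TestFakeGetReturnsEmptyResult(t *testing.T) {
	cs := fake.NewSimpleClientset()

	obj, err := cs.AdmissionregistrationV1beta1().
		ValidatingWebhookConfigurations().
		Get(context.Background(), "missing", metav1.GetOptions{})
	if !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	if obj == nil {
		t.Fatal("expected a non-nil empty result, got nil") // behavior introduced by this change
	}
	if len(cs.Actions()) != 1 || cs.Actions()[0].GetVerb() != "get" {
		t.Fatalf("unexpected actions recorded: %v", cs.Actions())
	}
}
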
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    index ca6bb8bd50..7a5bc8b9b3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface.
    @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface {
     
     // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface
     type mutatingWebhookConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]
     }
     
     // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations
     func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations {
     	return &mutatingWebhookConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration](
    +			"mutatingwebhookconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.MutatingWebhookConfiguration { return &v1beta1.MutatingWebhookConfiguration{} },
    +			func() *v1beta1.MutatingWebhookConfigurationList { return &v1beta1.MutatingWebhookConfigurationList{} }),
     	}
     }
    -
    -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
    -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    -	result = &v1beta1.MutatingWebhookConfiguration{}
    -	err = c.client.Get().
    -		Resource("mutatingwebhookconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
    -func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.MutatingWebhookConfigurationList{}
    -	err = c.client.Get().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
    -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a mutatingWebhookConfiguration and creates it.  Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
    -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    -	result = &v1beta1.MutatingWebhookConfiguration{}
    -	err = c.client.Post().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(mutatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
    -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    -	result = &v1beta1.MutatingWebhookConfiguration{}
    -	err = c.client.Put().
    -		Resource("mutatingwebhookconfigurations").
    -		Name(mutatingWebhookConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(mutatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs.
    -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("mutatingwebhookconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("mutatingwebhookconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched mutatingWebhookConfiguration.
    -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    -	result = &v1beta1.MutatingWebhookConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("mutatingwebhookconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration.
    -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
    -	if mutatingWebhookConfiguration == nil {
    -		return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(mutatingWebhookConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := mutatingWebhookConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.MutatingWebhookConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("mutatingwebhookconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
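
The hand-expanded REST plumbing removed above is replaced by the embedded generic gentype.ClientWithListAndApply, so the exported MutatingWebhookConfigurationInterface is unchanged and caller code compiles as before. A minimal caller-side sketch, assuming only the public kubernetes clientset API (the helper function name is illustrative):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// Sketch: Get/List/Watch/Apply are now served by the embedded generic client,
// but from the caller's perspective nothing changes.
func listMutatingWebhookConfigurations(cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	list, err := cs.AdmissionregistrationV1beta1().
		MutatingWebhookConfigurations().
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, item := range list.Items {
		fmt.Println(item.Name)
	}
	return nil
}
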
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
    index bea51b587f..0023d8837c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface.
    @@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface {
     type ValidatingAdmissionPolicyInterface interface {
     	Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
     	Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error)
     	Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error)
     	ValidatingAdmissionPolicyExpansion
     }
     
     // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface
     type validatingAdmissionPolicies struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]
     }
     
     // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies
     func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies {
     	return &validatingAdmissionPolicies{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration](
    +			"validatingadmissionpolicies",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.ValidatingAdmissionPolicy { return &v1beta1.ValidatingAdmissionPolicy{} },
    +			func() *v1beta1.ValidatingAdmissionPolicyList { return &v1beta1.ValidatingAdmissionPolicyList{} }),
     	}
     }
    -
    -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any.
    -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicy{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors.
    -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.ValidatingAdmissionPolicyList{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies.
    -func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingAdmissionPolicy and creates it.  Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
    -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicy{}
    -	err = c.client.Post().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
    -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicy{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicies").
    -		Name(validatingAdmissionPolicy.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicy{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicies").
    -		Name(validatingAdmissionPolicy.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs.
    -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicies").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingAdmissionPolicy.
    -func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingadmissionpolicies").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy.
    -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    -	if validatingAdmissionPolicy == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingAdmissionPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicies").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
    -	if validatingAdmissionPolicy == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := validatingAdmissionPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.ValidatingAdmissionPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicies").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    index bba37bb047..8168d8cbcd 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface.
    @@ -55,143 +52,20 @@ type ValidatingAdmissionPolicyBindingInterface interface {
     
     // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
     type validatingAdmissionPolicyBindings struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]
     }
     
     // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
     func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings {
     	return &validatingAdmissionPolicyBindings{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration](
    +			"validatingadmissionpolicybindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.ValidatingAdmissionPolicyBinding { return &v1beta1.ValidatingAdmissionPolicyBinding{} },
    +			func() *v1beta1.ValidatingAdmissionPolicyBindingList {
    +				return &v1beta1.ValidatingAdmissionPolicyBindingList{}
    +			}),
     	}
     }
    -
    -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any.
    -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors.
    -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.ValidatingAdmissionPolicyBindingList{}
    -	err = c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings.
    -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it.  Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
    -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Post().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicyBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
    -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Put().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(validatingAdmissionPolicyBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingAdmissionPolicyBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs.
    -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingadmissionpolicybindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding.
    -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    -	result = &v1beta1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingadmissionpolicybindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding.
    -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
    -	if validatingAdmissionPolicyBinding == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingAdmissionPolicyBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingAdmissionPolicyBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.ValidatingAdmissionPolicyBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingadmissionpolicybindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    index 5ba5974d7a..5abd96823d 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface.
    @@ -55,143 +52,20 @@ type ValidatingWebhookConfigurationInterface interface {
     
     // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface
     type validatingWebhookConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]
     }
     
     // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations
     func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations {
     	return &validatingWebhookConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration](
    +			"validatingwebhookconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.ValidatingWebhookConfiguration { return &v1beta1.ValidatingWebhookConfiguration{} },
    +			func() *v1beta1.ValidatingWebhookConfigurationList {
    +				return &v1beta1.ValidatingWebhookConfigurationList{}
    +			}),
     	}
     }
    -
    -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
    -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1beta1.ValidatingWebhookConfiguration{}
    -	err = c.client.Get().
    -		Resource("validatingwebhookconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
    -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.ValidatingWebhookConfigurationList{}
    -	err = c.client.Get().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
    -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a validatingWebhookConfiguration and creates it.  Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
    -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1beta1.ValidatingWebhookConfiguration{}
    -	err = c.client.Post().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
    -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1beta1.ValidatingWebhookConfiguration{}
    -	err = c.client.Put().
    -		Resource("validatingwebhookconfigurations").
    -		Name(validatingWebhookConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(validatingWebhookConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs.
    -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("validatingwebhookconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("validatingwebhookconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched validatingWebhookConfiguration.
    -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    -	result = &v1beta1.ValidatingWebhookConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("validatingwebhookconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration.
    -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
    -	if validatingWebhookConfiguration == nil {
    -		return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(validatingWebhookConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := validatingWebhookConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.ValidatingWebhookConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("validatingwebhookconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go
    index 738c68038b..e9f0b78d46 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go
    @@ -43,20 +43,22 @@ var storageversionsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersion")
     
     // Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any.
     func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) {
    +	emptyResult := &v1alpha1.StorageVersion{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(storageversionsResource, name), &v1alpha1.StorageVersion{})
    +		Invokes(testing.NewRootGetActionWithOptions(storageversionsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersion), err
     }
     
     // List takes label and field selectors, and returns the list of StorageVersions that match those selectors.
     func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) {
    +	emptyResult := &v1alpha1.StorageVersionList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(storageversionsResource, storageversionsKind, opts), &v1alpha1.StorageVersionList{})
    +		Invokes(testing.NewRootListActionWithOptions(storageversionsResource, storageversionsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested storageVersions.
     func (c *FakeStorageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(storageversionsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionsResource, opts))
     }
     
     // Create takes the representation of a storageVersion and creates it.  Returns the server's representation of the storageVersion, and an error, if there is any.
     func (c *FakeStorageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) {
    +	emptyResult := &v1alpha1.StorageVersion{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{})
    +		Invokes(testing.NewRootCreateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersion), err
     }
     
     // Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any.
     func (c *FakeStorageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) {
    +	emptyResult := &v1alpha1.StorageVersion{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersion), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) {
    +func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) {
    +	emptyResult := &v1alpha1.StorageVersion{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(storageversionsResource, "status", storageVersion), &v1alpha1.StorageVersion{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionsResource, "status", storageVersion, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersion), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(storageversionsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(storageversionsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.Dele
     
     // Patch applies the patch and returns the patched storageVersion.
     func (c *FakeStorageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) {
    +	emptyResult := &v1alpha1.StorageVersion{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, name, pt, data, subresources...), &v1alpha1.StorageVersion{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersion), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeStorageVersions) Apply(ctx context.Context, storageVersion *apiserv
     	if name == nil {
     		return nil, fmt.Errorf("storageVersion.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.StorageVersion{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data), &v1alpha1.StorageVersion{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersion), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeStorageVersions) ApplyStatus(ctx context.Context, storageVersion *a
     	if name == nil {
     		return nil, fmt.Errorf("storageVersion.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.StorageVersion{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.StorageVersion{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersion), err
     }
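
For types with a status subresource, the fake client now routes UpdateStatus and ApplyStatus through the *SubresourceActionWithOptions constructors, so a test can observe both the verb and the "status" subresource on the recorded action. A hedged sketch, assuming the object is seeded into the fake tracker first (the test name is illustrative):

package example

import (
	"context"
	"testing"

	v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Sketch: UpdateStatus on the fake records a root "update" action on the
// "status" subresource for the seeded StorageVersion object.
func TestFakeUpdateStatusRecordsSubresource(t *testing.T) {
	sv := &v1alpha1.StorageVersion{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	cs := fake.NewSimpleClientset(sv)

	if _, err := cs.InternalV1alpha1().StorageVersions().
		UpdateStatus(context.Background(), sv, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("UpdateStatus: %v", err)
	}

	last := cs.Actions()[len(cs.Actions())-1]
	if last.GetVerb() != "update" || last.GetSubresource() != "status" {
		t.Fatalf("unexpected action: verb=%s subresource=%s", last.GetVerb(), last.GetSubresource())
	}
}
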
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go
    index 18789c7f82..436593f7fa 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // StorageVersionsGetter has a method to return a StorageVersionInterface.
    @@ -43,6 +40,7 @@ type StorageVersionsGetter interface {
     type StorageVersionInterface interface {
     	Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error)
     	Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type StorageVersionInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error)
     	Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error)
     	StorageVersionExpansion
     }
     
     // storageVersions implements StorageVersionInterface
     type storageVersions struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration]
     }
     
     // newStorageVersions returns a StorageVersions
     func newStorageVersions(c *InternalV1alpha1Client) *storageVersions {
     	return &storageVersions{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration](
    +			"storageversions",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.StorageVersion { return &v1alpha1.StorageVersion{} },
    +			func() *v1alpha1.StorageVersionList { return &v1alpha1.StorageVersionList{} }),
     	}
     }
    -
    -// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any.
    -func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) {
    -	result = &v1alpha1.StorageVersion{}
    -	err = c.client.Get().
    -		Resource("storageversions").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of StorageVersions that match those selectors.
    -func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.StorageVersionList{}
    -	err = c.client.Get().
    -		Resource("storageversions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested storageVersions.
    -func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("storageversions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a storageVersion and creates it.  Returns the server's representation of the storageVersion, and an error, if there is any.
    -func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) {
    -	result = &v1alpha1.StorageVersion{}
    -	err = c.client.Post().
    -		Resource("storageversions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageVersion).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any.
    -func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) {
    -	result = &v1alpha1.StorageVersion{}
    -	err = c.client.Put().
    -		Resource("storageversions").
    -		Name(storageVersion.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageVersion).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) {
    -	result = &v1alpha1.StorageVersion{}
    -	err = c.client.Put().
    -		Resource("storageversions").
    -		Name(storageVersion.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageVersion).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs.
    -func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("storageversions").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("storageversions").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched storageVersion.
    -func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) {
    -	result = &v1alpha1.StorageVersion{}
    -	err = c.client.Patch(pt).
    -		Resource("storageversions").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersion.
    -func (c *storageVersions) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) {
    -	if storageVersion == nil {
    -		return nil, fmt.Errorf("storageVersion provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(storageVersion)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := storageVersion.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("storageVersion.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.StorageVersion{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("storageversions").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *storageVersions) ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) {
    -	if storageVersion == nil {
    -		return nil, fmt.Errorf("storageVersion provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(storageVersion)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := storageVersion.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("storageVersion.Name must be provided to Apply")
    -	}
    -
    -	result = &v1alpha1.StorageVersion{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("storageversions").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
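
Context for the vendored refactor above (and the analogous per-resource files that follow): the generated client now embeds the shared generic client from k8s.io/client-go/gentype instead of carrying its own REST plumbing, so downstream call sites keep compiling unchanged. A minimal sketch, not part of the patch; the rest.Config wiring is assumed:

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func listStorageVersions(cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// The StorageVersion client now embeds gentype.ClientWithListAndApply,
	// but Get/List/Watch/Apply keep their existing signatures.
	svs, err := cs.InternalV1alpha1().StorageVersions().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(svs.Items))
	return nil
}
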
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
    index f4b198265d..252f47ba29 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/apps/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface.
    @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface {
     
     // controllerRevisions implements ControllerRevisionInterface
     type controllerRevisions struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration]
     }
     
     // newControllerRevisions returns a ControllerRevisions
     func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions {
     	return &controllerRevisions{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration](
    +			"controllerrevisions",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.ControllerRevision { return &v1.ControllerRevision{} },
    +			func() *v1.ControllerRevisionList { return &v1.ControllerRevisionList{} }),
     	}
     }
    -
    -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
    -func (c *controllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) {
    -	result = &v1.ControllerRevision{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
    -func (c *controllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ControllerRevisionList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested controllerRevisions.
    -func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
    -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) {
    -	result = &v1.ControllerRevision{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(controllerRevision).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
    -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) {
    -	result = &v1.ControllerRevision{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(controllerRevision.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(controllerRevision).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
    -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched controllerRevision.
    -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) {
    -	result = &v1.ControllerRevision{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision.
    -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) {
    -	if controllerRevision == nil {
    -		return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(controllerRevision)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := controllerRevision.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply")
    -	}
    -	result = &v1.ControllerRevision{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
    index 53e5392879..28917a7960 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/apps/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // DaemonSetsGetter has a method to return a DaemonSetInterface.
    @@ -43,6 +40,7 @@ type DaemonSetsGetter interface {
     type DaemonSetInterface interface {
     	Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error)
     	Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type DaemonSetInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error)
     	Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error)
     	DaemonSetExpansion
     }
     
     // daemonSets implements DaemonSetInterface
     type daemonSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration]
     }
     
     // newDaemonSets returns a DaemonSets
     func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets {
     	return &daemonSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration](
    +			"daemonsets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.DaemonSet { return &v1.DaemonSet{} },
    +			func() *v1.DaemonSetList { return &v1.DaemonSetList{} }),
     	}
     }
    -
    -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
    -func (c *daemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) {
    -	result = &v1.DaemonSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
    -func (c *daemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.DaemonSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested daemonSets.
    -func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
    -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) {
    -	result = &v1.DaemonSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
    -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) {
    -	result = &v1.DaemonSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(daemonSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) {
    -	result = &v1.DaemonSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(daemonSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
    -func (c *daemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *daemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched daemonSet.
    -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) {
    -	result = &v1.DaemonSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet.
    -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) {
    -	if daemonSet == nil {
    -		return nil, fmt.Errorf("daemonSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(daemonSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := daemonSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
    -	}
    -	result = &v1.DaemonSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) {
    -	if daemonSet == nil {
    -		return nil, fmt.Errorf("daemonSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(daemonSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := daemonSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.DaemonSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
    index ccc2049ff7..871d51cfe2 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
    @@ -22,7 +22,6 @@ import (
     	"context"
     	json "encoding/json"
     	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/apps/v1"
     	autoscalingv1 "k8s.io/api/autoscaling/v1"
    @@ -31,8 +30,8 @@ import (
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
     	applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // DeploymentsGetter has a method to return a DeploymentInterface.
    @@ -45,6 +44,7 @@ type DeploymentsGetter interface {
     type DeploymentInterface interface {
     	Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error)
     	Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -53,6 +53,7 @@ type DeploymentInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error)
     	Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error)
     	GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
     	UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
    @@ -63,209 +64,27 @@ type DeploymentInterface interface {
     
     // deployments implements DeploymentInterface
     type deployments struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration]
     }
     
     // newDeployments returns a Deployments
     func newDeployments(c *AppsV1Client, namespace string) *deployments {
     	return &deployments{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration](
    +			"deployments",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Deployment { return &v1.Deployment{} },
    +			func() *v1.DeploymentList { return &v1.DeploymentList{} }),
     	}
     }
     
    -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
    -func (c *deployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) {
    -	result = &v1.Deployment{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Deployments that match those selectors.
    -func (c *deployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.DeploymentList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested deployments.
    -func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) {
    -	result = &v1.Deployment{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) {
    -	result = &v1.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) {
    -	result = &v1.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
    -func (c *deployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *deployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched deployment.
    -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) {
    -	result = &v1.Deployment{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment.
    -func (c *deployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -	result = &v1.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
     func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    +	err = c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("deployments").
     		Name(deploymentName).
     		SubResource("scale").
    @@ -278,8 +97,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio
     // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("deployments").
     		Name(deploymentName).
     		SubResource("scale").
    @@ -303,8 +122,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca
     	}
     
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    +	err = c.GetClient().Patch(types.ApplyPatchType).
    +		Namespace(c.GetNamespace()).
     		Resource("deployments").
     		Name(deploymentName).
     		SubResource("scale").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go
    index f691ba9acd..c609ef534a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go
    @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1.SchemeGroupVersion.WithKind("ControllerRevision
     
     // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
     func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) {
    +	emptyResult := &v1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1.ControllerRevision{})
    +		Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ControllerRevision), err
     }
     
     // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
     func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) {
    +	emptyResult := &v1.ControllerRevisionList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1.ControllerRevisionList{})
    +		Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOpti
     // Watch returns a watch.Interface that watches the requested controllerRevisions.
     func (c *FakeControllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
     func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) {
    +	emptyResult := &v1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{})
    +		Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ControllerRevision), err
     }
     
     // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
     func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) {
    +	emptyResult := &v1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{})
    +		Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ControllerRevision), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ControllerRevisionList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts met
     
     // Patch applies the patch and returns the patched controllerRevision.
     func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) {
    +	emptyResult := &v1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1.ControllerRevision{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ControllerRevision), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision
     	if name == nil {
     		return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ControllerRevision{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ControllerRevision), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go
    index 3e0df72352..bac3fc1225 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go
    @@ -44,22 +44,24 @@ var daemonsetsKind = v1.SchemeGroupVersion.WithKind("DaemonSet")
     
     // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
     func (c *FakeDaemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) {
    +	emptyResult := &v1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1.DaemonSet{})
    +		Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.DaemonSet), err
     }
     
     // List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
     func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) {
    +	emptyResult := &v1.DaemonSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1.DaemonSetList{})
    +		Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested daemonSets.
     func (c *FakeDaemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
     func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) {
    +	emptyResult := &v1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{})
    +		Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.DaemonSet), err
     }
     
     // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
     func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) {
    +	emptyResult := &v1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.DaemonSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) {
    +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) {
    +	emptyResult := &v1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1.DaemonSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.DaemonSet), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts metav1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.DaemonSetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.Delet
     
     // Patch applies the patch and returns the patched daemonSet.
     func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) {
    +	emptyResult := &v1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.DaemonSet), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetA
     	if name == nil {
     		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.DaemonSet), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.Daem
     	if name == nil {
     		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.DaemonSet), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go
    index da1896fe60..8ed8432883 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go
    @@ -46,22 +46,24 @@ var deploymentsKind = v1.SchemeGroupVersion.WithKind("Deployment")
     
     // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
     func (c *FakeDeployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) {
    +	emptyResult := &v1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1.Deployment{})
    +		Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Deployment), err
     }
     
     // List takes label and field selectors, and returns the list of Deployments that match those selectors.
     func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) {
    +	emptyResult := &v1.DeploymentList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1.DeploymentList{})
    +		Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -80,40 +82,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested deployments.
     func (c *FakeDeployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) {
    +	emptyResult := &v1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{})
    +		Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Deployment), err
     }
     
     // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) {
    +	emptyResult := &v1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{})
    +		Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Deployment), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) {
    +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) {
    +	emptyResult := &v1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1.Deployment{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Deployment), err
     }
    @@ -128,7 +133,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts metav1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.DeploymentList{})
     	return err
    @@ -136,11 +141,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.Dele
     
     // Patch applies the patch and returns the patched deployment.
     func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) {
    +	emptyResult := &v1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Deployment), err
     }
    @@ -158,11 +164,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1.Deployme
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Deployment), err
     }
    @@ -181,33 +188,36 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1.De
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Deployment), err
     }
     
     // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any.
     func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{})
    +		Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
     
     // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
    @@ -222,11 +232,12 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string,
     	if err != nil {
     		return nil, err
     	}
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go
    index dedf19b42f..942a4e64a3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go
    @@ -46,22 +46,24 @@ var replicasetsKind = v1.SchemeGroupVersion.WithKind("ReplicaSet")
     
     // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
     func (c *FakeReplicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) {
    +	emptyResult := &v1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1.ReplicaSet{})
    +		Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicaSet), err
     }
     
     // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
     func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) {
    +	emptyResult := &v1.ReplicaSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1.ReplicaSetList{})
    +		Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -80,40 +82,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested replicaSets.
     func (c *FakeReplicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
     func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) {
    +	emptyResult := &v1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{})
    +		Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicaSet), err
     }
     
     // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
     func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) {
    +	emptyResult := &v1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicaSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) {
    +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) {
    +	emptyResult := &v1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1.ReplicaSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicaSet), err
     }
    @@ -128,7 +133,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts metav1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ReplicaSetList{})
     	return err
    @@ -136,11 +141,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.Dele
     
     // Patch applies the patch and returns the patched replicaSet.
     func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) {
    +	emptyResult := &v1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicaSet), err
     }
    @@ -158,11 +164,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaS
     	if name == nil {
     		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicaSet), err
     }
    @@ -181,33 +188,36 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.Re
     	if name == nil {
     		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicaSet), err
     }
     
     // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any.
     func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{})
    +		Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
     
     // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
    @@ -222,11 +232,12 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string,
     	if err != nil {
     		return nil, err
     	}
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go
    index f1d7d96e8d..ae4e811fb7 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go
    @@ -46,22 +46,24 @@ var statefulsetsKind = v1.SchemeGroupVersion.WithKind("StatefulSet")
     
     // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
     func (c *FakeStatefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) {
    +	emptyResult := &v1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1.StatefulSet{})
    +		Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StatefulSet), err
     }
     
     // List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
     func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) {
    +	emptyResult := &v1.StatefulSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1.StatefulSetList{})
    +		Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -80,40 +82,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (r
     // Watch returns a watch.Interface that watches the requested statefulSets.
     func (c *FakeStatefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
     func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) {
    +	emptyResult := &v1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{})
    +		Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StatefulSet), err
     }
     
     // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
     func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) {
    +	emptyResult := &v1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StatefulSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) {
    +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) {
    +	emptyResult := &v1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1.StatefulSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StatefulSet), err
     }
    @@ -128,7 +133,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts metav1.
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.StatefulSetList{})
     	return err
    @@ -136,11 +141,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.Del
     
     // Patch applies the patch and returns the patched statefulSet.
     func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) {
    +	emptyResult := &v1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StatefulSet), err
     }
    @@ -158,11 +164,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1.Statef
     	if name == nil {
     		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StatefulSet), err
     }
    @@ -181,33 +188,36 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.
     	if name == nil {
     		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StatefulSet), err
     }
     
     // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any.
     func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{})
    +		Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
     
     // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
    @@ -222,11 +232,12 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin
     	if err != nil {
     		return nil, err
     	}
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
    index 917ed521f4..d6dec016bb 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
    @@ -22,7 +22,6 @@ import (
     	"context"
     	json "encoding/json"
     	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/apps/v1"
     	autoscalingv1 "k8s.io/api/autoscaling/v1"
    @@ -31,8 +30,8 @@ import (
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
     	applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ReplicaSetsGetter has a method to return a ReplicaSetInterface.
    @@ -45,6 +44,7 @@ type ReplicaSetsGetter interface {
     type ReplicaSetInterface interface {
     	Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error)
     	Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -53,6 +53,7 @@ type ReplicaSetInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error)
     	Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error)
     	GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
     	UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
    @@ -63,209 +64,27 @@ type ReplicaSetInterface interface {
     
     // replicaSets implements ReplicaSetInterface
     type replicaSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration]
     }
     
     // newReplicaSets returns a ReplicaSets
     func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets {
     	return &replicaSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration](
    +			"replicasets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.ReplicaSet { return &v1.ReplicaSet{} },
    +			func() *v1.ReplicaSetList { return &v1.ReplicaSetList{} }),
     	}
     }
     
    -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
    -func (c *replicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) {
    -	result = &v1.ReplicaSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
    -func (c *replicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ReplicaSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested replicaSets.
    -func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
    -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) {
    -	result = &v1.ReplicaSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
    -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) {
    -	result = &v1.ReplicaSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(replicaSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) {
    -	result = &v1.ReplicaSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(replicaSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
    -func (c *replicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *replicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched replicaSet.
    -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) {
    -	result = &v1.ReplicaSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet.
    -func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) {
    -	if replicaSet == nil {
    -		return nil, fmt.Errorf("replicaSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicaSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := replicaSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
    -	}
    -	result = &v1.ReplicaSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) {
    -	if replicaSet == nil {
    -		return nil, fmt.Errorf("replicaSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicaSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := replicaSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.ReplicaSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
     func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    +	err = c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("replicasets").
     		Name(replicaSetName).
     		SubResource("scale").
    @@ -278,8 +97,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio
     // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("replicasets").
     		Name(replicaSetName).
     		SubResource("scale").
    @@ -303,8 +122,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca
     	}
     
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    +	err = c.GetClient().Patch(types.ApplyPatchType).
    +		Namespace(c.GetNamespace()).
     		Resource("replicasets").
     		Name(replicaSetName).
     		SubResource("scale").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
    index d1fbb915d8..b25ed07238 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
    @@ -22,7 +22,6 @@ import (
     	"context"
     	json "encoding/json"
     	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/apps/v1"
     	autoscalingv1 "k8s.io/api/autoscaling/v1"
    @@ -31,8 +30,8 @@ import (
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
     	applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // StatefulSetsGetter has a method to return a StatefulSetInterface.
    @@ -45,6 +44,7 @@ type StatefulSetsGetter interface {
     type StatefulSetInterface interface {
     	Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error)
     	Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -53,6 +53,7 @@ type StatefulSetInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error)
     	Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error)
     	GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
     	UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
    @@ -63,209 +64,27 @@ type StatefulSetInterface interface {
     
     // statefulSets implements StatefulSetInterface
     type statefulSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration]
     }
     
     // newStatefulSets returns a StatefulSets
     func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets {
     	return &statefulSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration](
    +			"statefulsets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.StatefulSet { return &v1.StatefulSet{} },
    +			func() *v1.StatefulSetList { return &v1.StatefulSetList{} }),
     	}
     }
     
    -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
    -func (c *statefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) {
    -	result = &v1.StatefulSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
    -func (c *statefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.StatefulSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested statefulSets.
    -func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
    -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) {
    -	result = &v1.StatefulSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
    -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) {
    -	result = &v1.StatefulSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(statefulSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) {
    -	result = &v1.StatefulSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(statefulSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
    -func (c *statefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *statefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched statefulSet.
    -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) {
    -	result = &v1.StatefulSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet.
    -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) {
    -	if statefulSet == nil {
    -		return nil, fmt.Errorf("statefulSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(statefulSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := statefulSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
    -	}
    -	result = &v1.StatefulSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) {
    -	if statefulSet == nil {
    -		return nil, fmt.Errorf("statefulSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(statefulSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := statefulSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.StatefulSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
     func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    +	err = c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("statefulsets").
     		Name(statefulSetName).
     		SubResource("scale").
    @@ -278,8 +97,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt
     // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("statefulsets").
     		Name(statefulSetName).
     		SubResource("scale").
    @@ -303,8 +122,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s
     	}
     
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    +	err = c.GetClient().Patch(types.ApplyPatchType).
    +		Namespace(c.GetNamespace()).
     		Resource("statefulsets").
     		Name(statefulSetName).
     		SubResource("scale").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
    index 0c3f49ba14..185f7cc4e0 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/apps/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface.
    @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface {
     
     // controllerRevisions implements ControllerRevisionInterface
     type controllerRevisions struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration]
     }
     
     // newControllerRevisions returns a ControllerRevisions
     func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions {
     	return &controllerRevisions{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration](
    +			"controllerrevisions",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.ControllerRevision { return &v1beta1.ControllerRevision{} },
    +			func() *v1beta1.ControllerRevisionList { return &v1beta1.ControllerRevisionList{} }),
     	}
     }
    -
    -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
    -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) {
    -	result = &v1beta1.ControllerRevision{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
    -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.ControllerRevisionList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested controllerRevisions.
    -func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
    -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) {
    -	result = &v1beta1.ControllerRevision{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(controllerRevision).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
    -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) {
    -	result = &v1beta1.ControllerRevision{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(controllerRevision.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(controllerRevision).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
    -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched controllerRevision.
    -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) {
    -	result = &v1beta1.ControllerRevision{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision.
    -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) {
    -	if controllerRevision == nil {
    -		return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(controllerRevision)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := controllerRevision.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.ControllerRevision{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
    index 281758c435..06e4b7bf93 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/apps/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // DeploymentsGetter has a method to return a DeploymentInterface.
    @@ -43,6 +40,7 @@ type DeploymentsGetter interface {
     type DeploymentInterface interface {
     	Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error)
     	Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type DeploymentInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error)
     	Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
     	DeploymentExpansion
     }
     
     // deployments implements DeploymentInterface
     type deployments struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration]
     }
     
     // newDeployments returns a Deployments
     func newDeployments(c *AppsV1beta1Client, namespace string) *deployments {
     	return &deployments{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration](
    +			"deployments",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Deployment { return &v1beta1.Deployment{} },
    +			func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }),
     	}
     }
    -
    -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
    -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Deployments that match those selectors.
    -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.DeploymentList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested deployments.
    -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
    -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched deployment.
    -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment.
    -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
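The regenerated typed clients above drop their hand-rolled rest.Interface plumbing and delegate every verb to gentype.ClientWithListAndApply, while the exported DeploymentInterface stays the same, so existing callers keep compiling and behaving as before. A minimal sketch of such a caller, assuming a hypothetical kubeconfig path and object names not taken from this repository:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path, for illustration only.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Get still goes through DeploymentInterface; only the implementation behind it
	// now forwards to the generic client constructed in newDeployments above.
	d, err := clientset.AppsV1beta1().Deployments("default").Get(context.TODO(), "my-deployment", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Name)
}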
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go
    index 1954c94703..7ea2b2e11b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go
    @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1beta1.SchemeGroupVersion.WithKind("ControllerRev
     
     // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
     func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) {
    +	emptyResult := &v1beta1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{})
    +		Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ControllerRevision), err
     }
     
     // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
     func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) {
    +	emptyResult := &v1beta1.ControllerRevisionList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta1.ControllerRevisionList{})
    +		Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions)
     // Watch returns a watch.Interface that watches the requested controllerRevisions.
     func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
     func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) {
    +	emptyResult := &v1beta1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{})
    +		Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ControllerRevision), err
     }
     
     // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
     func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) {
    +	emptyResult := &v1beta1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{})
    +		Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ControllerRevision), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.
     
     // Patch applies the patch and returns the patched controllerRevision.
     func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) {
    +	emptyResult := &v1beta1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ControllerRevision), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision
     	if name == nil {
     		return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ControllerRevision{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ControllerRevision), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go
    index 9614852f74..05c557ecb3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go
    @@ -44,22 +44,24 @@ var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment")
     
     // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
     func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{})
    +		Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
     
     // List takes label and field selectors, and returns the list of Deployments that match those selectors.
     func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
    +	emptyResult := &v1beta1.DeploymentList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{})
    +		Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested deployments.
     func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
    +		Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
     
     // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
    +		Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) {
    +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched deployment.
     func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta1.Dep
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1bet
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go
    index 2124515cfe..c38690554a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go
    @@ -44,22 +44,24 @@ var statefulsetsKind = v1beta1.SchemeGroupVersion.WithKind("StatefulSet")
     
     // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
     func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) {
    +	emptyResult := &v1beta1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{})
    +		Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StatefulSet), err
     }
     
     // List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
     func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) {
    +	emptyResult := &v1beta1.StatefulSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta1.StatefulSetList{})
    +		Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested statefulSets.
     func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
     func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) {
    +	emptyResult := &v1beta1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{})
    +		Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StatefulSet), err
     }
     
     // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
     func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) {
    +	emptyResult := &v1beta1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StatefulSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) {
    +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) {
    +	emptyResult := &v1beta1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StatefulSet), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteO
     
     // Patch applies the patch and returns the patched statefulSet.
     func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) {
    +	emptyResult := &v1beta1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StatefulSet), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.S
     	if name == nil {
     		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StatefulSet), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1b
     	if name == nil {
     		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StatefulSet), err
     }
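The regenerated fake clients route request options through the new *WithOptions action constructors and, when a reactor yields no object, return a non-nil emptyResult together with the error instead of a nil pointer. A minimal sketch of the resulting behaviour, assuming the standard fake clientset; the object names are illustrative:

package main

import (
	"context"
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake clientset with one StatefulSet so the first Get succeeds.
	clientset := fake.NewSimpleClientset(&appsv1beta1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})

	sts, err := clientset.AppsV1beta1().StatefulSets("default").Get(context.TODO(), "web", metav1.GetOptions{})
	fmt.Println(sts.Name, err) // "web" <nil>

	// A miss returns the emptyResult object plus a NotFound error, not a nil pointer.
	missing, err := clientset.AppsV1beta1().StatefulSets("default").Get(context.TODO(), "absent", metav1.GetOptions{})
	fmt.Println(missing == nil, err) // false, plus the NotFound error
}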
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
    index 3f1aebcffb..1ff69eb993 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/apps/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // StatefulSetsGetter has a method to return a StatefulSetInterface.
    @@ -43,6 +40,7 @@ type StatefulSetsGetter interface {
     type StatefulSetInterface interface {
     	Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error)
     	Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type StatefulSetInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error)
     	Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error)
     	StatefulSetExpansion
     }
     
     // statefulSets implements StatefulSetInterface
     type statefulSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration]
     }
     
     // newStatefulSets returns a StatefulSets
     func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets {
     	return &statefulSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration](
    +			"statefulsets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.StatefulSet { return &v1beta1.StatefulSet{} },
    +			func() *v1beta1.StatefulSetList { return &v1beta1.StatefulSetList{} }),
     	}
     }
    -
    -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
    -func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) {
    -	result = &v1beta1.StatefulSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
    -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.StatefulSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested statefulSets.
    -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
    -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) {
    -	result = &v1beta1.StatefulSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
    -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) {
    -	result = &v1beta1.StatefulSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(statefulSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) {
    -	result = &v1beta1.StatefulSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(statefulSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
    -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched statefulSet.
    -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) {
    -	result = &v1beta1.StatefulSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet.
    -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) {
    -	if statefulSet == nil {
    -		return nil, fmt.Errorf("statefulSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(statefulSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := statefulSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.StatefulSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) {
    -	if statefulSet == nil {
    -		return nil, fmt.Errorf("statefulSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(statefulSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := statefulSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.StatefulSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
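The handwritten Apply/ApplyStatus bodies removed here marshalled the apply configuration to JSON and issued an ApplyPatchType patch; the generic client now does the equivalent work, so server-side apply through the typed interface is unchanged for callers. A minimal sketch of such a call, assuming the same hypothetical kubeconfig as the earlier sketch; the field-manager name and replica count are illustrative:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta1apply "k8s.io/client-go/applyconfigurations/apps/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Declarative intent: StatefulSet "web" in "default" should run 3 replicas.
	desired := appsv1beta1apply.StatefulSet("web", "default").
		WithSpec(appsv1beta1apply.StatefulSetSpec().WithReplicas(3))

	// Apply sends an ApplyPatchType patch of the configuration, mirroring what the
	// removed handwritten method did, now via the generic gentype client.
	sts, err := clientset.AppsV1beta1().StatefulSets("default").Apply(
		context.TODO(), desired, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(*sts.Spec.Replicas)
}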
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
    index e1643277a6..6caee6a725 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
    @@ -20,17 +20,14 @@ package v1beta2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta2 "k8s.io/api/apps/v1beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface.
    @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface {
     
     // controllerRevisions implements ControllerRevisionInterface
     type controllerRevisions struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration]
     }
     
     // newControllerRevisions returns a ControllerRevisions
     func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions {
     	return &controllerRevisions{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration](
    +			"controllerrevisions",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta2.ControllerRevision { return &v1beta2.ControllerRevision{} },
    +			func() *v1beta2.ControllerRevisionList { return &v1beta2.ControllerRevisionList{} }),
     	}
     }
    -
    -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
    -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) {
    -	result = &v1beta2.ControllerRevision{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
    -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta2.ControllerRevisionList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested controllerRevisions.
    -func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
    -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) {
    -	result = &v1beta2.ControllerRevision{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(controllerRevision).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
    -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) {
    -	result = &v1beta2.ControllerRevision{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(controllerRevision.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(controllerRevision).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
    -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched controllerRevision.
    -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) {
    -	result = &v1beta2.ControllerRevision{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision.
    -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) {
    -	if controllerRevision == nil {
    -		return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(controllerRevision)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := controllerRevision.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply")
    -	}
    -	result = &v1beta2.ControllerRevision{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("controllerrevisions").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
    index 1391df87d2..766dc6d433 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
    @@ -20,17 +20,14 @@ package v1beta2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta2 "k8s.io/api/apps/v1beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // DaemonSetsGetter has a method to return a DaemonSetInterface.
    @@ -43,6 +40,7 @@ type DaemonSetsGetter interface {
     type DaemonSetInterface interface {
     	Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error)
     	Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type DaemonSetInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error)
     	Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error)
     	DaemonSetExpansion
     }
     
     // daemonSets implements DaemonSetInterface
     type daemonSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration]
     }
     
     // newDaemonSets returns a DaemonSets
     func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets {
     	return &daemonSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration](
    +			"daemonsets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta2.DaemonSet { return &v1beta2.DaemonSet{} },
    +			func() *v1beta2.DaemonSetList { return &v1beta2.DaemonSetList{} }),
     	}
     }
    -
    -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
    -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) {
    -	result = &v1beta2.DaemonSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
    -func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta2.DaemonSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested daemonSets.
    -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
    -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) {
    -	result = &v1beta2.DaemonSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
    -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) {
    -	result = &v1beta2.DaemonSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(daemonSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) {
    -	result = &v1beta2.DaemonSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(daemonSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
    -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched daemonSet.
    -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) {
    -	result = &v1beta2.DaemonSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet.
    -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) {
    -	if daemonSet == nil {
    -		return nil, fmt.Errorf("daemonSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(daemonSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := daemonSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
    -	}
    -	result = &v1beta2.DaemonSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) {
    -	if daemonSet == nil {
    -		return nil, fmt.Errorf("daemonSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(daemonSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := daemonSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta2.DaemonSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
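
Editor's note: the Apply/ApplyStatus bodies removed above marshaled the DaemonSetApplyConfiguration to JSON and issued an ApplyPatchType PATCH by hand (adding the "status" subresource for ApplyStatus); the gentype-based client that replaces them keeps the same caller-facing contract. A minimal caller-side sketch of server-side apply, assuming an already-configured clientset; the resource name, namespace, label, and field manager are hypothetical:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
	"k8s.io/client-go/kubernetes"
)

// applyDaemonSetLabels shows the caller-side shape of server-side apply:
// build a DaemonSetApplyConfiguration and hand it to Apply, which issues the
// ApplyPatchType PATCH that the removed code above performed by hand.
func applyDaemonSetLabels(ctx context.Context, cs kubernetes.Interface) error {
	// "example-ds" and "default" are hypothetical; FieldManager is required for apply.
	ds := appsv1beta2.DaemonSet("example-ds", "default").
		WithLabels(map[string]string{"team": "observability"})

	_, err := cs.AppsV1beta2().DaemonSets("default").
		Apply(ctx, ds, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
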
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
    index 5bda0d92c1..6592ee8cd9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
    @@ -20,17 +20,14 @@ package v1beta2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta2 "k8s.io/api/apps/v1beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // DeploymentsGetter has a method to return a DeploymentInterface.
    @@ -43,6 +40,7 @@ type DeploymentsGetter interface {
     type DeploymentInterface interface {
     	Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error)
     	Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type DeploymentInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error)
     	Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error)
     	DeploymentExpansion
     }
     
     // deployments implements DeploymentInterface
     type deployments struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration]
     }
     
     // newDeployments returns a Deployments
     func newDeployments(c *AppsV1beta2Client, namespace string) *deployments {
     	return &deployments{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration](
    +			"deployments",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta2.Deployment { return &v1beta2.Deployment{} },
    +			func() *v1beta2.DeploymentList { return &v1beta2.DeploymentList{} }),
     	}
     }
    -
    -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
    -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) {
    -	result = &v1beta2.Deployment{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Deployments that match those selectors.
    -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta2.DeploymentList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested deployments.
    -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) {
    -	result = &v1beta2.Deployment{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) {
    -	result = &v1beta2.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) {
    -	result = &v1beta2.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
    -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched deployment.
    -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) {
    -	result = &v1beta2.Deployment{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment.
    -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -	result = &v1beta2.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta2.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
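
Editor's note: deployment.go follows the same pattern as daemonset.go above, swapping roughly 200 lines of hand-rolled REST plumbing for a single embedded gentype.ClientWithListAndApply; the constructor passes the resource name, REST client, parameter codec, namespace, and empty-object/empty-list factories, and the generic type provides Get, List, Watch, Create, Update, UpdateStatus, Delete, DeleteCollection, Patch, Apply, and ApplyStatus. Callers are unaffected. A minimal sketch under that assumption, with hypothetical namespace, selector, and timeout:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAndGetDeployments exercises the same DeploymentInterface methods that the
// diff above re-routes through gentype; the TimeoutSeconds handling that used to
// be the inline time.Duration conversion now lives inside the generic client.
func listAndGetDeployments(ctx context.Context, cs kubernetes.Interface) error {
	timeout := int64(30) // seconds; hypothetical value
	list, err := cs.AppsV1beta2().Deployments("default").
		List(ctx, metav1.ListOptions{LabelSelector: "app=web", TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	for _, d := range list.Items {
		got, err := cs.AppsV1beta2().Deployments("default").Get(ctx, d.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		fmt.Printf("deployment %s has %d ready replicas\n", got.Name, got.Status.ReadyReplicas)
	}
	return nil
}
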
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go
    index 1bf7fb3314..45b2050706 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go
    @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1beta2.SchemeGroupVersion.WithKind("ControllerRev
     
     // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
     func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) {
    +	emptyResult := &v1beta2.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{})
    +		Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ControllerRevision), err
     }
     
     // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
     func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) {
    +	emptyResult := &v1beta2.ControllerRevisionList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta2.ControllerRevisionList{})
    +		Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions)
     // Watch returns a watch.Interface that watches the requested controllerRevisions.
     func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
     func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) {
    +	emptyResult := &v1beta2.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{})
    +		Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ControllerRevision), err
     }
     
     // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
     func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) {
    +	emptyResult := &v1beta2.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{})
    +		Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ControllerRevision), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.
     
     // Patch applies the patch and returns the patched controllerRevision.
     func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) {
    +	emptyResult := &v1beta2.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ControllerRevision), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision
     	if name == nil {
     		return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.ControllerRevision{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ControllerRevision{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ControllerRevision), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go
    index 8f5cfa5a8a..61ceeb1411 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go
    @@ -44,22 +44,24 @@ var daemonsetsKind = v1beta2.SchemeGroupVersion.WithKind("DaemonSet")
     
     // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
     func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) {
    +	emptyResult := &v1beta2.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{})
    +		Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.DaemonSet), err
     }
     
     // List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
     func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) {
    +	emptyResult := &v1beta2.DaemonSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta2.DaemonSetList{})
    +		Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested daemonSets.
     func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
     func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) {
    +	emptyResult := &v1beta2.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{})
    +		Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.DaemonSet), err
     }
     
     // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
     func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) {
    +	emptyResult := &v1beta2.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.DaemonSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) {
    +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) {
    +	emptyResult := &v1beta2.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.DaemonSet), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.Delete
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOpt
     
     // Patch applies the patch and returns the patched daemonSet.
     func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) {
    +	emptyResult := &v1beta2.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.DaemonSet), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.Daemo
     	if name == nil {
     		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.DaemonSet), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2
     	if name == nil {
     		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.DaemonSet), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go
    index c9e8ab48bb..d849856a40 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go
    @@ -44,22 +44,24 @@ var deploymentsKind = v1beta2.SchemeGroupVersion.WithKind("Deployment")
     
     // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
     func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) {
    +	emptyResult := &v1beta2.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{})
    +		Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Deployment), err
     }
     
     // List takes label and field selectors, and returns the list of Deployments that match those selectors.
     func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) {
    +	emptyResult := &v1beta2.DeploymentList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta2.DeploymentList{})
    +		Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested deployments.
     func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) {
    +	emptyResult := &v1beta2.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{})
    +		Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Deployment), err
     }
     
     // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) {
    +	emptyResult := &v1beta2.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{})
    +		Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Deployment), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) {
    +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) {
    +	emptyResult := &v1beta2.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Deployment), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched deployment.
     func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) {
    +	emptyResult := &v1beta2.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Deployment), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta2.Dep
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Deployment), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1bet
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Deployment), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go
    index 46e1a78a7a..1f957f0843 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go
    @@ -44,22 +44,24 @@ var replicasetsKind = v1beta2.SchemeGroupVersion.WithKind("ReplicaSet")
     
     // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
     func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) {
    +	emptyResult := &v1beta2.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{})
    +		Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ReplicaSet), err
     }
     
     // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
     func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) {
    +	emptyResult := &v1beta2.ReplicaSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta2.ReplicaSetList{})
    +		Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested replicaSets.
     func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
     func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) {
    +	emptyResult := &v1beta2.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{})
    +		Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ReplicaSet), err
     }
     
     // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
     func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) {
    +	emptyResult := &v1beta2.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ReplicaSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) {
    +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) {
    +	emptyResult := &v1beta2.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ReplicaSet), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched replicaSet.
     func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) {
    +	emptyResult := &v1beta2.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ReplicaSet), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.Rep
     	if name == nil {
     		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ReplicaSet), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1bet
     	if name == nil {
     		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.ReplicaSet), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go
    index 684f799256..ac8945aa77 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go
    @@ -44,22 +44,24 @@ var statefulsetsKind = v1beta2.SchemeGroupVersion.WithKind("StatefulSet")
     
     // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
     func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) {
    +	emptyResult := &v1beta2.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{})
    +		Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.StatefulSet), err
     }
     
     // List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
     func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) {
    +	emptyResult := &v1beta2.StatefulSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta2.StatefulSetList{})
    +		Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested statefulSets.
     func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
     func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) {
    +	emptyResult := &v1beta2.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{})
    +		Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.StatefulSet), err
     }
     
     // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
     func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) {
    +	emptyResult := &v1beta2.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.StatefulSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) {
    +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) {
    +	emptyResult := &v1beta2.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.StatefulSet), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteO
     
     // Patch applies the patch and returns the patched statefulSet.
     func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) {
    +	emptyResult := &v1beta2.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.StatefulSet), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.S
     	if name == nil {
     		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.StatefulSet), err
     }
    @@ -179,33 +186,36 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1b
     	if name == nil {
     		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.StatefulSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.StatefulSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.StatefulSet), err
     }
     
     // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any.
     func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) {
    +	emptyResult := &v1beta2.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &v1beta2.Scale{})
    +		Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Scale), err
     }
     
     // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) {
    +	emptyResult := &v1beta2.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &v1beta2.Scale{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Scale), err
     }
    @@ -220,11 +230,12 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin
     	if err != nil {
     		return nil, err
     	}
    +	emptyResult := &v1beta2.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &v1beta2.Scale{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.Scale), err
     }
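
Editor's note: fake_statefulset.go also covers the scale subresource; GetScale, UpdateScale, and ApplyScale now go through the ...WithOptions action constructors, and ApplyScale is corrected in this update to patch the "scale" subresource instead of "status". A caller-side sketch of the two operations whose fakes are updated in the hunk above, with hypothetical parameter values:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// resizeStatefulSet reads the scale subresource and writes back a new replica
// count via GetScale and UpdateScale on the apps/v1beta2 typed client.
func resizeStatefulSet(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	scale, err := cs.AppsV1beta2().StatefulSets(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = cs.AppsV1beta2().StatefulSets(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}
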
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
    index 988d898f79..90380ca980 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
    @@ -20,17 +20,14 @@ package v1beta2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta2 "k8s.io/api/apps/v1beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ReplicaSetsGetter has a method to return a ReplicaSetInterface.
    @@ -43,6 +40,7 @@ type ReplicaSetsGetter interface {
     type ReplicaSetInterface interface {
     	Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error)
     	Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type ReplicaSetInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error)
     	Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error)
     	ReplicaSetExpansion
     }
     
     // replicaSets implements ReplicaSetInterface
     type replicaSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration]
     }
     
     // newReplicaSets returns a ReplicaSets
     func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets {
     	return &replicaSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration](
    +			"replicasets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta2.ReplicaSet { return &v1beta2.ReplicaSet{} },
    +			func() *v1beta2.ReplicaSetList { return &v1beta2.ReplicaSetList{} }),
     	}
     }
    -
    -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
    -func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) {
    -	result = &v1beta2.ReplicaSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
    -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta2.ReplicaSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested replicaSets.
    -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
    -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) {
    -	result = &v1beta2.ReplicaSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
    -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) {
    -	result = &v1beta2.ReplicaSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(replicaSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) {
    -	result = &v1beta2.ReplicaSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(replicaSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
    -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched replicaSet.
    -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) {
    -	result = &v1beta2.ReplicaSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet.
    -func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) {
    -	if replicaSet == nil {
    -		return nil, fmt.Errorf("replicaSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicaSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := replicaSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
    -	}
    -	result = &v1beta2.ReplicaSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) {
    -	if replicaSet == nil {
    -		return nil, fmt.Errorf("replicaSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicaSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := replicaSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta2.ReplicaSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
    index 0416675d6d..f2d673abb9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
    @@ -22,15 +22,14 @@ import (
     	"context"
     	json "encoding/json"
     	"fmt"
    -	"time"
     
     	v1beta2 "k8s.io/api/apps/v1beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // StatefulSetsGetter has a method to return a StatefulSetInterface.
    @@ -43,6 +42,7 @@ type StatefulSetsGetter interface {
     type StatefulSetInterface interface {
     	Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error)
     	Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,6 +51,7 @@ type StatefulSetInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error)
     	Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error)
     	GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error)
     	UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error)
    @@ -61,209 +62,27 @@ type StatefulSetInterface interface {
     
     // statefulSets implements StatefulSetInterface
     type statefulSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration]
     }
     
     // newStatefulSets returns a StatefulSets
     func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets {
     	return &statefulSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration](
    +			"statefulsets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta2.StatefulSet { return &v1beta2.StatefulSet{} },
    +			func() *v1beta2.StatefulSetList { return &v1beta2.StatefulSetList{} }),
     	}
     }
     
    -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
    -func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) {
    -	result = &v1beta2.StatefulSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
    -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta2.StatefulSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested statefulSets.
    -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
    -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) {
    -	result = &v1beta2.StatefulSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
    -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) {
    -	result = &v1beta2.StatefulSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(statefulSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) {
    -	result = &v1beta2.StatefulSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(statefulSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(statefulSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
    -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched statefulSet.
    -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) {
    -	result = &v1beta2.StatefulSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet.
    -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) {
    -	if statefulSet == nil {
    -		return nil, fmt.Errorf("statefulSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(statefulSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := statefulSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
    -	}
    -	result = &v1beta2.StatefulSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) {
    -	if statefulSet == nil {
    -		return nil, fmt.Errorf("statefulSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(statefulSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := statefulSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("statefulSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta2.StatefulSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("statefulsets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any.
     func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) {
     	result = &v1beta2.Scale{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    +	err = c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("statefulsets").
     		Name(statefulSetName).
     		SubResource("scale").
    @@ -276,8 +95,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt
     // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) {
     	result = &v1beta2.Scale{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("statefulsets").
     		Name(statefulSetName).
     		SubResource("scale").
    @@ -301,8 +120,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s
     	}
     
     	result = &v1beta2.Scale{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    +	err = c.GetClient().Patch(types.ApplyPatchType).
    +		Namespace(c.GetNamespace()).
     		Resource("statefulsets").
     		Name(statefulSetName).
     		SubResource("scale").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go
    index e683b3eaaa..7e7c3138a5 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go
    @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectReview")
     
     // Create takes the representation of a selfSubjectReview and creates it.  Returns the server's representation of the selfSubjectReview, and an error, if there is any.
     func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) {
    +	emptyResult := &v1.SelfSubjectReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1.SelfSubjectReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.SelfSubjectReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go
    index 500e87d065..a22f335429 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go
    @@ -37,10 +37,11 @@ var tokenreviewsKind = v1.SchemeGroupVersion.WithKind("TokenReview")
     
     // Create takes the representation of a tokenReview and creates it.  Returns the server's representation of the tokenReview, and an error, if there is any.
     func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) {
    +	emptyResult := &v1.TokenReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1.TokenReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.TokenReview), err
     }
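
The fake clients above switch to the ...WithOptions test actions and, when the object tracker returns nothing, hand back a non-nil empty typed object instead of nil. A minimal test sketch against the fake clientset (the test body and token value are assumptions for illustration only):

package clientgotest

import (
	"context"
	"testing"

	authnv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestTokenReviewCreate(t *testing.T) {
	cs := fake.NewSimpleClientset()
	// The fake now records opts via NewRootCreateActionWithOptions and, on failure,
	// returns a non-nil empty TokenReview rather than nil.
	tr, err := cs.AuthenticationV1().TokenReviews().Create(
		context.TODO(),
		&authnv1.TokenReview{Spec: authnv1.TokenReviewSpec{Token: "dummy"}},
		metav1.CreateOptions{},
	)
	if err != nil {
		t.Fatalf("create TokenReview: %v", err)
	}
	if tr == nil {
		t.Fatal("expected a non-nil TokenReview")
	}
}
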
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
    index bfb9603d67..720dd9e7e9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1 "k8s.io/api/authentication/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface.
    @@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface {
     
     // selfSubjectReviews implements SelfSubjectReviewInterface
     type selfSubjectReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1.SelfSubjectReview]
     }
     
     // newSelfSubjectReviews returns a SelfSubjectReviews
     func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews {
     	return &selfSubjectReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1.SelfSubjectReview](
    +			"selfsubjectreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.SelfSubjectReview { return &v1.SelfSubjectReview{} }),
     	}
     }
    -
    -// Create takes the representation of a selfSubjectReview and creates it.  Returns the server's representation of the selfSubjectReview, and an error, if there is any.
    -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) {
    -	result = &v1.SelfSubjectReview{}
    -	err = c.client.Post().
    -		Resource("selfsubjectreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(selfSubjectReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
    index ca7cd47d26..52c55fab08 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1 "k8s.io/api/authentication/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // TokenReviewsGetter has a method to return a TokenReviewInterface.
    @@ -41,24 +41,17 @@ type TokenReviewInterface interface {
     
     // tokenReviews implements TokenReviewInterface
     type tokenReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1.TokenReview]
     }
     
     // newTokenReviews returns a TokenReviews
     func newTokenReviews(c *AuthenticationV1Client) *tokenReviews {
     	return &tokenReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1.TokenReview](
    +			"tokenreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.TokenReview { return &v1.TokenReview{} }),
     	}
     }
    -
    -// Create takes the representation of a tokenReview and creates it.  Returns the server's representation of the tokenReview, and an error, if there is any.
    -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) {
    -	result = &v1.TokenReview{}
    -	err = c.client.Post().
    -		Resource("tokenreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(tokenReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go
    index a20b3dd764..680460f459 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go
    @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1alpha1.SchemeGroupVersion.WithKind("SelfSubjectRe
     
     // Create takes the representation of a selfSubjectReview and creates it.  Returns the server's representation of the selfSubjectReview, and an error, if there is any.
     func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) {
    +	emptyResult := &v1alpha1.SelfSubjectReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1alpha1.SelfSubjectReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.SelfSubjectReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
    index 7f8b12a46f..f034bcdbe3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1alpha1 "k8s.io/api/authentication/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface.
    @@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface {
     
     // selfSubjectReviews implements SelfSubjectReviewInterface
     type selfSubjectReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1alpha1.SelfSubjectReview]
     }
     
     // newSelfSubjectReviews returns a SelfSubjectReviews
     func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews {
     	return &selfSubjectReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1alpha1.SelfSubjectReview](
    +			"selfsubjectreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.SelfSubjectReview { return &v1alpha1.SelfSubjectReview{} }),
     	}
     }
    -
    -// Create takes the representation of a selfSubjectReview and creates it.  Returns the server's representation of the selfSubjectReview, and an error, if there is any.
    -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) {
    -	result = &v1alpha1.SelfSubjectReview{}
    -	err = c.client.Post().
    -		Resource("selfsubjectreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(selfSubjectReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go
    index 4a9db85cf5..33e130e9cc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go
    @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectRev
     
     // Create takes the representation of a selfSubjectReview and creates it.  Returns the server's representation of the selfSubjectReview, and an error, if there is any.
     func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) {
    +	emptyResult := &v1beta1.SelfSubjectReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1beta1.SelfSubjectReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.SelfSubjectReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go
    index b1988a67a3..b512f5c146 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go
    @@ -37,10 +37,11 @@ var tokenreviewsKind = v1beta1.SchemeGroupVersion.WithKind("TokenReview")
     
     // Create takes the representation of a tokenReview and creates it.  Returns the server's representation of the tokenReview, and an error, if there is any.
     func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) {
    +	emptyResult := &v1beta1.TokenReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1beta1.TokenReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.TokenReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
    index 9d54826a31..d083ba8fa9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1beta1 "k8s.io/api/authentication/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface.
    @@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface {
     
     // selfSubjectReviews implements SelfSubjectReviewInterface
     type selfSubjectReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1beta1.SelfSubjectReview]
     }
     
     // newSelfSubjectReviews returns a SelfSubjectReviews
     func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews {
     	return &selfSubjectReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1beta1.SelfSubjectReview](
    +			"selfsubjectreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.SelfSubjectReview { return &v1beta1.SelfSubjectReview{} }),
     	}
     }
    -
    -// Create takes the representation of a selfSubjectReview and creates it.  Returns the server's representation of the selfSubjectReview, and an error, if there is any.
    -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) {
    -	result = &v1beta1.SelfSubjectReview{}
    -	err = c.client.Post().
    -		Resource("selfsubjectreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(selfSubjectReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
    index 5da1224337..982534935e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1beta1 "k8s.io/api/authentication/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // TokenReviewsGetter has a method to return a TokenReviewInterface.
    @@ -41,24 +41,17 @@ type TokenReviewInterface interface {
     
     // tokenReviews implements TokenReviewInterface
     type tokenReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1beta1.TokenReview]
     }
     
     // newTokenReviews returns a TokenReviews
     func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews {
     	return &tokenReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1beta1.TokenReview](
    +			"tokenreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.TokenReview { return &v1beta1.TokenReview{} }),
     	}
     }
    -
    -// Create takes the representation of a tokenReview and creates it.  Returns the server's representation of the tokenReview, and an error, if there is any.
    -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) {
    -	result = &v1beta1.TokenReview{}
    -	err = c.client.Post().
    -		Resource("tokenreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(tokenReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go
    index 43ea05328c..dd23481d39 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go
    @@ -38,11 +38,12 @@ var localsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("LocalSubject
     
     // Create takes the representation of a localSubjectAccessReview and creates it.  Returns the server's representation of the localSubjectAccessReview, and an error, if there is any.
     func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) {
    +	emptyResult := &v1.LocalSubjectAccessReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1.LocalSubjectAccessReview{})
    +		Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.LocalSubjectAccessReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go
    index 27642266d6..d04b8502f3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go
    @@ -37,10 +37,11 @@ var selfsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectAc
     
     // Create takes the representation of a selfSubjectAccessReview and creates it.  Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any.
     func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) {
    +	emptyResult := &v1.SelfSubjectAccessReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1.SelfSubjectAccessReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.SelfSubjectAccessReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go
    index cd6c682d16..71ed326f8b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go
    @@ -37,10 +37,11 @@ var selfsubjectrulesreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectRul
     
     // Create takes the representation of a selfSubjectRulesReview and creates it.  Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any.
     func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) {
    +	emptyResult := &v1.SelfSubjectRulesReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1.SelfSubjectRulesReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.SelfSubjectRulesReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go
    index 09dab64807..358ba9aa77 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go
    @@ -37,10 +37,11 @@ var subjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SubjectAccessRevi
     
     // Create takes the representation of a subjectAccessReview and creates it.  Returns the server's representation of the subjectAccessReview, and an error, if there is any.
     func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) {
    +	emptyResult := &v1.SubjectAccessReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1.SubjectAccessReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.SubjectAccessReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
    index 84b2efe166..3d058941a2 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1 "k8s.io/api/authorization/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface.
    @@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface {
     
     // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface
     type localSubjectAccessReviews struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.Client[*v1.LocalSubjectAccessReview]
     }
     
     // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews
     func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews {
     	return &localSubjectAccessReviews{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClient[*v1.LocalSubjectAccessReview](
    +			"localsubjectaccessreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }),
     	}
     }
    -
    -// Create takes the representation of a localSubjectAccessReview and creates it.  Returns the server's representation of the localSubjectAccessReview, and an error, if there is any.
    -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) {
    -	result = &v1.LocalSubjectAccessReview{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("localsubjectaccessreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(localSubjectAccessReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
    index 2006196c11..9e874bee5a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1 "k8s.io/api/authorization/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface.
    @@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface {
     
     // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
     type selfSubjectAccessReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1.SelfSubjectAccessReview]
     }
     
     // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews
     func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews {
     	return &selfSubjectAccessReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1.SelfSubjectAccessReview](
    +			"selfsubjectaccessreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.SelfSubjectAccessReview { return &v1.SelfSubjectAccessReview{} }),
     	}
     }
    -
    -// Create takes the representation of a selfSubjectAccessReview and creates it.  Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any.
    -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) {
    -	result = &v1.SelfSubjectAccessReview{}
    -	err = c.client.Post().
    -		Resource("selfsubjectaccessreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(selfSubjectAccessReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
    index 25d99f7b52..567b63ec4c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1 "k8s.io/api/authorization/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface.
    @@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface {
     
     // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
     type selfSubjectRulesReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1.SelfSubjectRulesReview]
     }
     
     // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews
     func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews {
     	return &selfSubjectRulesReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1.SelfSubjectRulesReview](
    +			"selfsubjectrulesreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }),
     	}
     }
    -
    -// Create takes the representation of a selfSubjectRulesReview and creates it.  Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any.
    -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) {
    -	result = &v1.SelfSubjectRulesReview{}
    -	err = c.client.Post().
    -		Resource("selfsubjectrulesreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(selfSubjectRulesReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
    index 8ac0566a2e..52e8d74e57 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1 "k8s.io/api/authorization/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface.
    @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface {
     
     // subjectAccessReviews implements SubjectAccessReviewInterface
     type subjectAccessReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1.SubjectAccessReview]
     }
     
     // newSubjectAccessReviews returns a SubjectAccessReviews
     func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews {
     	return &subjectAccessReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1.SubjectAccessReview](
    +			"subjectaccessreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }),
     	}
     }
    -
    -// Create takes the representation of a subjectAccessReview and creates it.  Returns the server's representation of the subjectAccessReview, and an error, if there is any.
    -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) {
    -	result = &v1.SubjectAccessReview{}
    -	err = c.client.Post().
    -		Resource("subjectaccessreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(subjectAccessReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
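
The authorization review clients above are create-only, cluster-scoped resources, so they now embed the plain gentype.Client with an empty namespace (localsubjectaccessreviews keeps the namespace argument). The wire call is the same POST the removed hand-written Create issued. A minimal sketch; the RBAC attributes being checked are assumptions for illustration:

package main

import (
	"context"
	"fmt"

	authzv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func canListPods(ctx context.Context, cfg *rest.Config) (bool, error) {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return false, err
	}
	// POST /apis/authorization.k8s.io/v1/selfsubjectaccessreviews, now routed
	// through the generic gentype.Client instead of a hand-written method.
	ssar, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, &authzv1.SelfSubjectAccessReview{
		Spec: authzv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authzv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Resource:  "pods",
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	fmt.Printf("allowed=%v reason=%q\n", ssar.Status.Allowed, ssar.Status.Reason)
	return ssar.Status.Allowed, nil
}
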
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go
    index 104e979d19..e2bf627736 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go
    @@ -38,11 +38,12 @@ var localsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("LocalSu
     
     // Create takes the representation of a localSubjectAccessReview and creates it.  Returns the server's representation of the localSubjectAccessReview, and an error, if there is any.
     func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) {
    +	emptyResult := &v1beta1.LocalSubjectAccessReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1beta1.LocalSubjectAccessReview{})
    +		Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.LocalSubjectAccessReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go
    index 517e48b760..996e4d4108 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go
    @@ -37,10 +37,11 @@ var selfsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubj
     
     // Create takes the representation of a selfSubjectAccessReview and creates it.  Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any.
     func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) {
    +	emptyResult := &v1beta1.SelfSubjectAccessReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1beta1.SelfSubjectAccessReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.SelfSubjectAccessReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go
    index 3aed050fcf..6e4c758909 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go
    @@ -37,10 +37,11 @@ var selfsubjectrulesreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubje
     
     // Create takes the representation of a selfSubjectRulesReview and creates it.  Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any.
     func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) {
    +	emptyResult := &v1beta1.SelfSubjectRulesReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1beta1.SelfSubjectRulesReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.SelfSubjectRulesReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go
    index e9bfa521a2..aab6e08dc2 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go
    @@ -37,10 +37,11 @@ var subjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SubjectAcces
     
     // Create takes the representation of a subjectAccessReview and creates it.  Returns the server's representation of the subjectAccessReview, and an error, if there is any.
     func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) {
    +	emptyResult := &v1beta1.SubjectAccessReview{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1beta1.SubjectAccessReview{})
    +		Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.SubjectAccessReview), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
    index 78584ba945..302c094b39 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1beta1 "k8s.io/api/authorization/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface.
    @@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface {
     
     // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface
     type localSubjectAccessReviews struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.Client[*v1beta1.LocalSubjectAccessReview]
     }
     
     // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews
     func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews {
     	return &localSubjectAccessReviews{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClient[*v1beta1.LocalSubjectAccessReview](
    +			"localsubjectaccessreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.LocalSubjectAccessReview { return &v1beta1.LocalSubjectAccessReview{} }),
     	}
     }
    -
    -// Create takes the representation of a localSubjectAccessReview and creates it.  Returns the server's representation of the localSubjectAccessReview, and an error, if there is any.
    -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) {
    -	result = &v1beta1.LocalSubjectAccessReview{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("localsubjectaccessreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(localSubjectAccessReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
    index 0286c93fe6..4b413dc4f0 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1beta1 "k8s.io/api/authorization/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface.
    @@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface {
     
     // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
     type selfSubjectAccessReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1beta1.SelfSubjectAccessReview]
     }
     
     // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews
     func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews {
     	return &selfSubjectAccessReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1beta1.SelfSubjectAccessReview](
    +			"selfsubjectaccessreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.SelfSubjectAccessReview { return &v1beta1.SelfSubjectAccessReview{} }),
     	}
     }
    -
    -// Create takes the representation of a selfSubjectAccessReview and creates it.  Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any.
    -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) {
    -	result = &v1beta1.SelfSubjectAccessReview{}
    -	err = c.client.Post().
    -		Resource("selfsubjectaccessreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(selfSubjectAccessReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
    index d772973ec6..b64cec3015 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1beta1 "k8s.io/api/authorization/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface.
    @@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface {
     
     // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
     type selfSubjectRulesReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1beta1.SelfSubjectRulesReview]
     }
     
     // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews
     func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews {
     	return &selfSubjectRulesReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1beta1.SelfSubjectRulesReview](
    +			"selfsubjectrulesreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.SelfSubjectRulesReview { return &v1beta1.SelfSubjectRulesReview{} }),
     	}
     }
    -
    -// Create takes the representation of a selfSubjectRulesReview and creates it.  Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any.
    -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) {
    -	result = &v1beta1.SelfSubjectRulesReview{}
    -	err = c.client.Post().
    -		Resource("selfsubjectrulesreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(selfSubjectRulesReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
    index aebe8398c0..3fca833a1b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
    @@ -23,8 +23,8 @@ import (
     
     	v1beta1 "k8s.io/api/authorization/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface.
    @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface {
     
     // subjectAccessReviews implements SubjectAccessReviewInterface
     type subjectAccessReviews struct {
    -	client rest.Interface
    +	*gentype.Client[*v1beta1.SubjectAccessReview]
     }
     
     // newSubjectAccessReviews returns a SubjectAccessReviews
     func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews {
     	return &subjectAccessReviews{
    -		client: c.RESTClient(),
    +		gentype.NewClient[*v1beta1.SubjectAccessReview](
    +			"subjectaccessreviews",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.SubjectAccessReview { return &v1beta1.SubjectAccessReview{} }),
     	}
     }
    -
    -// Create takes the representation of a subjectAccessReview and creates it.  Returns the server's representation of the subjectAccessReview, and an error, if there is any.
    -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) {
    -	result = &v1beta1.SubjectAccessReview{}
    -	err = c.client.Post().
    -		Resource("subjectaccessreviews").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(subjectAccessReview).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
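
// Note: illustrative sketch, not part of the vendored diff. The typed client now
// embeds gentype.Client, but its public surface is unchanged: callers still issue
// a SubjectAccessReview through the generated clientset exactly as before. This
// assumes a reachable cluster that still serves authorization.k8s.io/v1beta1 and a
// kubeconfig at the default location; the user and resource names are hypothetical.
package main

import (
	"context"
	"fmt"

	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	sar := &authorizationv1beta1.SubjectAccessReview{
		Spec: authorizationv1beta1.SubjectAccessReviewSpec{
			User: "system:serviceaccount:default:default",
			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
				Verb:      "get",
				Resource:  "pods",
				Namespace: "default",
			},
		},
	}

	// Create still POSTs to the subjectaccessreviews endpoint; only the plumbing
	// behind it moved into the generic gentype.Client.
	result, err := cs.AuthorizationV1beta1().SubjectAccessReviews().
		Create(context.TODO(), sar, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", result.Status.Allowed)
}
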
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go
    index a2c95b7539..23e2c391dd 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go
    @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v1.SchemeGroupVersion.WithKind("HorizontalPod
     
     // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
     func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.HorizontalPodAutoscaler), err
     }
     
     // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
     func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) {
    +	emptyResult := &v1.HorizontalPodAutoscalerList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v1.HorizontalPodAutoscalerList{})
    +		Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.Lis
     // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
     func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.HorizontalPodAutoscaler), err
     }
     
     // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.HorizontalPodAutoscaler), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) {
    +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.HorizontalPodAutoscaler), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string,
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt
     
     // Patch applies the patch and returns the patched horizontalPodAutoscaler.
     func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.HorizontalPodAutoscaler), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.HorizontalPodAutoscaler), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.HorizontalPodAutoscaler), err
     }
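
// Note: illustrative sketch, not part of the vendored diff. With the options-aware
// fake actions above, a Get for a missing object still reports NotFound, but the
// returned pointer is now a non-nil empty HorizontalPodAutoscaler rather than nil.
// Package, file, and test names are hypothetical.
package faketest

import (
	"context"
	"testing"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeHPAGetMissingReturnsEmptyResult(t *testing.T) {
	seeded := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	}
	cs := fake.NewSimpleClientset(seeded)

	hpa, err := cs.AutoscalingV1().HorizontalPodAutoscalers("default").
		Get(context.TODO(), "does-not-exist", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	if hpa == nil {
		t.Fatal("expected a non-nil (empty) HorizontalPodAutoscaler, got nil")
	}

	// The seeded object is still retrievable exactly as before.
	if _, err := cs.AutoscalingV1().HorizontalPodAutoscalers("default").
		Get(context.TODO(), "web", metav1.GetOptions{}); err != nil {
		t.Fatal(err)
	}
}
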
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
    index 19afde66db..4d29ac5227 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/autoscaling/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
    @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface {
     type HorizontalPodAutoscalerInterface interface {
     	Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error)
     	Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error)
     	Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error)
     	HorizontalPodAutoscalerExpansion
     }
     
     // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
     type horizontalPodAutoscalers struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration]
     }
     
     // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
     func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers {
     	return &horizontalPodAutoscalers{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration](
    +			"horizontalpodautoscalers",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.HorizontalPodAutoscaler { return &v1.HorizontalPodAutoscaler{} },
    +			func() *v1.HorizontalPodAutoscalerList { return &v1.HorizontalPodAutoscalerList{} }),
     	}
     }
    -
    -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
    -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    -	result = &v1.HorizontalPodAutoscaler{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
    -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.HorizontalPodAutoscalerList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
    -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    -	result = &v1.HorizontalPodAutoscaler{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    -	result = &v1.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    -	result = &v1.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
    -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) {
    -	result = &v1.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -	result = &v1.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
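
// Note: illustrative sketch, not part of the vendored diff. It hand-wires the same
// gentype.ClientWithListAndApply that newHorizontalPodAutoscalers now constructs,
// to make the constructor arguments above concrete. The REST client configuration
// mirrors what the generated AutoscalingV1Client applies; the kubeconfig path and
// target namespace are assumptions for this sketch.
package main

import (
	"context"
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
	"k8s.io/client-go/gentype"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	// Same defaults the generated autoscaling/v1 client sets on its REST client.
	cfg.GroupVersion = &autoscalingv1.SchemeGroupVersion
	cfg.APIPath = "/apis"
	cfg.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
	restClient, err := rest.RESTClientFor(cfg)
	if err != nil {
		panic(err)
	}

	// Equivalent of newHorizontalPodAutoscalers(c, "default") after this change.
	hpas := gentype.NewClientWithListAndApply[*autoscalingv1.HorizontalPodAutoscaler, *autoscalingv1.HorizontalPodAutoscalerList, *applyautoscalingv1.HorizontalPodAutoscalerApplyConfiguration](
		"horizontalpodautoscalers",
		restClient,
		scheme.ParameterCodec,
		"default",
		func() *autoscalingv1.HorizontalPodAutoscaler { return &autoscalingv1.HorizontalPodAutoscaler{} },
		func() *autoscalingv1.HorizontalPodAutoscalerList { return &autoscalingv1.HorizontalPodAutoscalerList{} })

	// Get, List, Watch, Patch, Apply, etc. are now provided by the generic client.
	list, err := hpas.List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("horizontalpodautoscalers in default:", len(list.Items))
}
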
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go
    index cfcc208232..2ca3d27c94 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go
    @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2.SchemeGroupVersion.WithKind("HorizontalPod
     
     // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
     func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2.HorizontalPodAutoscaler), err
     }
     
     // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
     func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) {
    +	emptyResult := &v2.HorizontalPodAutoscalerList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2.HorizontalPodAutoscalerList{})
    +		Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt
     // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
     func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2.HorizontalPodAutoscaler), err
     }
     
     // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2.HorizontalPodAutoscaler), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) {
    +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2.HorizontalPodAutoscaler), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string,
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v2.HorizontalPodAutoscalerList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt
     
     // Patch applies the patch and returns the patched horizontalPodAutoscaler.
     func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2.HorizontalPodAutoscaler), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2.HorizontalPodAutoscaler), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2.HorizontalPodAutoscaler), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
    index 3a077d71da..dbce8d1020 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
    @@ -20,17 +20,14 @@ package v2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v2 "k8s.io/api/autoscaling/v2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
    @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface {
     type HorizontalPodAutoscalerInterface interface {
     	Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error)
     	Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error)
     	Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error)
     	HorizontalPodAutoscalerExpansion
     }
     
     // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
     type horizontalPodAutoscalers struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration]
     }
     
     // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
     func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers {
     	return &horizontalPodAutoscalers{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration](
    +			"horizontalpodautoscalers",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v2.HorizontalPodAutoscaler { return &v2.HorizontalPodAutoscaler{} },
    +			func() *v2.HorizontalPodAutoscalerList { return &v2.HorizontalPodAutoscalerList{} }),
     	}
     }
    -
    -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
    -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    -	result = &v2.HorizontalPodAutoscaler{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
    -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v2.HorizontalPodAutoscalerList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
    -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    -	result = &v2.HorizontalPodAutoscaler{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    -	result = &v2.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    -	result = &v2.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
    -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) {
    -	result = &v2.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -	result = &v2.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -
    -	result = &v2.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
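
// Note: illustrative sketch, not part of the vendored diff. Server-side apply via
// the generated apply configurations keeps working unchanged on top of the new
// generic client; only the hand-written Apply/ApplyStatus bodies above were removed.
// Assumes a reachable cluster and a kubeconfig at the default location; the field
// manager name and workload names below are hypothetical.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyautoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Declarative intent: scale the "web" Deployment between 2 and 10 replicas.
	hpa := applyautoscalingv2.HorizontalPodAutoscaler("web", "default").
		WithSpec(applyautoscalingv2.HorizontalPodAutoscalerSpec().
			WithMinReplicas(2).
			WithMaxReplicas(10).
			WithScaleTargetRef(applyautoscalingv2.CrossVersionObjectReference().
				WithAPIVersion("apps/v1").
				WithKind("Deployment").
				WithName("web")))

	applied, err := cs.AutoscalingV2().HorizontalPodAutoscalers("default").
		Apply(context.TODO(), hpa, metav1.ApplyOptions{FieldManager: "hpa-example", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied HPA:", applied.Name, "maxReplicas:", applied.Spec.MaxReplicas)
}
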
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go
    index 0b2658e642..7f99b5e8fc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go
    @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2beta1.SchemeGroupVersion.WithKind("Horizont
     
     // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
     func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta1.HorizontalPodAutoscaler), err
     }
     
     // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
     func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) {
    +	emptyResult := &v2beta1.HorizontalPodAutoscalerList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta1.HorizontalPodAutoscalerList{})
    +		Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt
     // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
     func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta1.HorizontalPodAutoscaler), err
     }
     
     // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta1.HorizontalPodAutoscaler), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) {
    +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta1.HorizontalPodAutoscaler), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string,
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt
     
     // Patch applies the patch and returns the patched horizontalPodAutoscaler.
     func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta1.HorizontalPodAutoscaler), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v2beta1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta1.HorizontalPodAutoscaler), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v2beta1.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta1.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta1.HorizontalPodAutoscaler), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
    index 5080912a12..6bc1b77766 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
    @@ -20,17 +20,14 @@ package v2beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v2beta1 "k8s.io/api/autoscaling/v2beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
    @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface {
     type HorizontalPodAutoscalerInterface interface {
     	Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
     	Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error)
     	Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error)
     	HorizontalPodAutoscalerExpansion
     }
     
     // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
     type horizontalPodAutoscalers struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]
     }
     
     // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
     func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers {
     	return &horizontalPodAutoscalers{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration](
    +			"horizontalpodautoscalers",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v2beta1.HorizontalPodAutoscaler { return &v2beta1.HorizontalPodAutoscaler{} },
    +			func() *v2beta1.HorizontalPodAutoscalerList { return &v2beta1.HorizontalPodAutoscalerList{} }),
     	}
     }
    -
    -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
    -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta1.HorizontalPodAutoscaler{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
    -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v2beta1.HorizontalPodAutoscalerList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
    -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta1.HorizontalPodAutoscaler{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta1.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta1.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
    -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta1.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -	result = &v2beta1.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -
    -	result = &v2beta1.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
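
Although the hand-written per-verb methods are deleted above, the exported `HorizontalPodAutoscalerInterface` keeps the same method set and signatures; only the implementation moves into the embedded `gentype.ClientWithListAndApply`. A minimal sketch of unchanged caller code, assuming a `kubernetes.Interface` wired up elsewhere (the label selector is illustrative):

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listHPAs compiles against both the old and the regenerated client:
// the caller-facing interface is untouched by the gentype migration.
func listHPAs(ctx context.Context, clientset kubernetes.Interface, ns string) error {
	hpas, err := clientset.AutoscalingV2beta1().
		HorizontalPodAutoscalers(ns).
		List(ctx, metav1.ListOptions{LabelSelector: "app=web"})
	if err != nil {
		return err
	}
	for _, hpa := range hpas.Items {
		fmt.Println(hpa.Name, hpa.Status.CurrentReplicas)
	}
	return nil
}
```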
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go
    index 0a7c93c3d3..e037e8ac48 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go
    @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2beta2.SchemeGroupVersion.WithKind("Horizont
     
     // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
     func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta2.HorizontalPodAutoscaler), err
     }
     
     // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
     func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) {
    +	emptyResult := &v2beta2.HorizontalPodAutoscalerList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta2.HorizontalPodAutoscalerList{})
    +		Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt
     // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
     func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta2.HorizontalPodAutoscaler), err
     }
     
     // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
     func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta2.HorizontalPodAutoscaler), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) {
    +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta2.HorizontalPodAutoscaler), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string,
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v2beta2.HorizontalPodAutoscalerList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt
     
     // Patch applies the patch and returns the patched horizontalPodAutoscaler.
     func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    +	emptyResult := &v2beta2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta2.HorizontalPodAutoscaler), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v2beta2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta2.HorizontalPodAutoscaler), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont
     	if name == nil {
     		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
     	}
    +	emptyResult := &v2beta2.HorizontalPodAutoscaler{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta2.HorizontalPodAutoscaler{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v2beta2.HorizontalPodAutoscaler), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
    index 0ddb9108b3..6f464661a9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
    @@ -20,17 +20,14 @@ package v2beta2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v2beta2 "k8s.io/api/autoscaling/v2beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
    @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface {
     type HorizontalPodAutoscalerInterface interface {
     	Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
     	Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error)
     	Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error)
     	HorizontalPodAutoscalerExpansion
     }
     
     // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
     type horizontalPodAutoscalers struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]
     }
     
     // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
     func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers {
     	return &horizontalPodAutoscalers{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration](
    +			"horizontalpodautoscalers",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v2beta2.HorizontalPodAutoscaler { return &v2beta2.HorizontalPodAutoscaler{} },
    +			func() *v2beta2.HorizontalPodAutoscalerList { return &v2beta2.HorizontalPodAutoscalerList{} }),
     	}
     }
    -
    -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
    -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta2.HorizontalPodAutoscaler{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
    -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v2beta2.HorizontalPodAutoscalerList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
    -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta2.HorizontalPodAutoscaler{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
    -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta2.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta2.HorizontalPodAutoscaler{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(horizontalPodAutoscaler.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(horizontalPodAutoscaler).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
    -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    -	result = &v2beta2.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler.
    -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -	result = &v2beta2.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
    -	if horizontalPodAutoscaler == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(horizontalPodAutoscaler)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := horizontalPodAutoscaler.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
    -	}
    -
    -	result = &v2beta2.HorizontalPodAutoscaler{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("horizontalpodautoscalers").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
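
The request shape should not change either: the generic client is expected to issue the same `GET /apis/autoscaling/v2beta2/namespaces/{ns}/horizontalpodautoscalers/{name}` that the deleted hand-written `Get` built explicitly. The sketch below simply re-expresses that removed body as a standalone helper (the function name and the use of a bare `rest.Interface` are illustrative):

```go
package example

import (
	"context"

	v2beta2 "k8s.io/api/autoscaling/v2beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

// getHPA re-expresses the deleted hand-written Get for illustration; the
// generic gentype client builds the equivalent request internally.
func getHPA(ctx context.Context, client rest.Interface, ns, name string) (*v2beta2.HorizontalPodAutoscaler, error) {
	result := &v2beta2.HorizontalPodAutoscaler{}
	err := client.Get().
		Namespace(ns).
		Resource("horizontalpodautoscalers").
		Name(name).
		VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec).
		Do(ctx).
		Into(result)
	return result, err
}
```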
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go
    index 9250263215..7907a5bf56 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/batch/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	batchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CronJobsGetter has a method to return a CronJobInterface.
    @@ -43,6 +40,7 @@ type CronJobsGetter interface {
     type CronJobInterface interface {
     	Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error)
     	Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type CronJobInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error)
     	Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error)
     	CronJobExpansion
     }
     
     // cronJobs implements CronJobInterface
     type cronJobs struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration]
     }
     
     // newCronJobs returns a CronJobs
     func newCronJobs(c *BatchV1Client, namespace string) *cronJobs {
     	return &cronJobs{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration](
    +			"cronjobs",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.CronJob { return &v1.CronJob{} },
    +			func() *v1.CronJobList { return &v1.CronJobList{} }),
     	}
     }
    -
    -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
    -func (c *cronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) {
    -	result = &v1.CronJob{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CronJobs that match those selectors.
    -func (c *cronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.CronJobList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cronJobs.
    -func (c *cronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cronJob and creates it.  Returns the server's representation of the cronJob, and an error, if there is any.
    -func (c *cronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) {
    -	result = &v1.CronJob{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cronJob).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
    -func (c *cronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) {
    -	result = &v1.CronJob{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(cronJob.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cronJob).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) {
    -	result = &v1.CronJob{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(cronJob.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cronJob).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs.
    -func (c *cronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cronJob.
    -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) {
    -	result = &v1.CronJob{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob.
    -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) {
    -	if cronJob == nil {
    -		return nil, fmt.Errorf("cronJob provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cronJob)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cronJob.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
    -	}
    -	result = &v1.CronJob{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) {
    -	if cronJob == nil {
    -		return nil, fmt.Errorf("cronJob provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cronJob)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := cronJob.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.CronJob{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
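
The server-side apply path is likewise preserved: the removed hand-written `Apply` marshaled the apply configuration and sent an `ApplyPatchType` patch, which the generic client now does on its behalf. A minimal usage sketch, assuming the CronJob already exists and the caller only wants to claim `spec.suspend` (resource name and field manager are illustrative):

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
)

// suspendCronJob claims only spec.suspend via server-side apply; the generic
// client sends the same ApplyPatchType patch the removed Apply built by hand.
func suspendCronJob(ctx context.Context, clientset kubernetes.Interface, ns, name string) error {
	cj := batchv1ac.CronJob(name, ns).
		WithSpec(batchv1ac.CronJobSpec().WithSuspend(true))

	_, err := clientset.BatchV1().CronJobs(ns).Apply(ctx, cj, metav1.ApplyOptions{
		FieldManager: "example-suspender", // illustrative field manager name
		Force:        true,
	})
	return err
}
```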
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go
    index 0cbcce6d81..171bb82329 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go
    @@ -44,22 +44,24 @@ var cronjobsKind = v1.SchemeGroupVersion.WithKind("CronJob")
     
     // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
     func (c *FakeCronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) {
    +	emptyResult := &v1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1.CronJob{})
    +		Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CronJob), err
     }
     
     // List takes label and field selectors, and returns the list of CronJobs that match those selectors.
     func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) {
    +	emptyResult := &v1.CronJobList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1.CronJobList{})
    +		Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested cronJobs.
     func (c *FakeCronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a cronJob and creates it.  Returns the server's representation of the cronJob, and an error, if there is any.
     func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) {
    +	emptyResult := &v1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{})
    +		Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CronJob), err
     }
     
     // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
     func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) {
    +	emptyResult := &v1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{})
    +		Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CronJob), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) {
    +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) {
    +	emptyResult := &v1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1.CronJob{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CronJob), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts metav1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.CronJobList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteO
     
     // Patch applies the patch and returns the patched cronJob.
     func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) {
    +	emptyResult := &v1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1.CronJob{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CronJob), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyC
     	if name == nil {
     		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.CronJob{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CronJob), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJob
     	if name == nil {
     		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.CronJob{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CronJob), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go
    index cf1a913bdf..23e66953cb 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go
    @@ -44,22 +44,24 @@ var jobsKind = v1.SchemeGroupVersion.WithKind("Job")
     
     // Get takes name of the job, and returns the corresponding job object, and an error if there is any.
     func (c *FakeJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) {
    +	emptyResult := &v1.Job{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(jobsResource, c.ns, name), &v1.Job{})
    +		Invokes(testing.NewGetActionWithOptions(jobsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Job), err
     }
     
     // List takes label and field selectors, and returns the list of Jobs that match those selectors.
     func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) {
    +	emptyResult := &v1.JobList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &v1.JobList{})
    +		Invokes(testing.NewListActionWithOptions(jobsResource, jobsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v
     // Watch returns a watch.Interface that watches the requested jobs.
     func (c *FakeJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(jobsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a job and creates it.  Returns the server's representation of the job, and an error, if there is any.
     func (c *FakeJobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) {
    +	emptyResult := &v1.Job{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &v1.Job{})
    +		Invokes(testing.NewCreateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Job), err
     }
     
     // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any.
     func (c *FakeJobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) {
    +	emptyResult := &v1.Job{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{})
    +		Invokes(testing.NewUpdateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Job), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) {
    +func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) {
    +	emptyResult := &v1.Job{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(jobsResource, "status", c.ns, job, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Job), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOp
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(jobsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.JobList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptio
     
     // Patch applies the patch and returns the patched job.
     func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) {
    +	emptyResult := &v1.Job{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &v1.Job{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Job), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeJobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration
     	if name == nil {
     		return nil, fmt.Errorf("job.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Job{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Job{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Job), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeJobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfigu
     	if name == nil {
     		return nil, fmt.Errorf("job.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Job{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Job{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Job), err
     }
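
Tests that hook the fake clientset through reactors should be unaffected by the switch to `*WithOptions` action constructors, since reactors still match on verb and resource. A hypothetical sketch (test and object names are illustrative):

```go
package example

import (
	"context"
	"testing"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func TestJobUpdateReactor(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	var seen *batchv1.Job

	// Reactors match on verb and resource, which the *WithOptions action
	// constructors do not change.
	clientset.Fake.PrependReactor("update", "jobs",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			seen = action.(k8stesting.UpdateAction).GetObject().(*batchv1.Job)
			return true, seen, nil
		})

	job := &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	if _, err := clientset.BatchV1().Jobs("default").Update(context.Background(), job, metav1.UpdateOptions{}); err != nil {
		t.Fatal(err)
	}
	if seen == nil || seen.Name != "demo" {
		t.Fatal("reactor did not observe the update")
	}
}
```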
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
    index c076c80af2..83dbe6fa4f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/batch/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	batchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // JobsGetter has a method to return a JobInterface.
    @@ -43,6 +40,7 @@ type JobsGetter interface {
     type JobInterface interface {
     	Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error)
     	Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type JobInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error)
     	Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error)
     	JobExpansion
     }
     
     // jobs implements JobInterface
     type jobs struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]
     }
     
     // newJobs returns a Jobs
     func newJobs(c *BatchV1Client, namespace string) *jobs {
     	return &jobs{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration](
    +			"jobs",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Job { return &v1.Job{} },
    +			func() *v1.JobList { return &v1.JobList{} }),
     	}
     }
    -
    -// Get takes name of the job, and returns the corresponding job object, and an error if there is any.
    -func (c *jobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) {
    -	result = &v1.Job{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Jobs that match those selectors.
    -func (c *jobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.JobList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested jobs.
    -func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a job and creates it.  Returns the server's representation of the job, and an error, if there is any.
    -func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) {
    -	result = &v1.Job{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(job).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any.
    -func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) {
    -	result = &v1.Job{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		Name(job.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(job).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) {
    -	result = &v1.Job{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		Name(job.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(job).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the job and deletes it. Returns an error if one occurs.
    -func (c *jobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *jobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched job.
    -func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) {
    -	result = &v1.Job{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied job.
    -func (c *jobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) {
    -	if job == nil {
    -		return nil, fmt.Errorf("job provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(job)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := job.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("job.Name must be provided to Apply")
    -	}
    -	result = &v1.Job{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *jobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) {
    -	if job == nil {
    -		return nil, fmt.Errorf("job provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(job)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := job.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("job.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.Job{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("jobs").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
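The rewrite above is representative of every typed client touched by this update: the hand-written Get/List/Watch/Create/Update/UpdateStatus/Delete/DeleteCollection/Patch/Apply/ApplyStatus methods are removed and supplied instead by the embedded gentype.ClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]. The exported JobInterface is unchanged, so callers compile and behave as before. A minimal caller-side sketch, assuming an in-cluster config and a hypothetical Job named "example" in the "default" namespace:

```go
// Minimal sketch: the public JobInterface is unchanged, so existing callers
// keep working; Get/List/etc. are now served by the embedded generic client.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Dispatches through gentype.ClientWithListAndApply under the hood.
	job, err := cs.BatchV1().Jobs("default").Get(context.TODO(), "example", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(job.Name)
}
```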
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
    index d687339ae9..a6f7399d84 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/batch/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CronJobsGetter has a method to return a CronJobInterface.
    @@ -43,6 +40,7 @@ type CronJobsGetter interface {
     type CronJobInterface interface {
     	Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error)
     	Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type CronJobInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error)
     	Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error)
     	CronJobExpansion
     }
     
     // cronJobs implements CronJobInterface
     type cronJobs struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration]
     }
     
     // newCronJobs returns a CronJobs
     func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs {
     	return &cronJobs{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration](
    +			"cronjobs",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.CronJob { return &v1beta1.CronJob{} },
    +			func() *v1beta1.CronJobList { return &v1beta1.CronJobList{} }),
     	}
     }
    -
    -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
    -func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) {
    -	result = &v1beta1.CronJob{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CronJobs that match those selectors.
    -func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.CronJobList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cronJobs.
    -func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cronJob and creates it.  Returns the server's representation of the cronJob, and an error, if there is any.
    -func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) {
    -	result = &v1beta1.CronJob{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cronJob).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
    -func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) {
    -	result = &v1beta1.CronJob{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(cronJob.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cronJob).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) {
    -	result = &v1beta1.CronJob{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(cronJob.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cronJob).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs.
    -func (c *cronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cronJob.
    -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) {
    -	result = &v1beta1.CronJob{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob.
    -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) {
    -	if cronJob == nil {
    -		return nil, fmt.Errorf("cronJob provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cronJob)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cronJob.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.CronJob{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) {
    -	if cronJob == nil {
    -		return nil, fmt.Errorf("cronJob provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cronJob)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := cronJob.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.CronJob{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("cronjobs").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go
    index 9d078f55a9..71cd4f1653 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go
    @@ -44,22 +44,24 @@ var cronjobsKind = v1beta1.SchemeGroupVersion.WithKind("CronJob")
     
     // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
     func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) {
    +	emptyResult := &v1beta1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{})
    +		Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CronJob), err
     }
     
     // List takes label and field selectors, and returns the list of CronJobs that match those selectors.
     func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) {
    +	emptyResult := &v1beta1.CronJobList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1beta1.CronJobList{})
    +		Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v
     // Watch returns a watch.Interface that watches the requested cronJobs.
     func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a cronJob and creates it.  Returns the server's representation of the cronJob, and an error, if there is any.
     func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) {
    +	emptyResult := &v1beta1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{})
    +		Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CronJob), err
     }
     
     // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
     func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) {
    +	emptyResult := &v1beta1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{})
    +		Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CronJob), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) {
    +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) {
    +	emptyResult := &v1beta1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CronJob), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOp
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.CronJobList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptio
     
     // Patch applies the patch and returns the patched cronJob.
     func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) {
    +	emptyResult := &v1beta1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CronJob), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobA
     	if name == nil {
     		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CronJob{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CronJob), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.Cr
     	if name == nil {
     		return nil, fmt.Errorf("cronJob.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.CronJob{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.CronJob{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CronJob), err
     }
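The fake clients follow the same pattern throughout this update: actions are built with the ...WithOptions constructors so the caller's Get/Create/Update/Patch/Delete options reach registered reactors, and error paths now return an empty typed object alongside the error instead of nil. A hedged test sketch of that second point (the package and Job name are illustrative):

```go
// Sketch of the behavior change in the fakes: on error the typed result is an
// empty object, not nil, so tests should rely on err rather than a nil check.
package example_test

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeJobGetNotFound(t *testing.T) {
	cs := fake.NewSimpleClientset() // no objects seeded

	job, err := cs.BatchV1().Jobs("default").Get(context.TODO(), "missing", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	// With this client-go version the fake returns an empty *v1.Job alongside
	// the error ("return emptyResult, err" above), not nil.
	if job == nil {
		t.Fatal("expected a non-nil empty Job on error")
	}
}
```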
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go
    index 0d6b68b296..9fa3300e6c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/certificates/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface.
    @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface {
     type CertificateSigningRequestInterface interface {
     	Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (*v1.CertificateSigningRequest, error)
     	Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,6 +49,7 @@ type CertificateSigningRequestInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error)
     	Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error)
     	UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error)
     
    @@ -59,195 +58,26 @@ type CertificateSigningRequestInterface interface {
     
     // certificateSigningRequests implements CertificateSigningRequestInterface
     type certificateSigningRequests struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration]
     }
     
     // newCertificateSigningRequests returns a CertificateSigningRequests
     func newCertificateSigningRequests(c *CertificatesV1Client) *certificateSigningRequests {
     	return &certificateSigningRequests{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration](
    +			"certificatesigningrequests",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.CertificateSigningRequest { return &v1.CertificateSigningRequest{} },
    +			func() *v1.CertificateSigningRequestList { return &v1.CertificateSigningRequestList{} }),
     	}
     }
     
    -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any.
    -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) {
    -	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Get().
    -		Resource("certificatesigningrequests").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors.
    -func (c *certificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.CertificateSigningRequestList{}
    -	err = c.client.Get().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested certificateSigningRequests.
    -func (c *certificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a certificateSigningRequest and creates it.  Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
    -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) {
    -	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Post().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(certificateSigningRequest).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
    -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) {
    -	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Put().
    -		Resource("certificatesigningrequests").
    -		Name(certificateSigningRequest.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(certificateSigningRequest).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) {
    -	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Put().
    -		Resource("certificatesigningrequests").
    -		Name(certificateSigningRequest.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(certificateSigningRequest).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs.
    -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("certificatesigningrequests").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched certificateSigningRequest.
    -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) {
    -	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Patch(pt).
    -		Resource("certificatesigningrequests").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest.
    -func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) {
    -	if certificateSigningRequest == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(certificateSigningRequest)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := certificateSigningRequest.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
    -	}
    -	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("certificatesigningrequests").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) {
    -	if certificateSigningRequest == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(certificateSigningRequest)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := certificateSigningRequest.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("certificatesigningrequests").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // UpdateApproval takes the top resource name and the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
     func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) {
     	result = &v1.CertificateSigningRequest{}
    -	err = c.client.Put().
    +	err = c.GetClient().Put().
     		Resource("certificatesigningrequests").
     		Name(certificateSigningRequestName).
     		SubResource("approval").
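Subresource helpers the generic client does not cover, such as UpdateApproval above, keep their hand-written request construction and only swap c.client for c.GetClient() on the embedded generic client. A hedged caller-side sketch of that unchanged surface, with an illustrative CSR name and approval reason:

```go
// Sketch: UpdateApproval is still a plain PUT to the "approval" subresource;
// only its transport now comes from GetClient(). Names here are illustrative.
package example

import (
	"context"

	certv1 "k8s.io/api/certificates/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func approveCSR(ctx context.Context, cs kubernetes.Interface, name string) error {
	csr, err := cs.CertificatesV1().CertificateSigningRequests().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	csr.Status.Conditions = append(csr.Status.Conditions, certv1.CertificateSigningRequestCondition{
		Type:    certv1.CertificateApproved,
		Status:  corev1.ConditionTrue,
		Reason:  "ApprovedForIllustration",
		Message: "approved in a documentation example",
	})
	_, err = cs.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, name, csr, metav1.UpdateOptions{})
	return err
}
```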
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go
    index adb7db0bf6..f3fc99f839 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go
    @@ -43,20 +43,22 @@ var certificatesigningrequestsKind = v1.SchemeGroupVersion.WithKind("Certificate
     
     // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any.
     func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
     
     // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors.
     func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) {
    +	emptyResult := &v1.CertificateSigningRequestList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1.CertificateSigningRequestList{})
    +		Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.L
     // Watch returns a watch.Interface that watches the requested certificateSigningRequests.
     func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts))
     }
     
     // Create takes the representation of a certificateSigningRequest and creates it.  Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
     func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
     
     // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
     func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) {
    +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.CertificateSigningRequestList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, o
     
     // Patch applies the patch and returns the patched certificateSigningRequest.
     func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateS
     	if name == nil {
     		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
    @@ -169,20 +176,22 @@ func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certif
     	if name == nil {
     		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
     
     // UpdateApproval takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
     func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &v1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "approval", certificateSigningRequest, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CertificateSigningRequest), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go
    index 970fb15e6e..74fe9fa14c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/certificates/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface.
    @@ -55,143 +52,18 @@ type ClusterTrustBundleInterface interface {
     
     // clusterTrustBundles implements ClusterTrustBundleInterface
     type clusterTrustBundles struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration]
     }
     
     // newClusterTrustBundles returns a ClusterTrustBundles
     func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles {
     	return &clusterTrustBundles{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration](
    +			"clustertrustbundles",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.ClusterTrustBundle { return &v1alpha1.ClusterTrustBundle{} },
    +			func() *v1alpha1.ClusterTrustBundleList { return &v1alpha1.ClusterTrustBundleList{} }),
     	}
     }
    -
    -// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any.
    -func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) {
    -	result = &v1alpha1.ClusterTrustBundle{}
    -	err = c.client.Get().
    -		Resource("clustertrustbundles").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors.
    -func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.ClusterTrustBundleList{}
    -	err = c.client.Get().
    -		Resource("clustertrustbundles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested clusterTrustBundles.
    -func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("clustertrustbundles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a clusterTrustBundle and creates it.  Returns the server's representation of the clusterTrustBundle, and an error, if there is any.
    -func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) {
    -	result = &v1alpha1.ClusterTrustBundle{}
    -	err = c.client.Post().
    -		Resource("clustertrustbundles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterTrustBundle).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any.
    -func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) {
    -	result = &v1alpha1.ClusterTrustBundle{}
    -	err = c.client.Put().
    -		Resource("clustertrustbundles").
    -		Name(clusterTrustBundle.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterTrustBundle).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs.
    -func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("clustertrustbundles").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("clustertrustbundles").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched clusterTrustBundle.
    -func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) {
    -	result = &v1alpha1.ClusterTrustBundle{}
    -	err = c.client.Patch(pt).
    -		Resource("clustertrustbundles").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle.
    -func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) {
    -	if clusterTrustBundle == nil {
    -		return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(clusterTrustBundle)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := clusterTrustBundle.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.ClusterTrustBundle{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("clustertrustbundles").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go
    index 2f849cbd7d..1c4e97bd40 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go
    @@ -43,20 +43,22 @@ var clustertrustbundlesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterTrust
     
     // Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any.
     func (c *FakeClusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) {
    +	emptyResult := &v1alpha1.ClusterTrustBundle{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(clustertrustbundlesResource, name), &v1alpha1.ClusterTrustBundle{})
    +		Invokes(testing.NewRootGetActionWithOptions(clustertrustbundlesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterTrustBundle), err
     }
     
     // List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors.
     func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) {
    +	emptyResult := &v1alpha1.ClusterTrustBundleList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(clustertrustbundlesResource, clustertrustbundlesKind, opts), &v1alpha1.ClusterTrustBundleList{})
    +		Invokes(testing.NewRootListActionWithOptions(clustertrustbundlesResource, clustertrustbundlesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions)
     // Watch returns a watch.Interface that watches the requested clusterTrustBundles.
     func (c *FakeClusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(clustertrustbundlesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(clustertrustbundlesResource, opts))
     }
     
     // Create takes the representation of a clusterTrustBundle and creates it.  Returns the server's representation of the clusterTrustBundle, and an error, if there is any.
     func (c *FakeClusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) {
    +	emptyResult := &v1alpha1.ClusterTrustBundle{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{})
    +		Invokes(testing.NewRootCreateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterTrustBundle), err
     }
     
     // Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any.
     func (c *FakeClusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) {
    +	emptyResult := &v1alpha1.ClusterTrustBundle{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterTrustBundle), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeClusterTrustBundles) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(clustertrustbundlesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(clustertrustbundlesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.ClusterTrustBundleList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.
     
     // Patch applies the patch and returns the patched clusterTrustBundle.
     func (c *FakeClusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) {
    +	emptyResult := &v1alpha1.ClusterTrustBundle{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, name, pt, data, subresources...), &v1alpha1.ClusterTrustBundle{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterTrustBundle), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeClusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle
     	if name == nil {
     		return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ClusterTrustBundle{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterTrustBundle{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterTrustBundle), err
     }
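Reviewer note: the regenerated fakes above change two things — test actions are now built with the caller's options (e.g. NewRootGetActionWithOptions), and error paths return a non-nil empty object instead of nil. A minimal test sketch under those assumptions, using the aggregated fake clientset from k8s.io/client-go/kubernetes/fake (the bundle name is illustrative):

package example

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestClusterTrustBundleNotFound(t *testing.T) {
	clientset := fake.NewSimpleClientset() // empty object tracker

	// Get of a missing object: the error is NotFound, and the returned
	// value is an empty ClusterTrustBundle rather than nil.
	ctb, err := clientset.CertificatesV1alpha1().
		ClusterTrustBundles().
		Get(context.Background(), "does-not-exist", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	if ctb == nil {
		t.Fatalf("expected a non-nil empty result")
	}
}
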
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
    index ec0b9d266f..de9915c5d6 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/certificates/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface.
    @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface {
     type CertificateSigningRequestInterface interface {
     	Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error)
     	Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type CertificateSigningRequestInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error)
     	Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error)
     	CertificateSigningRequestExpansion
     }
     
     // certificateSigningRequests implements CertificateSigningRequestInterface
     type certificateSigningRequests struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration]
     }
     
     // newCertificateSigningRequests returns a CertificateSigningRequests
     func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests {
     	return &certificateSigningRequests{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration](
    +			"certificatesigningrequests",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.CertificateSigningRequest { return &v1beta1.CertificateSigningRequest{} },
    +			func() *v1beta1.CertificateSigningRequestList { return &v1beta1.CertificateSigningRequestList{} }),
     	}
     }
    -
    -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any.
    -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    -	result = &v1beta1.CertificateSigningRequest{}
    -	err = c.client.Get().
    -		Resource("certificatesigningrequests").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors.
    -func (c *certificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.CertificateSigningRequestList{}
    -	err = c.client.Get().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested certificateSigningRequests.
    -func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a certificateSigningRequest and creates it.  Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
    -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    -	result = &v1beta1.CertificateSigningRequest{}
    -	err = c.client.Post().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(certificateSigningRequest).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
    -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    -	result = &v1beta1.CertificateSigningRequest{}
    -	err = c.client.Put().
    -		Resource("certificatesigningrequests").
    -		Name(certificateSigningRequest.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(certificateSigningRequest).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    -	result = &v1beta1.CertificateSigningRequest{}
    -	err = c.client.Put().
    -		Resource("certificatesigningrequests").
    -		Name(certificateSigningRequest.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(certificateSigningRequest).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs.
    -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("certificatesigningrequests").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("certificatesigningrequests").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched certificateSigningRequest.
    -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) {
    -	result = &v1beta1.CertificateSigningRequest{}
    -	err = c.client.Patch(pt).
    -		Resource("certificatesigningrequests").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest.
    -func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    -	if certificateSigningRequest == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(certificateSigningRequest)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := certificateSigningRequest.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.CertificateSigningRequest{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("certificatesigningrequests").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    -	if certificateSigningRequest == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(certificateSigningRequest)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := certificateSigningRequest.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.CertificateSigningRequest{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("certificatesigningrequests").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
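Reviewer note: the hand-written verb implementations removed above are now inherited from the embedded gentype.ClientWithListAndApply, parameterized by the object, list, and apply-configuration types; the constructor receives the resource name, REST client, parameter codec, namespace (empty for cluster-scoped resources such as CSRs), and factory functions for empty objects. CertificateSigningRequestInterface itself is unchanged, so callers compile and behave as before. A hedged sketch (the clientset parameter and the pending-CSR heuristic are illustrative):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listPendingCSRs shows that the caller-facing API is untouched by the
// gentype migration: Get/List/Watch/Patch keep their signatures, only the
// generated implementation behind them changed.
func listPendingCSRs(ctx context.Context, clientset kubernetes.Interface) error {
	csrs, err := clientset.CertificatesV1beta1().
		CertificateSigningRequests().
		List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, csr := range csrs.Items {
		if len(csr.Status.Conditions) == 0 { // no approval/denial recorded yet
			fmt.Println("pending:", csr.Name)
		}
	}
	return nil
}
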
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
    index 4737891411..4e631b0a40 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
    @@ -30,7 +30,7 @@ type CertificateSigningRequestExpansion interface {
     
     func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) {
     	result = &certificates.CertificateSigningRequest{}
    -	err = c.client.Put().
    +	err = c.GetClient().Put().
     		Resource("certificatesigningrequests").
     		Name(certificateSigningRequest.Name).
     		VersionedParams(&opts, scheme.ParameterCodec).
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go
    index 76bb38e7bf..ff5a9bd4c7 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go
    @@ -43,20 +43,22 @@ var certificatesigningrequestsKind = v1beta1.SchemeGroupVersion.WithKind("Certif
     
     // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any.
     func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1beta1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CertificateSigningRequest), err
     }
     
     // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors.
     func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) {
    +	emptyResult := &v1beta1.CertificateSigningRequestList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1beta1.CertificateSigningRequestList{})
    +		Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListO
     // Watch returns a watch.Interface that watches the requested certificateSigningRequests.
     func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts))
     }
     
     // Create takes the representation of a certificateSigningRequest and creates it.  Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
     func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1beta1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CertificateSigningRequest), err
     }
     
     // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
     func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1beta1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CertificateSigningRequest), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) {
    +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1beta1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CertificateSigningRequest), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, o
     
     // Patch applies the patch and returns the patched certificateSigningRequest.
     func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) {
    +	emptyResult := &v1beta1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CertificateSigningRequest), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateS
     	if name == nil {
     		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1beta1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CertificateSigningRequest), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certif
     	if name == nil {
     		return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.CertificateSigningRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.CertificateSigningRequest{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CertificateSigningRequest), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go
    index 6dc7c4c17f..03f833f370 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go
    @@ -44,22 +44,24 @@ var leasesKind = v1.SchemeGroupVersion.WithKind("Lease")
     
     // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any.
     func (c *FakeLeases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) {
    +	emptyResult := &v1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1.Lease{})
    +		Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Lease), err
     }
     
     // List takes label and field selectors, and returns the list of Leases that match those selectors.
     func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) {
    +	emptyResult := &v1.LeaseList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1.LeaseList{})
    +		Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested leases.
     func (c *FakeLeases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a lease and creates it.  Returns the server's representation of the lease, and an error, if there is any.
     func (c *FakeLeases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) {
    +	emptyResult := &v1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1.Lease{})
    +		Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Lease), err
     }
     
     // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
     func (c *FakeLeases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) {
    +	emptyResult := &v1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1.Lease{})
    +		Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Lease), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeLeases) Delete(ctx context.Context, name string, opts metav1.Delete
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.LeaseList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt
     
     // Patch applies the patch and returns the patched lease.
     func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) {
    +	emptyResult := &v1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1.Lease{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Lease), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1.LeaseApply
     	if name == nil {
     		return nil, fmt.Errorf("lease.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Lease{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Lease), err
     }
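Reviewer note: because the fake Lease client now constructs actions with the caller's options, tests that inspect the recorded action list keep working, and the options travel with each action. A hedged sketch that only asserts on verb and resource, which are stable across client-go versions (object names are illustrative):

package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestLeaseActionsAreRecorded(t *testing.T) {
	clientset := fake.NewSimpleClientset()

	// The call fails with NotFound, but the fake still records the action.
	_, _ = clientset.CoordinationV1().
		Leases("default").
		Get(context.Background(), "missing", metav1.GetOptions{})

	actions := clientset.Actions()
	if len(actions) != 1 {
		t.Fatalf("expected exactly one recorded action, got %d", len(actions))
	}
	if actions[0].GetVerb() != "get" || actions[0].GetResource().Resource != "leases" {
		t.Fatalf("unexpected action: %+v", actions[0])
	}
}
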
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
    index 9e6b169a81..97834d6ac0 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/coordination/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // LeasesGetter has a method to return a LeaseInterface.
    @@ -55,154 +52,18 @@ type LeaseInterface interface {
     
     // leases implements LeaseInterface
     type leases struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration]
     }
     
     // newLeases returns a Leases
     func newLeases(c *CoordinationV1Client, namespace string) *leases {
     	return &leases{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration](
    +			"leases",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Lease { return &v1.Lease{} },
    +			func() *v1.LeaseList { return &v1.LeaseList{} }),
     	}
     }
    -
    -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any.
    -func (c *leases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) {
    -	result = &v1.Lease{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Leases that match those selectors.
    -func (c *leases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.LeaseList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested leases.
    -func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a lease and creates it.  Returns the server's representation of the lease, and an error, if there is any.
    -func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) {
    -	result = &v1.Lease{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(lease).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
    -func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) {
    -	result = &v1.Lease{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(lease.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(lease).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the lease and deletes it. Returns an error if one occurs.
    -func (c *leases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *leases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched lease.
    -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) {
    -	result = &v1.Lease{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied lease.
    -func (c *leases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) {
    -	if lease == nil {
    -		return nil, fmt.Errorf("lease provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(lease)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := lease.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("lease.Name must be provided to Apply")
    -	}
    -	result = &v1.Lease{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
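Reviewer note: for namespaced resources such as Lease, the namespace that used to be a struct field is now an argument to gentype.NewClientWithListAndApply, and Apply is likewise provided by the generic client via the LeaseApplyConfiguration type parameter. A hedged usage sketch of server-side apply through this client; namespace, lease name, and field manager below are illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordinationv1ac "k8s.io/client-go/applyconfigurations/coordination/v1"
	"k8s.io/client-go/kubernetes"
)

// applyLease renews a Lease with server-side apply; the typed Apply method
// is now inherited from the embedded generic client, but its signature and
// semantics match the pre-migration generated code.
func applyLease(ctx context.Context, clientset kubernetes.Interface, holder string) error {
	now := metav1.NowMicro()
	lease := coordinationv1ac.Lease("example-lock", "default").
		WithSpec(coordinationv1ac.LeaseSpec().
			WithHolderIdentity(holder).
			WithLeaseDurationSeconds(15).
			WithRenewTime(now))

	_, err := clientset.CoordinationV1().
		Leases("default").
		Apply(ctx, lease, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
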
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go
    new file mode 100644
    index 0000000000..dd75e5d014
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go
    @@ -0,0 +1,107 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"net/http"
    +
    +	v1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	"k8s.io/client-go/kubernetes/scheme"
    +	rest "k8s.io/client-go/rest"
    +)
    +
    +type CoordinationV1alpha1Interface interface {
    +	RESTClient() rest.Interface
    +	LeaseCandidatesGetter
    +}
    +
    +// CoordinationV1alpha1Client is used to interact with features provided by the coordination.k8s.io group.
    +type CoordinationV1alpha1Client struct {
    +	restClient rest.Interface
    +}
    +
    +func (c *CoordinationV1alpha1Client) LeaseCandidates(namespace string) LeaseCandidateInterface {
    +	return newLeaseCandidates(c, namespace)
    +}
    +
    +// NewForConfig creates a new CoordinationV1alpha1Client for the given config.
    +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
    +// where httpClient was generated with rest.HTTPClientFor(c).
    +func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) {
    +	config := *c
    +	if err := setConfigDefaults(&config); err != nil {
    +		return nil, err
    +	}
    +	httpClient, err := rest.HTTPClientFor(&config)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return NewForConfigAndClient(&config, httpClient)
    +}
    +
    +// NewForConfigAndClient creates a new CoordinationV1alpha1Client for the given config and http client.
    +// Note the http client provided takes precedence over the configured transport values.
    +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha1Client, error) {
    +	config := *c
    +	if err := setConfigDefaults(&config); err != nil {
    +		return nil, err
    +	}
    +	client, err := rest.RESTClientForConfigAndClient(&config, h)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return &CoordinationV1alpha1Client{client}, nil
    +}
    +
    +// NewForConfigOrDie creates a new CoordinationV1alpha1Client for the given config and
    +// panics if there is an error in the config.
    +func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client {
    +	client, err := NewForConfig(c)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return client
    +}
    +
    +// New creates a new CoordinationV1alpha1Client for the given RESTClient.
    +func New(c rest.Interface) *CoordinationV1alpha1Client {
    +	return &CoordinationV1alpha1Client{c}
    +}
    +
    +func setConfigDefaults(config *rest.Config) error {
    +	gv := v1alpha1.SchemeGroupVersion
    +	config.GroupVersion = &gv
    +	config.APIPath = "/apis"
    +	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
    +
    +	if config.UserAgent == "" {
    +		config.UserAgent = rest.DefaultKubernetesUserAgent()
    +	}
    +
    +	return nil
    +}
    +
    +// RESTClient returns a RESTClient that is used to communicate
    +// with API server by this client implementation.
    +func (c *CoordinationV1alpha1Client) RESTClient() rest.Interface {
    +	if c == nil {
    +		return nil
    +	}
    +	return c.restClient
    +}
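Reviewer note: this file is new in this client-go version — coordination.k8s.io/v1alpha1 gains a LeaseCandidates client (part of the alpha coordinated leader election work, so the API is typically only served when that feature is enabled on the cluster). A hedged sketch of building the group-version client directly from a rest.Config and listing candidates; the namespace is illustrative:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
	"k8s.io/client-go/rest"
)

// listLeaseCandidates constructs the group client on its own, without the
// aggregated clientset, and lists LeaseCandidate objects in a namespace.
func listLeaseCandidates(ctx context.Context, cfg *rest.Config) error {
	client, err := coordinationv1alpha1.NewForConfig(cfg)
	if err != nil {
		return err
	}
	candidates, err := client.LeaseCandidates("kube-system").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err // e.g. the v1alpha1 API may not be served on this cluster
	}
	for _, c := range candidates.Items {
		fmt.Println(c.Namespace, c.Name)
	}
	return nil
}
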
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go
    new file mode 100644
    index 0000000000..df51baa4d4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go
    @@ -0,0 +1,20 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +// This package has the automatically generated typed clients.
    +package v1alpha1
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go
    similarity index 100%
    rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go
    rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go
    new file mode 100644
    index 0000000000..2e7d4be268
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go
    @@ -0,0 +1,40 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	v1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
    +	rest "k8s.io/client-go/rest"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +type FakeCoordinationV1alpha1 struct {
    +	*testing.Fake
    +}
    +
    +func (c *FakeCoordinationV1alpha1) LeaseCandidates(namespace string) v1alpha1.LeaseCandidateInterface {
    +	return &FakeLeaseCandidates{c, namespace}
    +}
    +
    +// RESTClient returns a RESTClient that is used to communicate
    +// with API server by this client implementation.
    +func (c *FakeCoordinationV1alpha1) RESTClient() rest.Interface {
    +	var ret *rest.RESTClient
    +	return ret
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go
    new file mode 100644
    index 0000000000..c3de2303ca
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go
    @@ -0,0 +1,160 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeLeaseCandidates implements LeaseCandidateInterface
    +type FakeLeaseCandidates struct {
    +	Fake *FakeCoordinationV1alpha1
    +	ns   string
    +}
    +
    +var leasecandidatesResource = v1alpha1.SchemeGroupVersion.WithResource("leasecandidates")
    +
    +var leasecandidatesKind = v1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate")
    +
    +// Get takes name of the leaseCandidate, and returns the corresponding leaseCandidate object, and an error if there is any.
    +func (c *FakeLeaseCandidates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.LeaseCandidate, err error) {
    +	emptyResult := &v1alpha1.LeaseCandidate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewGetActionWithOptions(leasecandidatesResource, c.ns, name, options), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha1.LeaseCandidate), err
    +}
    +
    +// List takes label and field selectors, and returns the list of LeaseCandidates that match those selectors.
    +func (c *FakeLeaseCandidates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.LeaseCandidateList, err error) {
    +	emptyResult := &v1alpha1.LeaseCandidateList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewListActionWithOptions(leasecandidatesResource, leasecandidatesKind, c.ns, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1alpha1.LeaseCandidateList{ListMeta: obj.(*v1alpha1.LeaseCandidateList).ListMeta}
    +	for _, item := range obj.(*v1alpha1.LeaseCandidateList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested leaseCandidates.
    +func (c *FakeLeaseCandidates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewWatchActionWithOptions(leasecandidatesResource, c.ns, opts))
    +
    +}
    +
    +// Create takes the representation of a leaseCandidate and creates it.  Returns the server's representation of the leaseCandidate, and an error, if there is any.
    +func (c *FakeLeaseCandidates) Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (result *v1alpha1.LeaseCandidate, err error) {
    +	emptyResult := &v1alpha1.LeaseCandidate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewCreateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha1.LeaseCandidate), err
    +}
    +
    +// Update takes the representation of a leaseCandidate and updates it. Returns the server's representation of the leaseCandidate, and an error, if there is any.
    +func (c *FakeLeaseCandidates) Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (result *v1alpha1.LeaseCandidate, err error) {
    +	emptyResult := &v1alpha1.LeaseCandidate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewUpdateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha1.LeaseCandidate), err
    +}
    +
    +// Delete takes name of the leaseCandidate and deletes it. Returns an error if one occurs.
    +func (c *FakeLeaseCandidates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewDeleteActionWithOptions(leasecandidatesResource, c.ns, name, opts), &v1alpha1.LeaseCandidate{})
    +
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeLeaseCandidates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewDeleteCollectionActionWithOptions(leasecandidatesResource, c.ns, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1alpha1.LeaseCandidateList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched leaseCandidate.
    +func (c *FakeLeaseCandidates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) {
    +	emptyResult := &v1alpha1.LeaseCandidate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha1.LeaseCandidate), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied leaseCandidate.
    +func (c *FakeLeaseCandidates) Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) {
    +	if leaseCandidate == nil {
    +		return nil, fmt.Errorf("leaseCandidate provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(leaseCandidate)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := leaseCandidate.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("leaseCandidate.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha1.LeaseCandidate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha1.LeaseCandidate), err
    +}
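Reviewer note: the matching fake follows the same WithOptions/empty-result conventions as the other regenerated fakes, so LeaseCandidates can be unit-tested through the aggregated fake clientset. A hedged test sketch that pre-seeds the fake's object tracker and reads the object back (names are illustrative):

package example

import (
	"context"
	"testing"

	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestListSeededLeaseCandidates(t *testing.T) {
	seed := &coordinationv1alpha1.LeaseCandidate{
		ObjectMeta: metav1.ObjectMeta{Name: "candidate-a", Namespace: "kube-system"},
	}
	clientset := fake.NewSimpleClientset(seed) // tracker starts with one object

	list, err := clientset.CoordinationV1alpha1().
		LeaseCandidates("kube-system").
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(list.Items) != 1 || list.Items[0].Name != "candidate-a" {
		t.Fatalf("unexpected list: %+v", list.Items)
	}
}
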
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go
    new file mode 100644
    index 0000000000..2dc2f30cfc
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go
    @@ -0,0 +1,21 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +type LeaseCandidateExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go
    new file mode 100644
    index 0000000000..868185135b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go
    @@ -0,0 +1,69 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	"context"
    +
    +	v1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// LeaseCandidatesGetter has a method to return a LeaseCandidateInterface.
    +// A group's client should implement this interface.
    +type LeaseCandidatesGetter interface {
    +	LeaseCandidates(namespace string) LeaseCandidateInterface
    +}
    +
    +// LeaseCandidateInterface has methods to work with LeaseCandidate resources.
    +type LeaseCandidateInterface interface {
    +	Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (*v1alpha1.LeaseCandidate, error)
    +	Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (*v1alpha1.LeaseCandidate, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.LeaseCandidate, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.LeaseCandidateList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error)
    +	Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error)
    +	LeaseCandidateExpansion
    +}
    +
    +// leaseCandidates implements LeaseCandidateInterface
    +type leaseCandidates struct {
    +	*gentype.ClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration]
    +}
    +
    +// newLeaseCandidates returns a LeaseCandidates
    +func newLeaseCandidates(c *CoordinationV1alpha1Client, namespace string) *leaseCandidates {
    +	return &leaseCandidates{
    +		gentype.NewClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration](
    +			"leasecandidates",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1alpha1.LeaseCandidate { return &v1alpha1.LeaseCandidate{} },
    +			func() *v1alpha1.LeaseCandidateList { return &v1alpha1.LeaseCandidateList{} }),
    +	}
    +}
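For orientation on how this newly generated surface is consumed downstream, here is a minimal, hedged sketch of listing LeaseCandidate objects through the clientset. It assumes the standard CoordinationV1alpha1() group accessor that client-gen adds alongside this package, a kubeconfig at the default location, and a cluster that actually serves the alpha coordination.k8s.io/v1alpha1 API (it is feature-gated and off by default); none of those assumptions come from the diff itself.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig; error handling is kept minimal.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The generated LeaseCandidates client above is reached through the
	// coordination/v1alpha1 group accessor on the clientset.
	candidates, err := cs.CoordinationV1alpha1().LeaseCandidates("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, lc := range candidates.Items {
		fmt.Println(lc.Name)
	}
}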
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go
    index 9a4a0d7eb9..112784af94 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go
    @@ -44,22 +44,24 @@ var leasesKind = v1beta1.SchemeGroupVersion.WithKind("Lease")
     
     // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any.
     func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) {
    +	emptyResult := &v1beta1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1beta1.Lease{})
    +		Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Lease), err
     }
     
     // List takes label and field selectors, and returns the list of Leases that match those selectors.
     func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) {
    +	emptyResult := &v1beta1.LeaseList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1beta1.LeaseList{})
    +		Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1b
     // Watch returns a watch.Interface that watches the requested leases.
     func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a lease and creates it.  Returns the server's representation of the lease, and an error, if there is any.
     func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) {
    +	emptyResult := &v1beta1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1beta1.Lease{})
    +		Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Lease), err
     }
     
     // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
     func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) {
    +	emptyResult := &v1beta1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1beta1.Lease{})
    +		Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Lease), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOpti
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.LeaseList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions
     
     // Patch applies the patch and returns the patched lease.
     func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) {
    +	emptyResult := &v1beta1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Lease), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1beta1.Lease
     	if name == nil {
     		return nil, fmt.Errorf("lease.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Lease{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Lease{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Lease), err
     }
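One behavioral nuance of the regenerated fakes that test code may rely on: on the error path these methods now return a zero-valued, non-nil object together with the error, where they previously returned nil, so tests should branch on the returned error rather than on a nil result. A small sketch against the fake clientset using the standard reactor API (the reactor and test names are illustrative only):

package coordination_test

import (
	"context"
	"errors"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func TestLeaseGetErrorStillReturnsEmptyObject(t *testing.T) {
	cs := fake.NewSimpleClientset()
	// Make every "get" on leases fail.
	cs.PrependReactor("get", "leases", func(k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("boom")
	})

	lease, err := cs.CoordinationV1beta1().Leases("default").Get(context.TODO(), "missing", metav1.GetOptions{})
	if err == nil {
		t.Fatal("expected the reactor error")
	}
	// With the regenerated fake, lease is an empty *v1beta1.Lease rather than nil.
	if lease == nil {
		t.Fatal("expected a non-nil empty result on the error path")
	}
}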
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
    index 1bbd57bdd1..62341e53b6 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/coordination/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // LeasesGetter has a method to return a LeaseInterface.
    @@ -55,154 +52,18 @@ type LeaseInterface interface {
     
     // leases implements LeaseInterface
     type leases struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration]
     }
     
     // newLeases returns a Leases
     func newLeases(c *CoordinationV1beta1Client, namespace string) *leases {
     	return &leases{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration](
    +			"leases",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Lease { return &v1beta1.Lease{} },
    +			func() *v1beta1.LeaseList { return &v1beta1.LeaseList{} }),
     	}
     }
    -
    -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any.
    -func (c *leases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) {
    -	result = &v1beta1.Lease{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Leases that match those selectors.
    -func (c *leases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.LeaseList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested leases.
    -func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a lease and creates it.  Returns the server's representation of the lease, and an error, if there is any.
    -func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) {
    -	result = &v1beta1.Lease{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(lease).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
    -func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) {
    -	result = &v1beta1.Lease{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(lease.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(lease).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the lease and deletes it. Returns an error if one occurs.
    -func (c *leases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *leases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("leases").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched lease.
    -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) {
    -	result = &v1beta1.Lease{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied lease.
    -func (c *leases) Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) {
    -	if lease == nil {
    -		return nil, fmt.Errorf("lease provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(lease)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := lease.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("lease.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.Lease{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("leases").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
    index 0fef56429d..ab9458a5c9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ComponentStatusesGetter has a method to return a ComponentStatusInterface.
    @@ -55,143 +52,18 @@ type ComponentStatusInterface interface {
     
     // componentStatuses implements ComponentStatusInterface
     type componentStatuses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration]
     }
     
     // newComponentStatuses returns a ComponentStatuses
     func newComponentStatuses(c *CoreV1Client) *componentStatuses {
     	return &componentStatuses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration](
    +			"componentstatuses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.ComponentStatus { return &v1.ComponentStatus{} },
    +			func() *v1.ComponentStatusList { return &v1.ComponentStatusList{} }),
     	}
     }
    -
    -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any.
    -func (c *componentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) {
    -	result = &v1.ComponentStatus{}
    -	err = c.client.Get().
    -		Resource("componentstatuses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors.
    -func (c *componentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ComponentStatusList{}
    -	err = c.client.Get().
    -		Resource("componentstatuses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested componentStatuses.
    -func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("componentstatuses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a componentStatus and creates it.  Returns the server's representation of the componentStatus, and an error, if there is any.
    -func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) {
    -	result = &v1.ComponentStatus{}
    -	err = c.client.Post().
    -		Resource("componentstatuses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(componentStatus).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any.
    -func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) {
    -	result = &v1.ComponentStatus{}
    -	err = c.client.Put().
    -		Resource("componentstatuses").
    -		Name(componentStatus.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(componentStatus).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs.
    -func (c *componentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("componentstatuses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *componentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("componentstatuses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched componentStatus.
    -func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) {
    -	result = &v1.ComponentStatus{}
    -	err = c.client.Patch(pt).
    -		Resource("componentstatuses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus.
    -func (c *componentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) {
    -	if componentStatus == nil {
    -		return nil, fmt.Errorf("componentStatus provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(componentStatus)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := componentStatus.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("componentStatus.Name must be provided to Apply")
    -	}
    -	result = &v1.ComponentStatus{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("componentstatuses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
    index b68177720b..72aa2361f0 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ConfigMapsGetter has a method to return a ConfigMapInterface.
    @@ -55,154 +52,18 @@ type ConfigMapInterface interface {
     
     // configMaps implements ConfigMapInterface
     type configMaps struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration]
     }
     
     // newConfigMaps returns a ConfigMaps
     func newConfigMaps(c *CoreV1Client, namespace string) *configMaps {
     	return &configMaps{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration](
    +			"configmaps",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.ConfigMap { return &v1.ConfigMap{} },
    +			func() *v1.ConfigMapList { return &v1.ConfigMapList{} }),
     	}
     }
    -
    -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any.
    -func (c *configMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) {
    -	result = &v1.ConfigMap{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors.
    -func (c *configMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ConfigMapList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested configMaps.
    -func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a configMap and creates it.  Returns the server's representation of the configMap, and an error, if there is any.
    -func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) {
    -	result = &v1.ConfigMap{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(configMap).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any.
    -func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) {
    -	result = &v1.ConfigMap{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		Name(configMap.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(configMap).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the configMap and deletes it. Returns an error if one occurs.
    -func (c *configMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *configMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched configMap.
    -func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) {
    -	result = &v1.ConfigMap{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied configMap.
    -func (c *configMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) {
    -	if configMap == nil {
    -		return nil, fmt.Errorf("configMap provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(configMap)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := configMap.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("configMap.Name must be provided to Apply")
    -	}
    -	result = &v1.ConfigMap{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("configmaps").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
    index cdf464b069..9b9fc5fc1e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // EndpointsGetter has a method to return a EndpointsInterface.
    @@ -55,154 +52,18 @@ type EndpointsInterface interface {
     
     // endpoints implements EndpointsInterface
     type endpoints struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration]
     }
     
     // newEndpoints returns a Endpoints
     func newEndpoints(c *CoreV1Client, namespace string) *endpoints {
     	return &endpoints{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration](
    +			"endpoints",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Endpoints { return &v1.Endpoints{} },
    +			func() *v1.EndpointsList { return &v1.EndpointsList{} }),
     	}
     }
    -
    -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any.
    -func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) {
    -	result = &v1.Endpoints{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Endpoints that match those selectors.
    -func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.EndpointsList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested endpoints.
    -func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a endpoints and creates it.  Returns the server's representation of the endpoints, and an error, if there is any.
    -func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) {
    -	result = &v1.Endpoints{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(endpoints).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any.
    -func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) {
    -	result = &v1.Endpoints{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		Name(endpoints.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(endpoints).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs.
    -func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched endpoints.
    -func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) {
    -	result = &v1.Endpoints{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied endpoints.
    -func (c *endpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) {
    -	if endpoints == nil {
    -		return nil, fmt.Errorf("endpoints provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(endpoints)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := endpoints.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("endpoints.Name must be provided to Apply")
    -	}
    -	result = &v1.Endpoints{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("endpoints").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
    index 8274d85ffe..5ff0f06906 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // EventsGetter has a method to return a EventInterface.
    @@ -55,154 +52,18 @@ type EventInterface interface {
     
     // events implements EventInterface
     type events struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration]
     }
     
     // newEvents returns a Events
     func newEvents(c *CoreV1Client, namespace string) *events {
     	return &events{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration](
    +			"events",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Event { return &v1.Event{} },
    +			func() *v1.EventList { return &v1.EventList{} }),
     	}
     }
    -
    -// Get takes name of the event, and returns the corresponding event object, and an error if there is any.
    -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Events that match those selectors.
    -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.EventList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested events.
    -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
    -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(event).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
    -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(event.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(event).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the event and deletes it. Returns an error if one occurs.
    -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched event.
    -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied event.
    -func (c *events) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) {
    -	if event == nil {
    -		return nil, fmt.Errorf("event provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(event)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := event.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("event.Name must be provided to Apply")
    -	}
    -	result = &v1.Event{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
    index a3fdf57a98..4243572328 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
    @@ -48,11 +48,11 @@ type EventExpansion interface {
     // event; it must either match this event client's namespace, or this event
     // client must have been created with the "" namespace.
     func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
    -	if e.ns != "" && event.Namespace != e.ns {
    -		return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
    +	if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() {
    +		return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace())
     	}
     	result := &v1.Event{}
    -	err := e.client.Post().
    +	err := e.GetClient().Post().
     		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
     		Resource("events").
     		Body(event).
    @@ -67,11 +67,11 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
     // created with the "" namespace. Update also requires the ResourceVersion to be set in the event
     // object.
     func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
    -	if e.ns != "" && event.Namespace != e.ns {
    -		return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
    +	if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() {
    +		return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace())
     	}
     	result := &v1.Event{}
    -	err := e.client.Put().
    +	err := e.GetClient().Put().
     		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
     		Resource("events").
     		Name(event.Name).
    @@ -87,11 +87,11 @@ func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
     // match this event client's namespace, or this event client must have been
     // created with the "" namespace.
     func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) {
    -	if e.ns != "" && incompleteEvent.Namespace != e.ns {
    -		return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns)
    +	if e.GetNamespace() != "" && incompleteEvent.Namespace != e.GetNamespace() {
    +		return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.GetNamespace())
     	}
     	result := &v1.Event{}
    -	err := e.client.Patch(types.StrategicMergePatchType).
    +	err := e.GetClient().Patch(types.StrategicMergePatchType).
     		NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0).
     		Resource("events").
     		Name(incompleteEvent.Name).
    @@ -109,8 +109,8 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev
     	if err != nil {
     		return nil, err
     	}
    -	if len(e.ns) > 0 && ref.Namespace != e.ns {
    -		return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns)
    +	if len(e.GetNamespace()) > 0 && ref.Namespace != e.GetNamespace() {
    +		return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.GetNamespace())
     	}
     	stringRefKind := string(ref.Kind)
     	var refKind *string
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go
    index 39d4c3282e..dbd305280b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go
    @@ -43,20 +43,22 @@ var componentstatusesKind = v1.SchemeGroupVersion.WithKind("ComponentStatus")
     
     // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any.
     func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) {
    +	emptyResult := &v1.ComponentStatus{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{})
    +		Invokes(testing.NewRootGetActionWithOptions(componentstatusesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ComponentStatus), err
     }
     
     // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors.
     func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) {
    +	emptyResult := &v1.ComponentStatusList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &v1.ComponentStatusList{})
    +		Invokes(testing.NewRootListActionWithOptions(componentstatusesResource, componentstatusesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOption
     // Watch returns a watch.Interface that watches the requested componentStatuses.
     func (c *FakeComponentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(componentstatusesResource, opts))
     }
     
     // Create takes the representation of a componentStatus and creates it.  Returns the server's representation of the componentStatus, and an error, if there is any.
     func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) {
    +	emptyResult := &v1.ComponentStatus{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{})
    +		Invokes(testing.NewRootCreateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ComponentStatus), err
     }
     
     // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any.
     func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) {
    +	emptyResult := &v1.ComponentStatus{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ComponentStatus), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts me
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(componentstatusesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ComponentStatusList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav
     
     // Patch applies the patch and returns the patched componentStatus.
     func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) {
    +	emptyResult := &v1.ComponentStatus{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &v1.ComponentStatus{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ComponentStatus), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *core
     	if name == nil {
     		return nil, fmt.Errorf("componentStatus.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ComponentStatus{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, *name, types.ApplyPatchType, data), &v1.ComponentStatus{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ComponentStatus), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go
    index 6e8a38bd8f..ae760add7f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go
    @@ -44,22 +44,24 @@ var configmapsKind = v1.SchemeGroupVersion.WithKind("ConfigMap")
     
     // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any.
     func (c *FakeConfigMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) {
    +	emptyResult := &v1.ConfigMap{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{})
    +		Invokes(testing.NewGetActionWithOptions(configmapsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ConfigMap), err
     }
     
     // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors.
     func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) {
    +	emptyResult := &v1.ConfigMapList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &v1.ConfigMapList{})
    +		Invokes(testing.NewListActionWithOptions(configmapsResource, configmapsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested configMaps.
     func (c *FakeConfigMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(configmapsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a configMap and creates it.  Returns the server's representation of the configMap, and an error, if there is any.
     func (c *FakeConfigMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) {
    +	emptyResult := &v1.ConfigMap{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{})
    +		Invokes(testing.NewCreateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ConfigMap), err
     }
     
     // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any.
     func (c *FakeConfigMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) {
    +	emptyResult := &v1.ConfigMap{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{})
    +		Invokes(testing.NewUpdateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ConfigMap), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts metav1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(configmapsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ConfigMapList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.Delet
     
     // Patch applies the patch and returns the patched configMap.
     func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) {
    +	emptyResult := &v1.ConfigMap{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &v1.ConfigMap{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ConfigMap), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapA
     	if name == nil {
     		return nil, fmt.Errorf("configMap.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ConfigMap{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ConfigMap{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ConfigMap), err
     }
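
Note the behavioral consequence visible in the hunks above: when no reactor handles a call, the regenerated fakes now return a non-nil empty object together with the error instead of a nil pointer. Below is a minimal sketch (a hypothetical test, not part of this change; the package and test names are made up) showing that a Get for a missing ConfigMap yields a NotFound error alongside an empty, non-nil *v1.ConfigMap.

```go
package example

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Hypothetical test: with the regenerated fakes, a miss returns the NotFound
// error plus a non-nil empty object, so assertions should branch on the error.
func TestFakeConfigMapGetMissing(t *testing.T) {
	cs := fake.NewSimpleClientset()

	cm, err := cs.CoreV1().ConfigMaps("default").Get(context.Background(), "absent", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	if cm == nil {
		t.Fatalf("expected a non-nil empty *v1.ConfigMap alongside the error")
	}
}
```
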
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go
    index 6b2f6c249e..7e2e91cfa6 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go
    @@ -44,22 +44,24 @@ var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints")
     
     // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any.
     func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) {
    +	emptyResult := &v1.Endpoints{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{})
    +		Invokes(testing.NewGetActionWithOptions(endpointsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Endpoints), err
     }
     
     // List takes label and field selectors, and returns the list of Endpoints that match those selectors.
     func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) {
    +	emptyResult := &v1.EndpointsList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &v1.EndpointsList{})
    +		Invokes(testing.NewListActionWithOptions(endpointsResource, endpointsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (resu
     // Watch returns a watch.Interface that watches the requested endpoints.
     func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(endpointsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a endpoints and creates it.  Returns the server's representation of the endpoints, and an error, if there is any.
     func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) {
    +	emptyResult := &v1.Endpoints{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{})
    +		Invokes(testing.NewCreateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Endpoints), err
     }
     
     // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any.
     func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) {
    +	emptyResult := &v1.Endpoints{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{})
    +		Invokes(testing.NewUpdateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Endpoints), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.Del
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(endpointsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.EndpointsList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.Delete
     
     // Patch applies the patch and returns the patched endpoints.
     func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) {
    +	emptyResult := &v1.Endpoints{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &v1.Endpoints{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Endpoints), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsAp
     	if name == nil {
     		return nil, fmt.Errorf("endpoints.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Endpoints{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Endpoints{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Endpoints), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go
    index 9ad879b394..a438ba4737 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go
    @@ -44,22 +44,24 @@ var eventsKind = v1.SchemeGroupVersion.WithKind("Event")
     
     // Get takes name of the event, and returns the corresponding event object, and an error if there is any.
     func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{})
    +		Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
     
     // List takes label and field selectors, and returns the list of Events that match those selectors.
     func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) {
    +	emptyResult := &v1.EventList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{})
    +		Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested events.
     func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
     func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{})
    +		Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
     
     // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
     func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{})
    +		Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.Delete
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.EventList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt
     
     // Patch applies the patch and returns the patched event.
     func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *corev1.EventApplyConfigur
     	if name == nil {
     		return nil, fmt.Errorf("event.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
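
The reactor plumbing itself is unchanged by this regeneration; only the action constructors gained the caller's options. A minimal sketch (again a hypothetical test, not part of this change) of a prepended reactor that fully handles a Get on the fake Events client, so the emptyResult fallback shown above never applies:

```go
package example

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

// Hypothetical test: a prepended reactor handles the get, so the fake returns
// the reactor's object rather than the generated emptyResult fallback.
func TestFakeEventsGetReactor(t *testing.T) {
	cs := fake.NewSimpleClientset()
	cs.Fake.PrependReactor("get", "events",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			name := action.(k8stesting.GetAction).GetName()
			return true, &corev1.Event{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}}, nil
		})

	ev, err := cs.CoreV1().Events("default").Get(context.Background(), "boom", metav1.GetOptions{})
	if err != nil || ev.Name != "boom" {
		t.Fatalf("unexpected result: %+v, %v", ev, err)
	}
}
```
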
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go
    index f18b5741c3..4cc36131ae 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go
    @@ -44,22 +44,24 @@ var limitrangesKind = v1.SchemeGroupVersion.WithKind("LimitRange")
     
     // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any.
     func (c *FakeLimitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) {
    +	emptyResult := &v1.LimitRange{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{})
    +		Invokes(testing.NewGetActionWithOptions(limitrangesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.LimitRange), err
     }
     
     // List takes label and field selectors, and returns the list of LimitRanges that match those selectors.
     func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) {
    +	emptyResult := &v1.LimitRangeList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &v1.LimitRangeList{})
    +		Invokes(testing.NewListActionWithOptions(limitrangesResource, limitrangesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested limitRanges.
     func (c *FakeLimitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(limitrangesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a limitRange and creates it.  Returns the server's representation of the limitRange, and an error, if there is any.
     func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) {
    +	emptyResult := &v1.LimitRange{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{})
    +		Invokes(testing.NewCreateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.LimitRange), err
     }
     
     // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any.
     func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) {
    +	emptyResult := &v1.LimitRange{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{})
    +		Invokes(testing.NewUpdateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.LimitRange), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts metav1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(limitrangesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.LimitRangeList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.Dele
     
     // Patch applies the patch and returns the patched limitRange.
     func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) {
    +	emptyResult := &v1.LimitRange{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &v1.LimitRange{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.LimitRange), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRan
     	if name == nil {
     		return nil, fmt.Errorf("limitRange.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.LimitRange{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, *name, types.ApplyPatchType, data), &v1.LimitRange{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.LimitRange), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go
    index 52fcff591e..093990571f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go
    @@ -43,20 +43,22 @@ var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace")
     
     // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any.
     func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) {
    +	emptyResult := &v1.Namespace{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{})
    +		Invokes(testing.NewRootGetActionWithOptions(namespacesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Namespace), err
     }
     
     // List takes label and field selectors, and returns the list of Namespaces that match those selectors.
     func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) {
    +	emptyResult := &v1.NamespaceList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &v1.NamespaceList{})
    +		Invokes(testing.NewRootListActionWithOptions(namespacesResource, namespacesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested namespaces.
     func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(namespacesResource, opts))
     }
     
     // Create takes the representation of a namespace and creates it.  Returns the server's representation of the namespace, and an error, if there is any.
     func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) {
    +	emptyResult := &v1.Namespace{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{})
    +		Invokes(testing.NewRootCreateActionWithOptions(namespacesResource, namespace, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Namespace), err
     }
     
     // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any.
     func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
    +	emptyResult := &v1.Namespace{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(namespacesResource, namespace, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Namespace), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) {
    +func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
    +	emptyResult := &v1.Namespace{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(namespacesResource, "status", namespace, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Namespace), err
     }
    @@ -118,10 +123,11 @@ func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.De
     
     // Patch applies the patch and returns the patched namespace.
     func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) {
    +	emptyResult := &v1.Namespace{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &v1.Namespace{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Namespace), err
     }
    @@ -139,10 +145,11 @@ func (c *FakeNamespaces) Apply(ctx context.Context, namespace *corev1.NamespaceA
     	if name == nil {
     		return nil, fmt.Errorf("namespace.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Namespace{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data), &v1.Namespace{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Namespace), err
     }
    @@ -161,10 +168,11 @@ func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *corev1.Name
     	if name == nil {
     		return nil, fmt.Errorf("namespace.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Namespace{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data, "status"), &v1.Namespace{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Namespace), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go
    index 5df40f8d11..451f992da1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go
    @@ -43,20 +43,22 @@ var nodesKind = v1.SchemeGroupVersion.WithKind("Node")
     
     // Get takes name of the node, and returns the corresponding node object, and an error if there is any.
     func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) {
    +	emptyResult := &v1.Node{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{})
    +		Invokes(testing.NewRootGetActionWithOptions(nodesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Node), err
     }
     
     // List takes label and field selectors, and returns the list of Nodes that match those selectors.
     func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) {
    +	emptyResult := &v1.NodeList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &v1.NodeList{})
    +		Invokes(testing.NewRootListActionWithOptions(nodesResource, nodesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *
     // Watch returns a watch.Interface that watches the requested nodes.
     func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(nodesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(nodesResource, opts))
     }
     
     // Create takes the representation of a node and creates it.  Returns the server's representation of the node, and an error, if there is any.
     func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) {
    +	emptyResult := &v1.Node{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{})
    +		Invokes(testing.NewRootCreateActionWithOptions(nodesResource, node, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Node), err
     }
     
     // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
     func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
    +	emptyResult := &v1.Node{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(nodesResource, node, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Node), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) {
    +func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
    +	emptyResult := &v1.Node{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(nodesResource, "status", node, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Node), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteO
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(nodesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.NodeList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOpti
     
     // Patch applies the patch and returns the patched node.
     func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) {
    +	emptyResult := &v1.Node{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &v1.Node{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Node), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeNodes) Apply(ctx context.Context, node *corev1.NodeApplyConfigurati
     	if name == nil {
     		return nil, fmt.Errorf("node.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Node{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data), &v1.Node{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Node), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeNodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfi
     	if name == nil {
     		return nil, fmt.Errorf("node.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Node{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data, "status"), &v1.Node{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Node), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go
    index 5b06d0b192..16a1f2201a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go
    @@ -43,20 +43,22 @@ var persistentvolumesKind = v1.SchemeGroupVersion.WithKind("PersistentVolume")
     
     // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any.
     func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) {
    +	emptyResult := &v1.PersistentVolume{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{})
    +		Invokes(testing.NewRootGetActionWithOptions(persistentvolumesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolume), err
     }
     
     // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors.
     func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) {
    +	emptyResult := &v1.PersistentVolumeList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &v1.PersistentVolumeList{})
    +		Invokes(testing.NewRootListActionWithOptions(persistentvolumesResource, persistentvolumesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOption
     // Watch returns a watch.Interface that watches the requested persistentVolumes.
     func (c *FakePersistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(persistentvolumesResource, opts))
     }
     
     // Create takes the representation of a persistentVolume and creates it.  Returns the server's representation of the persistentVolume, and an error, if there is any.
     func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) {
    +	emptyResult := &v1.PersistentVolume{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{})
    +		Invokes(testing.NewRootCreateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolume), err
     }
     
     // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any.
     func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) {
    +	emptyResult := &v1.PersistentVolume{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolume), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) {
    +func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) {
    +	emptyResult := &v1.PersistentVolume{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(persistentvolumesResource, "status", persistentVolume, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolume), err
     }
    @@ -118,7 +123,7 @@ func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts me
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(persistentvolumesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav
     
     // Patch applies the patch and returns the patched persistentVolume.
     func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) {
    +	emptyResult := &v1.PersistentVolume{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &v1.PersistentVolume{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolume), err
     }
    @@ -147,10 +153,11 @@ func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *cor
     	if name == nil {
     		return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PersistentVolume{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data), &v1.PersistentVolume{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolume), err
     }
    @@ -169,10 +176,11 @@ func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolum
     	if name == nil {
     		return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PersistentVolume{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolume{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolume), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go
    index b860e53674..12617c2432 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go
    @@ -44,22 +44,24 @@ var persistentvolumeclaimsKind = v1.SchemeGroupVersion.WithKind("PersistentVolum
     
     // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any.
     func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) {
    +	emptyResult := &v1.PersistentVolumeClaim{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{})
    +		Invokes(testing.NewGetActionWithOptions(persistentvolumeclaimsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolumeClaim), err
     }
     
     // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors.
     func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) {
    +	emptyResult := &v1.PersistentVolumeClaimList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &v1.PersistentVolumeClaimList{})
    +		Invokes(testing.NewListActionWithOptions(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListO
     // Watch returns a watch.Interface that watches the requested persistentVolumeClaims.
     func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(persistentvolumeclaimsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a persistentVolumeClaim and creates it.  Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
     func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) {
    +	emptyResult := &v1.PersistentVolumeClaim{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{})
    +		Invokes(testing.NewCreateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolumeClaim), err
     }
     
     // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
     func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) {
    +	emptyResult := &v1.PersistentVolumeClaim{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{})
    +		Invokes(testing.NewUpdateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolumeClaim), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) {
    +func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) {
    +	emptyResult := &v1.PersistentVolumeClaim{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolumeClaim), err
     }
    @@ -126,7 +131,7 @@ func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, op
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(persistentvolumeclaimsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts
     
     // Patch applies the patch and returns the patched persistentVolumeClaim.
     func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) {
    +	emptyResult := &v1.PersistentVolumeClaim{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &v1.PersistentVolumeClaim{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolumeClaim), err
     }
    @@ -156,11 +162,12 @@ func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolume
     	if name == nil {
     		return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PersistentVolumeClaim{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PersistentVolumeClaim{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolumeClaim), err
     }
    @@ -179,11 +186,12 @@ func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistent
     	if name == nil {
     		return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PersistentVolumeClaim{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolumeClaim{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PersistentVolumeClaim), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go
    index 23634c7d07..d2b46e8e3a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go
    @@ -44,22 +44,24 @@ var podsKind = v1.SchemeGroupVersion.WithKind("Pod")
     
     // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any.
     func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) {
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{})
    +		Invokes(testing.NewGetActionWithOptions(podsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
     
     // List takes label and field selectors, and returns the list of Pods that match those selectors.
     func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) {
    +	emptyResult := &v1.PodList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &v1.PodList{})
    +		Invokes(testing.NewListActionWithOptions(podsResource, podsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v
     // Watch returns a watch.Interface that watches the requested pods.
     func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(podsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a pod and creates it.  Returns the server's representation of the pod, and an error, if there is any.
     func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) {
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{})
    +		Invokes(testing.NewCreateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
     
     // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
     func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{})
    +		Invokes(testing.NewUpdateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) {
    +func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "status", c.ns, pod, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
    @@ -126,7 +131,7 @@ func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOp
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(podsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.PodList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptio
     
     // Patch applies the patch and returns the patched pod.
     func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) {
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &v1.Pod{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
    @@ -156,11 +162,12 @@ func (c *FakePods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration,
     	if name == nil {
     		return nil, fmt.Errorf("pod.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Pod{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
    @@ -179,22 +186,24 @@ func (c *FakePods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfigur
     	if name == nil {
     		return nil, fmt.Errorf("pod.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Pod{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
     
     // UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
     func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
    +	emptyResult := &v1.Pod{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &v1.Pod{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "ephemeralcontainers", c.ns, pod, opts), &v1.Pod{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Pod), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go
    index 9fa97ab402..dc9affdd06 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go
    @@ -44,22 +44,24 @@ var podtemplatesKind = v1.SchemeGroupVersion.WithKind("PodTemplate")
     
     // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any.
     func (c *FakePodTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) {
    +	emptyResult := &v1.PodTemplate{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{})
    +		Invokes(testing.NewGetActionWithOptions(podtemplatesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodTemplate), err
     }
     
     // List takes label and field selectors, and returns the list of PodTemplates that match those selectors.
     func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) {
    +	emptyResult := &v1.PodTemplateList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &v1.PodTemplateList{})
    +		Invokes(testing.NewListActionWithOptions(podtemplatesResource, podtemplatesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (r
     // Watch returns a watch.Interface that watches the requested podTemplates.
     func (c *FakePodTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(podtemplatesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a podTemplate and creates it.  Returns the server's representation of the podTemplate, and an error, if there is any.
     func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) {
    +	emptyResult := &v1.PodTemplate{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{})
    +		Invokes(testing.NewCreateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodTemplate), err
     }
     
     // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any.
     func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) {
    +	emptyResult := &v1.PodTemplate{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{})
    +		Invokes(testing.NewUpdateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodTemplate), err
     }
    @@ -114,7 +118,7 @@ func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts metav1.
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(podtemplatesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.PodTemplateList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.Del
     
     // Patch applies the patch and returns the patched podTemplate.
     func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) {
    +	emptyResult := &v1.PodTemplate{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &v1.PodTemplate{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodTemplate), err
     }
    @@ -144,11 +149,12 @@ func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTem
     	if name == nil {
     		return nil, fmt.Errorf("podTemplate.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PodTemplate{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodTemplate{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodTemplate), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go
    index 1e469c9b1a..6b3497f089 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go
    @@ -45,22 +45,24 @@ var replicationcontrollersKind = v1.SchemeGroupVersion.WithKind("ReplicationCont
     
     // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any.
     func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) {
    +	emptyResult := &v1.ReplicationController{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{})
    +		Invokes(testing.NewGetActionWithOptions(replicationcontrollersResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicationController), err
     }
     
     // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors.
     func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) {
    +	emptyResult := &v1.ReplicationControllerList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &v1.ReplicationControllerList{})
    +		Invokes(testing.NewListActionWithOptions(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -79,40 +81,43 @@ func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListO
     // Watch returns a watch.Interface that watches the requested replicationControllers.
     func (c *FakeReplicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(replicationcontrollersResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a replicationController and creates it.  Returns the server's representation of the replicationController, and an error, if there is any.
     func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) {
    +	emptyResult := &v1.ReplicationController{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{})
    +		Invokes(testing.NewCreateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicationController), err
     }
     
     // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any.
     func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) {
    +	emptyResult := &v1.ReplicationController{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{})
    +		Invokes(testing.NewUpdateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicationController), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) {
    +func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) {
    +	emptyResult := &v1.ReplicationController{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "status", c.ns, replicationController, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicationController), err
     }
    @@ -127,7 +132,7 @@ func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, op
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(replicationcontrollersResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{})
     	return err
    @@ -135,11 +140,12 @@ func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts
     
     // Patch applies the patch and returns the patched replicationController.
     func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) {
    +	emptyResult := &v1.ReplicationController{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &v1.ReplicationController{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicationController), err
     }
    @@ -157,11 +163,12 @@ func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationContr
     	if name == nil {
     		return nil, fmt.Errorf("replicationController.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ReplicationController{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicationController{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicationController), err
     }
    @@ -180,33 +187,36 @@ func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicatio
     	if name == nil {
     		return nil, fmt.Errorf("replicationController.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ReplicationController{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicationController{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ReplicationController), err
     }
     
     // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any.
     func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{})
    +		Invokes(testing.NewGetSubresourceActionWithOptions(replicationcontrollersResource, c.ns, "scale", replicationControllerName, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
     
     // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
    +	emptyResult := &autoscalingv1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*autoscalingv1.Scale), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go
    index 87664985ce..5e2e02afc1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go
    @@ -44,22 +44,24 @@ var resourcequotasKind = v1.SchemeGroupVersion.WithKind("ResourceQuota")
     
     // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any.
     func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) {
    +	emptyResult := &v1.ResourceQuota{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{})
    +		Invokes(testing.NewGetActionWithOptions(resourcequotasResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ResourceQuota), err
     }
     
     // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
     func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) {
    +	emptyResult := &v1.ResourceQuotaList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &v1.ResourceQuotaList{})
    +		Invokes(testing.NewListActionWithOptions(resourcequotasResource, resourcequotasKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested resourceQuotas.
     func (c *FakeResourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(resourcequotasResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a resourceQuota and creates it.  Returns the server's representation of the resourceQuota, and an error, if there is any.
     func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) {
    +	emptyResult := &v1.ResourceQuota{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{})
    +		Invokes(testing.NewCreateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ResourceQuota), err
     }
     
     // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
     func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) {
    +	emptyResult := &v1.ResourceQuota{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{})
    +		Invokes(testing.NewUpdateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ResourceQuota), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) {
    +func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) {
    +	emptyResult := &v1.ResourceQuota{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(resourcequotasResource, "status", c.ns, resourceQuota, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ResourceQuota), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts metav
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(resourcequotasResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.D
     
     // Patch applies the patch and returns the patched resourceQuota.
     func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) {
    +	emptyResult := &v1.ResourceQuota{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &v1.ResourceQuota{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ResourceQuota), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.Re
     	if name == nil {
     		return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ResourceQuota{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data), &v1.ResourceQuota{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ResourceQuota), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *cor
     	if name == nil {
     		return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ResourceQuota{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ResourceQuota{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ResourceQuota), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go
    index 90035a7037..ec0fc65b5b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go
    @@ -44,22 +44,24 @@ var secretsKind = v1.SchemeGroupVersion.WithKind("Secret")
     
     // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
     func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) {
    +	emptyResult := &v1.Secret{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{})
    +		Invokes(testing.NewGetActionWithOptions(secretsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Secret), err
     }
     
     // List takes label and field selectors, and returns the list of Secrets that match those selectors.
     func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) {
    +	emptyResult := &v1.SecretList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &v1.SecretList{})
    +		Invokes(testing.NewListActionWithOptions(secretsResource, secretsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested secrets.
     func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(secretsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a secret and creates it.  Returns the server's representation of the secret, and an error, if there is any.
     func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
    +	emptyResult := &v1.Secret{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{})
    +		Invokes(testing.NewCreateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Secret), err
     }
     
     // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
     func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) {
    +	emptyResult := &v1.Secret{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{})
    +		Invokes(testing.NewUpdateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Secret), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(secretsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.SecretList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOp
     
     // Patch applies the patch and returns the patched secret.
     func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) {
    +	emptyResult := &v1.Secret{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &v1.Secret{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Secret), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeSecrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfi
     	if name == nil {
     		return nil, fmt.Errorf("secret.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Secret{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Secret{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Secret), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go
    index 514ab19e39..2a3cf45fbc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go
    @@ -44,22 +44,24 @@ var servicesKind = v1.SchemeGroupVersion.WithKind("Service")
     
     // Get takes name of the service, and returns the corresponding service object, and an error if there is any.
     func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) {
    +	emptyResult := &v1.Service{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{})
    +		Invokes(testing.NewGetActionWithOptions(servicesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Service), err
     }
     
     // List takes label and field selectors, and returns the list of Services that match those selectors.
     func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) {
    +	emptyResult := &v1.ServiceList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1.ServiceList{})
    +		Invokes(testing.NewListActionWithOptions(servicesResource, servicesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested services.
     func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(servicesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a service and creates it.  Returns the server's representation of the service, and an error, if there is any.
     func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) {
    +	emptyResult := &v1.Service{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{})
    +		Invokes(testing.NewCreateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Service), err
     }
     
     // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any.
     func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
    +	emptyResult := &v1.Service{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{})
    +		Invokes(testing.NewUpdateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Service), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) {
    +func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
    +	emptyResult := &v1.Service{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(servicesResource, "status", c.ns, service, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Service), err
     }
    @@ -126,11 +131,12 @@ func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.Dele
     
     // Patch applies the patch and returns the patched service.
     func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) {
    +	emptyResult := &v1.Service{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1.Service{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Service), err
     }
    @@ -148,11 +154,12 @@ func (c *FakeServices) Apply(ctx context.Context, service *corev1.ServiceApplyCo
     	if name == nil {
     		return nil, fmt.Errorf("service.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Service{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Service{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Service), err
     }
    @@ -171,11 +178,12 @@ func (c *FakeServices) ApplyStatus(ctx context.Context, service *corev1.ServiceA
     	if name == nil {
     		return nil, fmt.Errorf("service.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Service{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Service{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Service), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go
    index 115ff07123..f3ad8d40f9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go
    @@ -45,22 +45,24 @@ var serviceaccountsKind = v1.SchemeGroupVersion.WithKind("ServiceAccount")
     
     // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any.
     func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) {
    +	emptyResult := &v1.ServiceAccount{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{})
    +		Invokes(testing.NewGetActionWithOptions(serviceaccountsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ServiceAccount), err
     }
     
     // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors.
     func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) {
    +	emptyResult := &v1.ServiceAccountList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &v1.ServiceAccountList{})
    +		Invokes(testing.NewListActionWithOptions(serviceaccountsResource, serviceaccountsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -79,28 +81,30 @@ func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested serviceAccounts.
     func (c *FakeServiceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(serviceaccountsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a serviceAccount and creates it.  Returns the server's representation of the serviceAccount, and an error, if there is any.
     func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) {
    +	emptyResult := &v1.ServiceAccount{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{})
    +		Invokes(testing.NewCreateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ServiceAccount), err
     }
     
     // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any.
     func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) {
    +	emptyResult := &v1.ServiceAccount{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{})
    +		Invokes(testing.NewUpdateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ServiceAccount), err
     }
    @@ -115,7 +119,7 @@ func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts meta
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(serviceaccountsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ServiceAccountList{})
     	return err
    @@ -123,11 +127,12 @@ func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1.
     
     // Patch applies the patch and returns the patched serviceAccount.
     func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) {
    +	emptyResult := &v1.ServiceAccount{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &v1.ServiceAccount{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ServiceAccount), err
     }
    @@ -145,22 +150,24 @@ func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *corev1.
     	if name == nil {
     		return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ServiceAccount{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ServiceAccount{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ServiceAccount), err
     }
     
     // CreateToken takes the representation of a tokenRequest and creates it.  Returns the server's representation of the tokenRequest, and an error, if there is any.
     func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) {
    +	emptyResult := &authenticationv1.TokenRequest{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateSubresourceAction(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest), &authenticationv1.TokenRequest{})
    +		Invokes(testing.NewCreateSubresourceActionWithOptions(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*authenticationv1.TokenRequest), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
    index e6883b607c..f8e4048f98 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // LimitRangesGetter has a method to return a LimitRangeInterface.
    @@ -55,154 +52,18 @@ type LimitRangeInterface interface {
     
     // limitRanges implements LimitRangeInterface
     type limitRanges struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration]
     }
     
     // newLimitRanges returns a LimitRanges
     func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges {
     	return &limitRanges{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration](
    +			"limitranges",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.LimitRange { return &v1.LimitRange{} },
    +			func() *v1.LimitRangeList { return &v1.LimitRangeList{} }),
     	}
     }
    -
    -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any.
    -func (c *limitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) {
    -	result = &v1.LimitRange{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors.
    -func (c *limitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.LimitRangeList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested limitRanges.
    -func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a limitRange and creates it.  Returns the server's representation of the limitRange, and an error, if there is any.
    -func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) {
    -	result = &v1.LimitRange{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(limitRange).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any.
    -func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) {
    -	result = &v1.LimitRange{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		Name(limitRange.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(limitRange).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs.
    -func (c *limitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *limitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched limitRange.
    -func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) {
    -	result = &v1.LimitRange{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied limitRange.
    -func (c *limitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) {
    -	if limitRange == nil {
    -		return nil, fmt.Errorf("limitRange provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(limitRange)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := limitRange.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("limitRange.Name must be provided to Apply")
    -	}
    -	result = &v1.LimitRange{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("limitranges").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
    index 06c77b4c45..75d20648f5 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // NamespacesGetter has a method to return a NamespaceInterface.
    @@ -43,6 +40,7 @@ type NamespacesGetter interface {
     type NamespaceInterface interface {
     	Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error)
     	Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error)
    @@ -50,178 +48,25 @@ type NamespaceInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error)
     	Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error)
     	NamespaceExpansion
     }
     
     // namespaces implements NamespaceInterface
     type namespaces struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]
     }
     
     // newNamespaces returns a Namespaces
     func newNamespaces(c *CoreV1Client) *namespaces {
     	return &namespaces{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration](
    +			"namespaces",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.Namespace { return &v1.Namespace{} },
    +			func() *v1.NamespaceList { return &v1.NamespaceList{} }),
     	}
     }
    -
    -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any.
    -func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) {
    -	result = &v1.Namespace{}
    -	err = c.client.Get().
    -		Resource("namespaces").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Namespaces that match those selectors.
    -func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.NamespaceList{}
    -	err = c.client.Get().
    -		Resource("namespaces").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested namespaces.
    -func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("namespaces").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a namespace and creates it.  Returns the server's representation of the namespace, and an error, if there is any.
    -func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) {
    -	result = &v1.Namespace{}
    -	err = c.client.Post().
    -		Resource("namespaces").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(namespace).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any.
    -func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
    -	result = &v1.Namespace{}
    -	err = c.client.Put().
    -		Resource("namespaces").
    -		Name(namespace.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(namespace).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
    -	result = &v1.Namespace{}
    -	err = c.client.Put().
    -		Resource("namespaces").
    -		Name(namespace.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(namespace).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the namespace and deletes it. Returns an error if one occurs.
    -func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("namespaces").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched namespace.
    -func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) {
    -	result = &v1.Namespace{}
    -	err = c.client.Patch(pt).
    -		Resource("namespaces").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied namespace.
    -func (c *namespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) {
    -	if namespace == nil {
    -		return nil, fmt.Errorf("namespace provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(namespace)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := namespace.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("namespace.Name must be provided to Apply")
    -	}
    -	result = &v1.Namespace{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("namespaces").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *namespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) {
    -	if namespace == nil {
    -		return nil, fmt.Errorf("namespace provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(namespace)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := namespace.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("namespace.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.Namespace{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("namespaces").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
    index be1116db15..4f720fb92e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
    @@ -32,6 +32,6 @@ type NamespaceExpansion interface {
     // Finalize takes the representation of a namespace to update.  Returns the server's representation of the namespace, and an error, if it occurs.
     func (c *namespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
     	result = &v1.Namespace{}
    -	err = c.client.Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result)
    +	err = c.GetClient().Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result)
     	return
     }
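
Review note: the pattern above repeats for every typed client in this update. The hand-written Get/List/Watch/Create/Update/UpdateStatus/Delete/Patch/Apply/ApplyStatus bodies are deleted and the resource struct instead embeds client-go's generic gentype.ClientWithListAndApply, which supplies those methods; only expansion methods such as Finalize stay hand-written, and they now reach the underlying rest.Interface through GetClient() instead of the removed client field. A minimal compile-time sketch of the idea, using a hypothetical nsLike type (the real one is the generated namespaces struct):

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
	gentype "k8s.io/client-go/gentype"
)

// nsLike mirrors the rewritten namespaces struct: the embedded generic client
// is what now provides the CRUD surface.
type nsLike struct {
	*gentype.ClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]
}

// The embed alone satisfies the core read methods of NamespaceInterface; no
// per-resource bodies are generated anymore.
var _ interface {
	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error)
	List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error)
} = (*nsLike)(nil)

The net effect is that several hundred lines of repeated boilerplate per resource disappear while the exported interfaces keep exactly the same method sets.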
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
    index d9725b2f95..df1a7817f9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // NodesGetter has a method to return a NodeInterface.
    @@ -43,6 +40,7 @@ type NodesGetter interface {
     type NodeInterface interface {
     	Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error)
     	Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,193 +49,25 @@ type NodeInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error)
     	Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error)
     	NodeExpansion
     }
     
     // nodes implements NodeInterface
     type nodes struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration]
     }
     
     // newNodes returns a Nodes
     func newNodes(c *CoreV1Client) *nodes {
     	return &nodes{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration](
    +			"nodes",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.Node { return &v1.Node{} },
    +			func() *v1.NodeList { return &v1.NodeList{} }),
     	}
     }
    -
    -// Get takes name of the node, and returns the corresponding node object, and an error if there is any.
    -func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) {
    -	result = &v1.Node{}
    -	err = c.client.Get().
    -		Resource("nodes").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Nodes that match those selectors.
    -func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.NodeList{}
    -	err = c.client.Get().
    -		Resource("nodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested nodes.
    -func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("nodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a node and creates it.  Returns the server's representation of the node, and an error, if there is any.
    -func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) {
    -	result = &v1.Node{}
    -	err = c.client.Post().
    -		Resource("nodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(node).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
    -func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
    -	result = &v1.Node{}
    -	err = c.client.Put().
    -		Resource("nodes").
    -		Name(node.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(node).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
    -	result = &v1.Node{}
    -	err = c.client.Put().
    -		Resource("nodes").
    -		Name(node.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(node).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the node and deletes it. Returns an error if one occurs.
    -func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("nodes").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("nodes").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched node.
    -func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) {
    -	result = &v1.Node{}
    -	err = c.client.Patch(pt).
    -		Resource("nodes").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied node.
    -func (c *nodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) {
    -	if node == nil {
    -		return nil, fmt.Errorf("node provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(node)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := node.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("node.Name must be provided to Apply")
    -	}
    -	result = &v1.Node{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("nodes").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *nodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) {
    -	if node == nil {
    -		return nil, fmt.Errorf("node provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(node)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := node.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("node.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.Node{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("nodes").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
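
Review note: the constructor change is worth spelling out once, since every file in this update repeats it. gentype.NewClientWithListAndApply takes the REST resource name, the rest.Interface, the parameter codec, the namespace (empty for cluster-scoped resources such as Node), and two factories for empty object and list values. A sketch of the same wiring outside the generated file, assuming a rest.Interface obtained elsewhere (newNodesLike is hypothetical; newNodes above is the real constructor):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
	gentype "k8s.io/client-go/gentype"
	scheme "k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

func newNodesLike(rc rest.Interface) *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration] {
	return gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration](
		"nodes",               // REST resource name used to build request paths
		rc,                    // underlying rest.Interface (c.RESTClient() in the generated code)
		scheme.ParameterCodec, // encodes Get/List/Patch options into query parameters
		"",                    // empty namespace: Node is cluster-scoped
		func() *v1.Node { return &v1.Node{} },         // factory for empty result objects
		func() *v1.NodeList { return &v1.NodeList{} }) // factory for empty result lists
}

The same call shape appears in every namespaced client below, with the caller's namespace in place of the empty string.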
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
    index bdf7bfed8d..df86253b0e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
    @@ -34,7 +34,7 @@ type NodeExpansion interface {
     // the node that the server returns, or an error.
     func (c *nodes) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) {
     	result := &v1.Node{}
    -	err := c.client.Patch(types.StrategicMergePatchType).
    +	err := c.GetClient().Patch(types.StrategicMergePatchType).
     		Resource("nodes").
     		Name(nodeName).
     		SubResource("status").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
    index a8e2295977..8be40f8665 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PersistentVolumesGetter has a method to return a PersistentVolumeInterface.
    @@ -43,6 +40,7 @@ type PersistentVolumesGetter interface {
     type PersistentVolumeInterface interface {
     	Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error)
     	Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,193 +49,25 @@ type PersistentVolumeInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error)
     	Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error)
     	PersistentVolumeExpansion
     }
     
     // persistentVolumes implements PersistentVolumeInterface
     type persistentVolumes struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration]
     }
     
     // newPersistentVolumes returns a PersistentVolumes
     func newPersistentVolumes(c *CoreV1Client) *persistentVolumes {
     	return &persistentVolumes{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration](
    +			"persistentvolumes",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.PersistentVolume { return &v1.PersistentVolume{} },
    +			func() *v1.PersistentVolumeList { return &v1.PersistentVolumeList{} }),
     	}
     }
    -
    -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any.
    -func (c *persistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) {
    -	result = &v1.PersistentVolume{}
    -	err = c.client.Get().
    -		Resource("persistentvolumes").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors.
    -func (c *persistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.PersistentVolumeList{}
    -	err = c.client.Get().
    -		Resource("persistentvolumes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested persistentVolumes.
    -func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("persistentvolumes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a persistentVolume and creates it.  Returns the server's representation of the persistentVolume, and an error, if there is any.
    -func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) {
    -	result = &v1.PersistentVolume{}
    -	err = c.client.Post().
    -		Resource("persistentvolumes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(persistentVolume).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any.
    -func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) {
    -	result = &v1.PersistentVolume{}
    -	err = c.client.Put().
    -		Resource("persistentvolumes").
    -		Name(persistentVolume.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(persistentVolume).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) {
    -	result = &v1.PersistentVolume{}
    -	err = c.client.Put().
    -		Resource("persistentvolumes").
    -		Name(persistentVolume.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(persistentVolume).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs.
    -func (c *persistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("persistentvolumes").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *persistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("persistentvolumes").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched persistentVolume.
    -func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) {
    -	result = &v1.PersistentVolume{}
    -	err = c.client.Patch(pt).
    -		Resource("persistentvolumes").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume.
    -func (c *persistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) {
    -	if persistentVolume == nil {
    -		return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(persistentVolume)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := persistentVolume.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply")
    -	}
    -	result = &v1.PersistentVolume{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("persistentvolumes").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *persistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) {
    -	if persistentVolume == nil {
    -		return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(persistentVolume)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := persistentVolume.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.PersistentVolume{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("persistentvolumes").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
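
Review note: nothing in this hunk changes the exported PersistentVolumeInterface, so call sites built on the clientset are unaffected by the switch to the generic client. A minimal caller-side sketch, assuming a *rest.Config obtained elsewhere (listPersistentVolumes is illustrative, not code from this repo):

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func listPersistentVolumes(ctx context.Context, cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Same method set as before the update; only the implementation behind
	// CoreV1().PersistentVolumes() moved onto the gentype client.
	pvs, err := cs.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println("persistent volumes:", len(pvs.Items))
	return nil
}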
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
    index 2e7f4fb44f..7721b00923 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface.
    @@ -43,6 +40,7 @@ type PersistentVolumeClaimsGetter interface {
     type PersistentVolumeClaimInterface interface {
     	Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error)
     	Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type PersistentVolumeClaimInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error)
     	Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error)
     	PersistentVolumeClaimExpansion
     }
     
     // persistentVolumeClaims implements PersistentVolumeClaimInterface
     type persistentVolumeClaims struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration]
     }
     
     // newPersistentVolumeClaims returns a PersistentVolumeClaims
     func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims {
     	return &persistentVolumeClaims{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration](
    +			"persistentvolumeclaims",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} },
    +			func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} }),
     	}
     }
    -
    -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any.
    -func (c *persistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) {
    -	result = &v1.PersistentVolumeClaim{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors.
    -func (c *persistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.PersistentVolumeClaimList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims.
    -func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a persistentVolumeClaim and creates it.  Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
    -func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) {
    -	result = &v1.PersistentVolumeClaim{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(persistentVolumeClaim).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
    -func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) {
    -	result = &v1.PersistentVolumeClaim{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		Name(persistentVolumeClaim.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(persistentVolumeClaim).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) {
    -	result = &v1.PersistentVolumeClaim{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		Name(persistentVolumeClaim.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(persistentVolumeClaim).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs.
    -func (c *persistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched persistentVolumeClaim.
    -func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) {
    -	result = &v1.PersistentVolumeClaim{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim.
    -func (c *persistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) {
    -	if persistentVolumeClaim == nil {
    -		return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(persistentVolumeClaim)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := persistentVolumeClaim.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply")
    -	}
    -	result = &v1.PersistentVolumeClaim{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *persistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) {
    -	if persistentVolumeClaim == nil {
    -		return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(persistentVolumeClaim)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := persistentVolumeClaim.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.PersistentVolumeClaim{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("persistentvolumeclaims").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
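
Review note: for namespaced resources the only difference from the cluster-scoped case is the fourth constructor argument: the caller-supplied namespace is handed to the generic client instead of being kept in the removed ns field, and hand-written code can read it back with GetNamespace(). A sketch mirroring newPersistentVolumeClaims above (newPVCsLike is hypothetical):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
	gentype "k8s.io/client-go/gentype"
	scheme "k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

func newPVCsLike(rc rest.Interface, namespace string) *gentype.ClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration] {
	c := gentype.NewClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration](
		"persistentvolumeclaims",
		rc,
		scheme.ParameterCodec,
		namespace, // stored by the generic client; replaces the deleted ns field
		func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} },
		func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} })
	_ = c.GetNamespace() // how retained hand-written methods recover the namespace
	return c
}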
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
    index 63122cf3fb..470b7de7bc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PodsGetter has a method to return a PodInterface.
    @@ -43,6 +40,7 @@ type PodsGetter interface {
     type PodInterface interface {
     	Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error)
     	Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,6 +49,7 @@ type PodInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error)
     	Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
     	UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
     
    @@ -59,209 +58,27 @@ type PodInterface interface {
     
     // pods implements PodInterface
     type pods struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration]
     }
     
     // newPods returns a Pods
     func newPods(c *CoreV1Client, namespace string) *pods {
     	return &pods{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration](
    +			"pods",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Pod { return &v1.Pod{} },
    +			func() *v1.PodList { return &v1.PodList{} }),
     	}
     }
     
    -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any.
    -func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) {
    -	result = &v1.Pod{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Pods that match those selectors.
    -func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.PodList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested pods.
    -func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a pod and creates it.  Returns the server's representation of the pod, and an error, if there is any.
    -func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) {
    -	result = &v1.Pod{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(pod).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
    -func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
    -	result = &v1.Pod{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		Name(pod.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(pod).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
    -	result = &v1.Pod{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		Name(pod.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(pod).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the pod and deletes it. Returns an error if one occurs.
    -func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("pods").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched pod.
    -func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) {
    -	result = &v1.Pod{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("pods").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied pod.
    -func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) {
    -	if pod == nil {
    -		return nil, fmt.Errorf("pod provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(pod)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := pod.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("pod.Name must be provided to Apply")
    -	}
    -	result = &v1.Pod{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("pods").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) {
    -	if pod == nil {
    -		return nil, fmt.Errorf("pod provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(pod)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := pod.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("pod.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.Pod{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("pods").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
     func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
     	result = &v1.Pod{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("pods").
     		Name(podName).
     		SubResource("ephemeralcontainers").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
    index 8b6e0e932f..a2d4d70d46 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
    @@ -47,33 +47,33 @@ type PodExpansion interface {
     
     // Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored).
     func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error {
    -	return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error()
    +	return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error()
     }
     
     // Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource.
     // Equivalent to calling EvictV1beta1.
     // Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1().
     func (c *pods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error {
    -	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
    +	return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
     }
     
     func (c *pods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error {
    -	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
    +	return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
     }
     
     func (c *pods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error {
    -	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
    +	return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
     }
     
     // Get constructs a request for getting the logs for a pod
     func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request {
    -	return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec)
    +	return c.GetClient().Get().Namespace(c.GetNamespace()).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec)
     }
     
     // ProxyGet returns a response of the pod by calling it through the proxy.
     func (c *pods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
    -	request := c.client.Get().
    -		Namespace(c.ns).
    +	request := c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("pods").
     		SubResource("proxy").
     		Name(net.JoinSchemeNamePort(scheme, name, port)).
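
Review note: the expansion methods here only swap c.client/c.ns for the accessors; their request shapes and the exported PodExpansion interface are unchanged, so existing callers keep working. A caller-side sketch of two of them, assuming an already-constructed clientset (logsThenEvict is illustrative):

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func logsThenEvict(ctx context.Context, cs kubernetes.Interface, namespace, pod string) ([]byte, error) {
	// GetLogs still returns a *rest.Request; DoRaw executes it and returns the raw log bytes.
	logs, err := cs.CoreV1().Pods(namespace).GetLogs(pod, &v1.PodLogOptions{}).DoRaw(ctx)
	if err != nil {
		return nil, err
	}
	// EvictV1 still posts to the pods/<name>/eviction subresource.
	eviction := &policyv1.Eviction{ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: namespace}}
	if err := cs.CoreV1().Pods(namespace).EvictV1(ctx, eviction); err != nil {
		return logs, err
	}
	return logs, nil
}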
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
    index ff90fc0e62..060a059093 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PodTemplatesGetter has a method to return a PodTemplateInterface.
    @@ -55,154 +52,18 @@ type PodTemplateInterface interface {
     
     // podTemplates implements PodTemplateInterface
     type podTemplates struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration]
     }
     
     // newPodTemplates returns a PodTemplates
     func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates {
     	return &podTemplates{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration](
    +			"podtemplates",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.PodTemplate { return &v1.PodTemplate{} },
    +			func() *v1.PodTemplateList { return &v1.PodTemplateList{} }),
     	}
     }
    -
    -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any.
    -func (c *podTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) {
    -	result = &v1.PodTemplate{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors.
    -func (c *podTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.PodTemplateList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested podTemplates.
    -func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a podTemplate and creates it.  Returns the server's representation of the podTemplate, and an error, if there is any.
    -func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) {
    -	result = &v1.PodTemplate{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podTemplate).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any.
    -func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) {
    -	result = &v1.PodTemplate{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		Name(podTemplate.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podTemplate).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs.
    -func (c *podTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *podTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched podTemplate.
    -func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) {
    -	result = &v1.PodTemplate{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate.
    -func (c *podTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) {
    -	if podTemplate == nil {
    -		return nil, fmt.Errorf("podTemplate provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(podTemplate)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := podTemplate.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podTemplate.Name must be provided to Apply")
    -	}
    -	result = &v1.PodTemplate{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("podtemplates").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
    index 49c75d967b..9b275ed1ba 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
    @@ -20,9 +20,6 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	autoscalingv1 "k8s.io/api/autoscaling/v1"
     	v1 "k8s.io/api/core/v1"
    @@ -30,8 +27,8 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ReplicationControllersGetter has a method to return a ReplicationControllerInterface.
    @@ -44,6 +41,7 @@ type ReplicationControllersGetter interface {
     type ReplicationControllerInterface interface {
     	Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error)
     	Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -52,6 +50,7 @@ type ReplicationControllerInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error)
     	Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error)
     	GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
     	UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
    @@ -61,209 +60,27 @@ type ReplicationControllerInterface interface {
     
     // replicationControllers implements ReplicationControllerInterface
     type replicationControllers struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration]
     }
     
     // newReplicationControllers returns a ReplicationControllers
     func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers {
     	return &replicationControllers{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration](
    +			"replicationcontrollers",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.ReplicationController { return &v1.ReplicationController{} },
    +			func() *v1.ReplicationControllerList { return &v1.ReplicationControllerList{} }),
     	}
     }
     
    -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any.
    -func (c *replicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) {
    -	result = &v1.ReplicationController{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors.
    -func (c *replicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ReplicationControllerList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested replicationControllers.
    -func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a replicationController and creates it.  Returns the server's representation of the replicationController, and an error, if there is any.
    -func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) {
    -	result = &v1.ReplicationController{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicationController).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any.
    -func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) {
    -	result = &v1.ReplicationController{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		Name(replicationController.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicationController).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) {
    -	result = &v1.ReplicationController{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		Name(replicationController.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicationController).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs.
    -func (c *replicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *replicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched replicationController.
    -func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) {
    -	result = &v1.ReplicationController{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied replicationController.
    -func (c *replicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) {
    -	if replicationController == nil {
    -		return nil, fmt.Errorf("replicationController provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicationController)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := replicationController.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicationController.Name must be provided to Apply")
    -	}
    -	result = &v1.ReplicationController{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *replicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) {
    -	if replicationController == nil {
    -		return nil, fmt.Errorf("replicationController provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicationController)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := replicationController.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicationController.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.ReplicationController{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicationcontrollers").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
     func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    +	err = c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("replicationcontrollers").
     		Name(replicationControllerName).
     		SubResource("scale").
    @@ -276,8 +93,8 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro
     // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
     	result = &autoscalingv1.Scale{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("replicationcontrollers").
     		Name(replicationControllerName).
     		SubResource("scale").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
    index 8444d164ed..4b2dcd3b59 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ResourceQuotasGetter has a method to return a ResourceQuotaInterface.
    @@ -43,6 +40,7 @@ type ResourceQuotasGetter interface {
     type ResourceQuotaInterface interface {
     	Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error)
     	Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type ResourceQuotaInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error)
     	Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error)
     	ResourceQuotaExpansion
     }
     
     // resourceQuotas implements ResourceQuotaInterface
     type resourceQuotas struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration]
     }
     
     // newResourceQuotas returns a ResourceQuotas
     func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas {
     	return &resourceQuotas{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration](
    +			"resourcequotas",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.ResourceQuota { return &v1.ResourceQuota{} },
    +			func() *v1.ResourceQuotaList { return &v1.ResourceQuotaList{} }),
     	}
     }
    -
    -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any.
    -func (c *resourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) {
    -	result = &v1.ResourceQuota{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
    -func (c *resourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ResourceQuotaList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceQuotas.
    -func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a resourceQuota and creates it.  Returns the server's representation of the resourceQuota, and an error, if there is any.
    -func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) {
    -	result = &v1.ResourceQuota{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceQuota).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
    -func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) {
    -	result = &v1.ResourceQuota{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		Name(resourceQuota.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceQuota).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) {
    -	result = &v1.ResourceQuota{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		Name(resourceQuota.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceQuota).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs.
    -func (c *resourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched resourceQuota.
    -func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) {
    -	result = &v1.ResourceQuota{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota.
    -func (c *resourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) {
    -	if resourceQuota == nil {
    -		return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceQuota)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceQuota.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply")
    -	}
    -	result = &v1.ResourceQuota{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *resourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) {
    -	if resourceQuota == nil {
    -		return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceQuota)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := resourceQuota.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.ResourceQuota{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("resourcequotas").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
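The per-type timeout plumbing removed above (converting `ListOptions.TimeoutSeconds` into a request timeout inside DeleteCollection) now lives in the generic client, and callers pass the same options as before. A sketch with a placeholder selector and timeout:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// purgeQuotas deletes all ResourceQuotas matching a label selector. The
// TimeoutSeconds value is turned into a request timeout by the client,
// just as the removed per-type code above used to do.
func purgeQuotas(ctx context.Context, cs kubernetes.Interface) error {
	timeout := int64(30)
	return cs.CoreV1().ResourceQuotas("default").DeleteCollection(ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "app=demo", TimeoutSeconds: &timeout},
	)
}
```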
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
    index 4aba330381..12a8d1178f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // SecretsGetter has a method to return a SecretInterface.
    @@ -55,154 +52,18 @@ type SecretInterface interface {
     
     // secrets implements SecretInterface
     type secrets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration]
     }
     
     // newSecrets returns a Secrets
     func newSecrets(c *CoreV1Client, namespace string) *secrets {
     	return &secrets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration](
    +			"secrets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Secret { return &v1.Secret{} },
    +			func() *v1.SecretList { return &v1.SecretList{} }),
     	}
     }
    -
    -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
    -func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) {
    -	result = &v1.Secret{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Secrets that match those selectors.
    -func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.SecretList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested secrets.
    -func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a secret and creates it.  Returns the server's representation of the secret, and an error, if there is any.
    -func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
    -	result = &v1.Secret{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(secret).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
    -func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) {
    -	result = &v1.Secret{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		Name(secret.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(secret).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the secret and deletes it. Returns an error if one occurs.
    -func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched secret.
    -func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) {
    -	result = &v1.Secret{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied secret.
    -func (c *secrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) {
    -	if secret == nil {
    -		return nil, fmt.Errorf("secret provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(secret)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := secret.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("secret.Name must be provided to Apply")
    -	}
    -	result = &v1.Secret{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("secrets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
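Apply for Secrets is likewise now served by the generic client, while the declarative entry point remains `SecretApplyConfiguration`. A hedged server-side apply sketch; the field manager string and data are placeholders, and it assumes the generated `applyconfigurations/core/v1` constructors that this diff only references indirectly:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applySecret declaratively applies a Secret. FieldManager is required for
// server-side apply; "prometheus-engine-example" is a placeholder name.
func applySecret(ctx context.Context, cs kubernetes.Interface) error {
	cfg := applycorev1.Secret("demo-secret", "default").
		WithStringData(map[string]string{"token": "placeholder"})
	_, err := cs.CoreV1().Secrets("default").Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "prometheus-engine-example"})
	return err
}
```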
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
    index 3fe22ba444..ec935a3247 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/core/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ServicesGetter has a method to return a ServiceInterface.
    @@ -43,6 +40,7 @@ type ServicesGetter interface {
     type ServiceInterface interface {
     	Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error)
     	Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error)
    @@ -50,190 +48,25 @@ type ServiceInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error)
     	Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error)
     	ServiceExpansion
     }
     
     // services implements ServiceInterface
     type services struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration]
     }
     
     // newServices returns a Services
     func newServices(c *CoreV1Client, namespace string) *services {
     	return &services{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration](
    +			"services",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Service { return &v1.Service{} },
    +			func() *v1.ServiceList { return &v1.ServiceList{} }),
     	}
     }
    -
    -// Get takes name of the service, and returns the corresponding service object, and an error if there is any.
    -func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) {
    -	result = &v1.Service{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("services").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Services that match those selectors.
    -func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ServiceList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("services").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested services.
    -func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("services").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a service and creates it.  Returns the server's representation of the service, and an error, if there is any.
    -func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) {
    -	result = &v1.Service{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("services").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(service).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any.
    -func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
    -	result = &v1.Service{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("services").
    -		Name(service.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(service).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
    -	result = &v1.Service{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("services").
    -		Name(service.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(service).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the service and deletes it. Returns an error if one occurs.
    -func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("services").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched service.
    -func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) {
    -	result = &v1.Service{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("services").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied service.
    -func (c *services) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) {
    -	if service == nil {
    -		return nil, fmt.Errorf("service provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(service)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := service.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("service.Name must be provided to Apply")
    -	}
    -	result = &v1.Service{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("services").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *services) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) {
    -	if service == nil {
    -		return nil, fmt.Errorf("service provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(service)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := service.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("service.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.Service{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("services").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go
    index 4937fd1a39..9a6f781387 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go
    @@ -28,8 +28,8 @@ type ServiceExpansion interface {
     
     // ProxyGet returns a response of the service by calling it through the proxy.
     func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
    -	request := c.client.Get().
    -		Namespace(c.ns).
    +	request := c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("services").
     		SubResource("proxy").
     		Name(net.JoinSchemeNamePort(scheme, name, port)).
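ProxyGet, the only Service expansion method, now resolves the REST client through `GetClient()`/`GetNamespace()` but behaves the same for callers. A sketch of reading a path through the apiserver proxy; scheme, service name, port and path are placeholders:

```go
package example

import (
	"context"

	"k8s.io/client-go/kubernetes"
)

// proxyHealthz fetches /healthz from a Service via the apiserver proxy using
// the ProxyGet expansion shown above; the ResponseWrapper is read with DoRaw.
func proxyHealthz(ctx context.Context, cs kubernetes.Interface) ([]byte, error) {
	return cs.CoreV1().Services("default").
		ProxyGet("http", "demo-svc", "8080", "healthz", nil).
		DoRaw(ctx)
}
```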
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
    index bdf589b960..eb995d4548 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
    @@ -20,9 +20,6 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	authenticationv1 "k8s.io/api/authentication/v1"
     	v1 "k8s.io/api/core/v1"
    @@ -30,8 +27,8 @@ import (
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ServiceAccountsGetter has a method to return a ServiceAccountInterface.
    @@ -58,163 +55,27 @@ type ServiceAccountInterface interface {
     
     // serviceAccounts implements ServiceAccountInterface
     type serviceAccounts struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration]
     }
     
     // newServiceAccounts returns a ServiceAccounts
     func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts {
     	return &serviceAccounts{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration](
    +			"serviceaccounts",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.ServiceAccount { return &v1.ServiceAccount{} },
    +			func() *v1.ServiceAccountList { return &v1.ServiceAccountList{} }),
     	}
     }
     
    -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any.
    -func (c *serviceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) {
    -	result = &v1.ServiceAccount{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors.
    -func (c *serviceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ServiceAccountList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested serviceAccounts.
    -func (c *serviceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a serviceAccount and creates it.  Returns the server's representation of the serviceAccount, and an error, if there is any.
    -func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) {
    -	result = &v1.ServiceAccount{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(serviceAccount).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any.
    -func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) {
    -	result = &v1.ServiceAccount{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		Name(serviceAccount.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(serviceAccount).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs.
    -func (c *serviceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *serviceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched serviceAccount.
    -func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) {
    -	result = &v1.ServiceAccount{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount.
    -func (c *serviceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) {
    -	if serviceAccount == nil {
    -		return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(serviceAccount)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := serviceAccount.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply")
    -	}
    -	result = &v1.ServiceAccount{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("serviceaccounts").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // CreateToken takes the representation of a tokenRequest and creates it.  Returns the server's representation of the tokenRequest, and an error, if there is any.
     func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) {
     	result = &authenticationv1.TokenRequest{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    +	err = c.GetClient().Post().
    +		Namespace(c.GetNamespace()).
     		Resource("serviceaccounts").
     		Name(serviceAccountName).
     		SubResource("token").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go
    index 63e616b033..1f927055cc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/discovery/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // EndpointSlicesGetter has a method to return a EndpointSliceInterface.
    @@ -55,154 +52,18 @@ type EndpointSliceInterface interface {
     
     // endpointSlices implements EndpointSliceInterface
     type endpointSlices struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration]
     }
     
     // newEndpointSlices returns a EndpointSlices
     func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices {
     	return &endpointSlices{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration](
    +			"endpointslices",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.EndpointSlice { return &v1.EndpointSlice{} },
    +			func() *v1.EndpointSliceList { return &v1.EndpointSliceList{} }),
     	}
     }
    -
    -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
    -func (c *endpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) {
    -	result = &v1.EndpointSlice{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
    -func (c *endpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.EndpointSliceList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested endpointSlices.
    -func (c *endpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a endpointSlice and creates it.  Returns the server's representation of the endpointSlice, and an error, if there is any.
    -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) {
    -	result = &v1.EndpointSlice{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(endpointSlice).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
    -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) {
    -	result = &v1.EndpointSlice{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(endpointSlice.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(endpointSlice).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
    -func (c *endpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched endpointSlice.
    -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) {
    -	result = &v1.EndpointSlice{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice.
    -func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) {
    -	if endpointSlice == nil {
    -		return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(endpointSlice)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := endpointSlice.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply")
    -	}
    -	result = &v1.EndpointSlice{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
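The watch handshake removed above (setting `opts.Watch` and honouring `TimeoutSeconds`) is also handled by the generic client now; caller code is unaffected. A sketch that watches EndpointSlices for a placeholder label selector:

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchSlices logs EndpointSlice events for a label selector until the watch
// channel closes; selector and namespace are placeholders.
func watchSlices(ctx context.Context, cs kubernetes.Interface) error {
	w, err := cs.DiscoveryV1().EndpointSlices("default").Watch(ctx,
		metav1.ListOptions{LabelSelector: "kubernetes.io/service-name=demo-svc"})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("endpointslice event: %s\n", ev.Type)
	}
	return nil
}
```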
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go
    index d159b5ea9e..6bbbde82ec 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go
    @@ -44,22 +44,24 @@ var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice")
     
     // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
     func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) {
    +	emptyResult := &v1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1.EndpointSlice{})
    +		Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.EndpointSlice), err
     }
     
     // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
     func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) {
    +	emptyResult := &v1.EndpointSliceList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1.EndpointSliceList{})
    +		Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested endpointSlices.
     func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a endpointSlice and creates it.  Returns the server's representation of the endpointSlice, and an error, if there is any.
     func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) {
    +	emptyResult := &v1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{})
    +		Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.EndpointSlice), err
     }
     
     // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
     func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) {
    +	emptyResult := &v1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{})
    +		Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.EndpointSlice), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.EndpointSliceList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.D
     
     // Patch applies the patch and returns the patched endpointSlice.
     func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) {
    +	emptyResult := &v1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1.EndpointSlice{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.EndpointSlice), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discovery
     	if name == nil {
     		return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.EndpointSlice{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.EndpointSlice), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
    index 2ade833029..298cfbc879 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/discovery/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // EndpointSlicesGetter has a method to return a EndpointSliceInterface.
    @@ -55,154 +52,18 @@ type EndpointSliceInterface interface {
     
     // endpointSlices implements EndpointSliceInterface
     type endpointSlices struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration]
     }
     
     // newEndpointSlices returns a EndpointSlices
     func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices {
     	return &endpointSlices{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration](
    +			"endpointslices",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.EndpointSlice { return &v1beta1.EndpointSlice{} },
    +			func() *v1beta1.EndpointSliceList { return &v1beta1.EndpointSliceList{} }),
     	}
     }
    -
    -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
    -func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
    -	result = &v1beta1.EndpointSlice{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
    -func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.EndpointSliceList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested endpointSlices.
    -func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a endpointSlice and creates it.  Returns the server's representation of the endpointSlice, and an error, if there is any.
    -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) {
    -	result = &v1beta1.EndpointSlice{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(endpointSlice).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
    -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) {
    -	result = &v1beta1.EndpointSlice{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(endpointSlice.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(endpointSlice).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
    -func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched endpointSlice.
    -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
    -	result = &v1beta1.EndpointSlice{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice.
    -func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) {
    -	if endpointSlice == nil {
    -		return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(endpointSlice)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := endpointSlice.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.EndpointSlice{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("endpointslices").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go
    index 2683718113..65cf69b9dc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go
    @@ -44,22 +44,24 @@ var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice")
     
     // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
     func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
    +	emptyResult := &v1beta1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{})
    +		Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.EndpointSlice), err
     }
     
     // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
     func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
    +	emptyResult := &v1beta1.EndpointSliceList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{})
    +		Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested endpointSlices.
     func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a endpointSlice and creates it.  Returns the server's representation of the endpointSlice, and an error, if there is any.
     func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) {
    +	emptyResult := &v1beta1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{})
    +		Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.EndpointSlice), err
     }
     
     // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
     func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) {
    +	emptyResult := &v1beta1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{})
    +		Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.EndpointSlice), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.Delet
     
     // Patch applies the patch and returns the patched endpointSlice.
     func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
    +	emptyResult := &v1beta1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.EndpointSlice), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discovery
     	if name == nil {
     		return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.EndpointSlice{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.EndpointSlice{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.EndpointSlice), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go
    index c9f2bbed50..d021a76c4e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/events/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	eventsv1 "k8s.io/client-go/applyconfigurations/events/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // EventsGetter has a method to return a EventInterface.
    @@ -55,154 +52,18 @@ type EventInterface interface {
     
     // events implements EventInterface
     type events struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration]
     }
     
     // newEvents returns a Events
     func newEvents(c *EventsV1Client, namespace string) *events {
     	return &events{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration](
    +			"events",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Event { return &v1.Event{} },
    +			func() *v1.EventList { return &v1.EventList{} }),
     	}
     }
    -
    -// Get takes name of the event, and returns the corresponding event object, and an error if there is any.
    -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Events that match those selectors.
    -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.EventList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested events.
    -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
    -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(event).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
    -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(event.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(event).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the event and deletes it. Returns an error if one occurs.
    -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched event.
    -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) {
    -	result = &v1.Event{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied event.
    -func (c *events) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) {
    -	if event == nil {
    -		return nil, fmt.Errorf("event provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(event)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := event.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("event.Name must be provided to Apply")
    -	}
    -	result = &v1.Event{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go
    index 0928781f1e..1e79eb9845 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go
    @@ -44,22 +44,24 @@ var eventsKind = v1.SchemeGroupVersion.WithKind("Event")
     
     // Get takes name of the event, and returns the corresponding event object, and an error if there is any.
     func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{})
    +		Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
     
     // List takes label and field selectors, and returns the list of Events that match those selectors.
     func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) {
    +	emptyResult := &v1.EventList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{})
    +		Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested events.
     func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
     func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{})
    +		Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
     
     // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
     func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{})
    +		Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.Delete
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.EventList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt
     
     // Patch applies the patch and returns the patched event.
     func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) {
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1.EventApplyConfig
     	if name == nil {
     		return nil, fmt.Errorf("event.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Event), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
    index dfdf8b8979..77ca2e7756 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/events/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // EventsGetter has a method to return a EventInterface.
    @@ -55,154 +52,18 @@ type EventInterface interface {
     
     // events implements EventInterface
     type events struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration]
     }
     
     // newEvents returns a Events
     func newEvents(c *EventsV1beta1Client, namespace string) *events {
     	return &events{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration](
    +			"events",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Event { return &v1beta1.Event{} },
    +			func() *v1beta1.EventList { return &v1beta1.EventList{} }),
     	}
     }
    -
    -// Get takes name of the event, and returns the corresponding event object, and an error if there is any.
    -func (c *events) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) {
    -	result = &v1beta1.Event{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Events that match those selectors.
    -func (c *events) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.EventList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested events.
    -func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
    -func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) {
    -	result = &v1beta1.Event{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(event).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
    -func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) {
    -	result = &v1beta1.Event{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(event.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(event).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the event and deletes it. Returns an error if one occurs.
    -func (c *events) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *events) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("events").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched event.
    -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) {
    -	result = &v1beta1.Event{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied event.
    -func (c *events) Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) {
    -	if event == nil {
    -		return nil, fmt.Errorf("event provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(event)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := event.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("event.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.Event{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("events").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
    index 562f8d5e45..4ddbaa31af 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
    @@ -44,11 +44,11 @@ type EventExpansion interface {
     // it must either match this event client's namespace, or this event client must
     // have been created with the "" namespace.
     func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) {
    -	if e.ns != "" && event.Namespace != e.ns {
    -		return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
    +	if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() {
    +		return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace())
     	}
     	result := &v1beta1.Event{}
    -	err := e.client.Post().
    +	err := e.GetClient().Post().
     		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
     		Resource("events").
     		Body(event).
    @@ -64,11 +64,11 @@ func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event,
     // created with the "" namespace.
     // Update also requires the ResourceVersion to be set in the event object.
     func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) {
    -	if e.ns != "" && event.Namespace != e.ns {
    -		return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
    +	if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() {
    +		return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace())
     	}
     	result := &v1beta1.Event{}
    -	err := e.client.Put().
    +	err := e.GetClient().Put().
     		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
     		Resource("events").
     		Name(event.Name).
    @@ -84,11 +84,11 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event,
     // The namespace must either match this event client's namespace, or this event client must
     // have been created with the "" namespace.
     func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) {
    -	if e.ns != "" && event.Namespace != e.ns {
    -		return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
    +	if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() {
    +		return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace())
     	}
     	result := &v1beta1.Event{}
    -	err := e.client.Patch(types.StrategicMergePatchType).
    +	err := e.GetClient().Patch(types.StrategicMergePatchType).
     		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
     		Resource("events").
     		Name(event.Name).
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go
    index 522b4dc063..b00f2126a5 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go
    @@ -44,22 +44,24 @@ var eventsKind = v1beta1.SchemeGroupVersion.WithKind("Event")
     
     // Get takes name of the event, and returns the corresponding event object, and an error if there is any.
     func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) {
    +	emptyResult := &v1beta1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1beta1.Event{})
    +		Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Event), err
     }
     
     // List takes label and field selectors, and returns the list of Events that match those selectors.
     func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) {
    +	emptyResult := &v1beta1.EventList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1beta1.EventList{})
    +		Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1b
     // Watch returns a watch.Interface that watches the requested events.
     func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
     func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) {
    +	emptyResult := &v1beta1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{})
    +		Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Event), err
     }
     
     // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
     func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) {
    +	emptyResult := &v1beta1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{})
    +		Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Event), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOpti
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.EventList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions
     
     // Patch applies the patch and returns the patched event.
     func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) {
    +	emptyResult := &v1beta1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Event), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1beta1.EventApplyC
     	if name == nil {
     		return nil, fmt.Errorf("event.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Event{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Event{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Event), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
    index ffe219fdaa..f86194bf05 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // DaemonSetsGetter has a method to return a DaemonSetInterface.
    @@ -43,6 +40,7 @@ type DaemonSetsGetter interface {
     type DaemonSetInterface interface {
     	Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error)
     	Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type DaemonSetInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error)
     	Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error)
     	DaemonSetExpansion
     }
     
     // daemonSets implements DaemonSetInterface
     type daemonSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration]
     }
     
     // newDaemonSets returns a DaemonSets
     func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets {
     	return &daemonSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration](
    +			"daemonsets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.DaemonSet { return &v1beta1.DaemonSet{} },
    +			func() *v1beta1.DaemonSetList { return &v1beta1.DaemonSetList{} }),
     	}
     }
    -
    -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
    -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) {
    -	result = &v1beta1.DaemonSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
    -func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.DaemonSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested daemonSets.
    -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
    -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) {
    -	result = &v1beta1.DaemonSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
    -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) {
    -	result = &v1beta1.DaemonSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(daemonSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) {
    -	result = &v1beta1.DaemonSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(daemonSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(daemonSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
    -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched daemonSet.
    -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) {
    -	result = &v1beta1.DaemonSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet.
    -func (c *daemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) {
    -	if daemonSet == nil {
    -		return nil, fmt.Errorf("daemonSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(daemonSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := daemonSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.DaemonSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) {
    -	if daemonSet == nil {
    -		return nil, fmt.Errorf("daemonSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(daemonSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := daemonSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.DaemonSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("daemonsets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
    index c41d8dbc26..021fbb3b3b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
    @@ -22,15 +22,14 @@ import (
     	"context"
     	json "encoding/json"
     	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // DeploymentsGetter has a method to return a DeploymentInterface.
    @@ -43,6 +42,7 @@ type DeploymentsGetter interface {
     type DeploymentInterface interface {
     	Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error)
     	Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,6 +51,7 @@ type DeploymentInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error)
     	Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
     	GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error)
     	UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
    @@ -61,209 +62,27 @@ type DeploymentInterface interface {
     
     // deployments implements DeploymentInterface
     type deployments struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration]
     }
     
     // newDeployments returns a Deployments
     func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments {
     	return &deployments{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration](
    +			"deployments",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Deployment { return &v1beta1.Deployment{} },
    +			func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }),
     	}
     }
     
    -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
    -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Deployments that match those selectors.
    -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.DeploymentList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested deployments.
    -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
    -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(deployment.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(deployment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
    -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched deployment.
    -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) {
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment.
    -func (c *deployments) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *deployments) ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) {
    -	if deployment == nil {
    -		return nil, fmt.Errorf("deployment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(deployment)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := deployment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.Deployment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("deployments").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any.
     func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
     	result = &v1beta1.Scale{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    +	err = c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("deployments").
     		Name(deploymentName).
     		SubResource("scale").
    @@ -276,8 +95,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio
     // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
     	result = &v1beta1.Scale{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("deployments").
     		Name(deploymentName).
     		SubResource("scale").
    @@ -301,8 +120,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca
     	}
     
     	result = &v1beta1.Scale{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    +	err = c.GetClient().Patch(types.ApplyPatchType).
    +		Namespace(c.GetNamespace()).
     		Resource("deployments").
     		Name(deploymentName).
     		SubResource("scale").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
    index 5c409ac996..bd75b8a38e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
    @@ -31,5 +31,5 @@ type DeploymentExpansion interface {
     
     // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace.
     func (c *deployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error {
    -	return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error()
    +	return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error()
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go
    index abe3d2da1f..f14943082d 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go
    @@ -44,22 +44,24 @@ var daemonsetsKind = v1beta1.SchemeGroupVersion.WithKind("DaemonSet")
     
     // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
     func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) {
    +	emptyResult := &v1beta1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{})
    +		Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.DaemonSet), err
     }
     
     // List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
     func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) {
    +	emptyResult := &v1beta1.DaemonSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta1.DaemonSetList{})
    +		Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested daemonSets.
     func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
     func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) {
    +	emptyResult := &v1beta1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{})
    +		Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.DaemonSet), err
     }
     
     // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
     func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) {
    +	emptyResult := &v1beta1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.DaemonSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) {
    +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) {
    +	emptyResult := &v1beta1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.DaemonSet), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.Delete
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOpt
     
     // Patch applies the patch and returns the patched daemonSet.
     func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) {
    +	emptyResult := &v1beta1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.DaemonSet), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1
     	if name == nil {
     		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.DaemonSet), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv
     	if name == nil {
     		return nil, fmt.Errorf("daemonSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.DaemonSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.DaemonSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.DaemonSet), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go
    index e399361a92..b81d4a96c4 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go
    @@ -44,22 +44,24 @@ var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment")
     
     // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
     func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{})
    +		Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
     
     // List takes label and field selectors, and returns the list of Deployments that match those selectors.
     func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
    +	emptyResult := &v1beta1.DeploymentList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{})
    +		Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested deployments.
     func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
    +		Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
     
     // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
     func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
    +		Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) {
    +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched deployment.
     func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) {
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *extensionsv1bet
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
    @@ -179,33 +186,36 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *extension
     	if name == nil {
     		return nil, fmt.Errorf("deployment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Deployment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Deployment), err
     }
     
     // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any.
     func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
    +	emptyResult := &v1beta1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &v1beta1.Scale{})
    +		Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Scale), err
     }
     
     // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
    +	emptyResult := &v1beta1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Scale), err
     }
    @@ -220,11 +230,12 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string,
     	if err != nil {
     		return nil, err
     	}
    +	emptyResult := &v1beta1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Scale), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go
    index 48ae51e80d..ae95682fc1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go
    @@ -44,22 +44,24 @@ var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress")
     
     // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
     func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{})
    +		Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
     
     // List takes label and field selectors, and returns the list of Ingresses that match those selectors.
     func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
    +	emptyResult := &v1beta1.IngressList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{})
    +		Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *
     // Watch returns a watch.Interface that watches the requested ingresses.
     func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
     func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{})
    +		Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
     
     // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
     func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{})
    +		Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) {
    +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteO
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.IngressList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOpti
     
     // Patch applies the patch and returns the patched ingress.
     func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *extensionsv1beta1.In
     	if name == nil {
     		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *extensionsv1be
     	if name == nil {
     		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go
    index a32022140a..d829a0c638 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go
    @@ -44,22 +44,24 @@ var networkpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("NetworkPolicy")
     
     // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
     func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) {
    +	emptyResult := &v1beta1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1beta1.NetworkPolicy{})
    +		Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.NetworkPolicy), err
     }
     
     // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
     func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) {
    +	emptyResult := &v1beta1.NetworkPolicyList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1beta1.NetworkPolicyList{})
    +		Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested networkPolicies.
     func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a networkPolicy and creates it.  Returns the server's representation of the networkPolicy, and an error, if there is any.
     func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) {
    +	emptyResult := &v1beta1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{})
    +		Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.NetworkPolicy), err
     }
     
     // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
     func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) {
    +	emptyResult := &v1beta1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{})
    +		Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.NetworkPolicy), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.NetworkPolicyList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.Dele
     
     // Patch applies the patch and returns the patched networkPolicy.
     func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) {
    +	emptyResult := &v1beta1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1beta1.NetworkPolicy{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.NetworkPolicy), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *extensio
     	if name == nil {
     		return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.NetworkPolicy{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.NetworkPolicy), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go
    index 42da6fa8b6..5d94ba73b0 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go
    @@ -44,22 +44,24 @@ var replicasetsKind = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet")
     
     // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
     func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) {
    +	emptyResult := &v1beta1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{})
    +		Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ReplicaSet), err
     }
     
     // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
     func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) {
    +	emptyResult := &v1beta1.ReplicaSetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta1.ReplicaSetList{})
    +		Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested replicaSets.
     func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
     func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) {
    +	emptyResult := &v1beta1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{})
    +		Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ReplicaSet), err
     }
     
     // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
     func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) {
    +	emptyResult := &v1beta1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{})
    +		Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ReplicaSet), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) {
    +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) {
    +	emptyResult := &v1beta1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ReplicaSet), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched replicaSet.
     func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) {
    +	emptyResult := &v1beta1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ReplicaSet), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *extensionsv1bet
     	if name == nil {
     		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ReplicaSet), err
     }
    @@ -179,33 +186,36 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *extension
     	if name == nil {
     		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ReplicaSet{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.ReplicaSet{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ReplicaSet), err
     }
     
     // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any.
     func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
    +	emptyResult := &v1beta1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &v1beta1.Scale{})
    +		Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Scale), err
     }
     
     // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
    +	emptyResult := &v1beta1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{})
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Scale), err
     }
    @@ -220,11 +230,12 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string,
     	if err != nil {
     		return nil, err
     	}
    +	emptyResult := &v1beta1.Scale{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Scale), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
    index dd4012cc23..4511c93fc2 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // IngressesGetter has a method to return a IngressInterface.
    @@ -43,6 +40,7 @@ type IngressesGetter interface {
     type IngressInterface interface {
     	Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error)
     	Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type IngressInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error)
     	Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
     	IngressExpansion
     }
     
     // ingresses implements IngressInterface
     type ingresses struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration]
     }
     
     // newIngresses returns a Ingresses
     func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses {
     	return &ingresses{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration](
    +			"ingresses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Ingress { return &v1beta1.Ingress{} },
    +			func() *v1beta1.IngressList { return &v1beta1.IngressList{} }),
     	}
     }
    -
    -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
    -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
    -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.IngressList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested ingresses.
    -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
    -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
    -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(ingress.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(ingress.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
    -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched ingress.
    -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress.
    -func (c *ingresses) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) {
    -	if ingress == nil {
    -		return nil, fmt.Errorf("ingress provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingress)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := ingress.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) {
    -	if ingress == nil {
    -		return nil, fmt.Errorf("ingress provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingress)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := ingress.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
    index 978b26db03..afa8203c3d 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface.
    @@ -55,154 +52,18 @@ type NetworkPolicyInterface interface {
     
     // networkPolicies implements NetworkPolicyInterface
     type networkPolicies struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration]
     }
     
     // newNetworkPolicies returns a NetworkPolicies
     func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies {
     	return &networkPolicies{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration](
    +			"networkpolicies",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.NetworkPolicy { return &v1beta1.NetworkPolicy{} },
    +			func() *v1beta1.NetworkPolicyList { return &v1beta1.NetworkPolicyList{} }),
     	}
     }
    -
    -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
    -func (c *networkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) {
    -	result = &v1beta1.NetworkPolicy{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
    -func (c *networkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.NetworkPolicyList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested networkPolicies.
    -func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a networkPolicy and creates it.  Returns the server's representation of the networkPolicy, and an error, if there is any.
    -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) {
    -	result = &v1beta1.NetworkPolicy{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(networkPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
    -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) {
    -	result = &v1beta1.NetworkPolicy{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(networkPolicy.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(networkPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
    -func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched networkPolicy.
    -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) {
    -	result = &v1beta1.NetworkPolicy{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy.
    -func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) {
    -	if networkPolicy == nil {
    -		return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(networkPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := networkPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.NetworkPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
    index 3c907a3a04..8973948f39 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
    @@ -22,15 +22,14 @@ import (
     	"context"
     	json "encoding/json"
     	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/extensions/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ReplicaSetsGetter has a method to return a ReplicaSetInterface.
    @@ -43,6 +42,7 @@ type ReplicaSetsGetter interface {
     type ReplicaSetInterface interface {
     	Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error)
     	Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,6 +51,7 @@ type ReplicaSetInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error)
     	Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error)
     	GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error)
     	UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
    @@ -61,209 +62,27 @@ type ReplicaSetInterface interface {
     
     // replicaSets implements ReplicaSetInterface
     type replicaSets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration]
     }
     
     // newReplicaSets returns a ReplicaSets
     func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets {
     	return &replicaSets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration](
    +			"replicasets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.ReplicaSet { return &v1beta1.ReplicaSet{} },
    +			func() *v1beta1.ReplicaSetList { return &v1beta1.ReplicaSetList{} }),
     	}
     }
     
    -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
    -func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) {
    -	result = &v1beta1.ReplicaSet{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
    -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.ReplicaSetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested replicaSets.
    -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
    -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) {
    -	result = &v1beta1.ReplicaSet{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
    -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) {
    -	result = &v1beta1.ReplicaSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(replicaSet.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) {
    -	result = &v1beta1.ReplicaSet{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(replicaSet.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(replicaSet).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
    -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched replicaSet.
    -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) {
    -	result = &v1beta1.ReplicaSet{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet.
    -func (c *replicaSets) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) {
    -	if replicaSet == nil {
    -		return nil, fmt.Errorf("replicaSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicaSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := replicaSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.ReplicaSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) {
    -	if replicaSet == nil {
    -		return nil, fmt.Errorf("replicaSet provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(replicaSet)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := replicaSet.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("replicaSet.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.ReplicaSet{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("replicasets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
     // GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any.
     func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
     	result = &v1beta1.Scale{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    +	err = c.GetClient().Get().
    +		Namespace(c.GetNamespace()).
     		Resource("replicasets").
     		Name(replicaSetName).
     		SubResource("scale").
    @@ -276,8 +95,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio
     // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
     func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
     	result = &v1beta1.Scale{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    +	err = c.GetClient().Put().
    +		Namespace(c.GetNamespace()).
     		Resource("replicasets").
     		Name(replicaSetName).
     		SubResource("scale").
    @@ -301,8 +120,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca
     	}
     
     	result = &v1beta1.Scale{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    +	err = c.GetClient().Patch(types.ApplyPatchType).
    +		Namespace(c.GetNamespace()).
     		Resource("replicasets").
     		Name(replicaSetName).
     		SubResource("scale").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go
    index 922a60d89b..bf2b63fb2d 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go
    @@ -43,20 +43,22 @@ var flowschemasKind = v1.SchemeGroupVersion.WithKind("FlowSchema")
     
     // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
     func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) {
    +	emptyResult := &v1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1.FlowSchema{})
    +		Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.FlowSchema), err
     }
     
     // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
     func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) {
    +	emptyResult := &v1.FlowSchemaList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1.FlowSchemaList{})
    +		Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested flowSchemas.
     func (c *FakeFlowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts))
     }
     
     // Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) {
    +	emptyResult := &v1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1.FlowSchema{})
    +		Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.FlowSchema), err
     }
     
     // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) {
    +	emptyResult := &v1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1.FlowSchema{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.FlowSchema), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) {
    +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) {
    +	emptyResult := &v1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1.FlowSchema{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.FlowSchema), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts metav1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.FlowSchemaList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.Dele
     
     // Patch applies the patch and returns the patched flowSchema.
     func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) {
    +	emptyResult := &v1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.FlowSchema), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.F
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.FlowSchema), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.FlowSchema), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go
    index 27d9586748..053de56ed1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go
    @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1.SchemeGroupVersion.WithKind("PriorityLe
     
     // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
     func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityLevelConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
     func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) {
    +	emptyResult := &v1.PriorityLevelConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1.PriorityLevelConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.
     // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
     func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts))
     }
     
     // Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityLevelConfiguration), err
     }
     
     // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityLevelConfiguration), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) {
    +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityLevelConfiguration), err
     }
    @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.PriorityLevelConfigurationList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context,
     
     // Patch applies the patch and returns the patched priorityLevelConfiguration.
     func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityLevelConfiguration), err
     }
    @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityLevelConfiguration), err
     }
    @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityLevelConfiguration), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go
    index bd36c5e6a4..2606cee070 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/flowcontrol/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // FlowSchemasGetter has a method to return a FlowSchemaInterface.
    @@ -43,6 +40,7 @@ type FlowSchemasGetter interface {
     type FlowSchemaInterface interface {
     	Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error)
     	Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,193 +49,25 @@ type FlowSchemaInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error)
     	Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error)
     	FlowSchemaExpansion
     }
     
     // flowSchemas implements FlowSchemaInterface
     type flowSchemas struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration]
     }
     
     // newFlowSchemas returns a FlowSchemas
     func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas {
     	return &flowSchemas{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration](
    +			"flowschemas",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.FlowSchema { return &v1.FlowSchema{} },
    +			func() *v1.FlowSchemaList { return &v1.FlowSchemaList{} }),
     	}
     }
    -
    -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
    -func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) {
    -	result = &v1.FlowSchema{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
    -func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.FlowSchemaList{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested flowSchemas.
    -func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) {
    -	result = &v1.FlowSchema{}
    -	err = c.client.Post().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) {
    -	result = &v1.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) {
    -	result = &v1.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs.
    -func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched flowSchema.
    -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) {
    -	result = &v1.FlowSchema{}
    -	err = c.client.Patch(pt).
    -		Resource("flowschemas").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema.
    -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -	result = &v1.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go
    index 797fe94035..64907af606 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/flowcontrol/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface.
    @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface {
     type PriorityLevelConfigurationInterface interface {
     	Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error)
     	Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error)
     	Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error)
     	PriorityLevelConfigurationExpansion
     }
     
     // priorityLevelConfigurations implements PriorityLevelConfigurationInterface
     type priorityLevelConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration]
     }
     
     // newPriorityLevelConfigurations returns a PriorityLevelConfigurations
     func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations {
     	return &priorityLevelConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration](
    +			"prioritylevelconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.PriorityLevelConfiguration { return &v1.PriorityLevelConfiguration{} },
    +			func() *v1.PriorityLevelConfigurationList { return &v1.PriorityLevelConfigurationList{} }),
     	}
     }
    -
    -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
    -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) {
    -	result = &v1.PriorityLevelConfiguration{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
    -func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.PriorityLevelConfigurationList{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
    -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) {
    -	result = &v1.PriorityLevelConfiguration{}
    -	err = c.client.Post().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) {
    -	result = &v1.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) {
    -	result = &v1.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs.
    -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) {
    -	result = &v1.PriorityLevelConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
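Note on the hunk above: the hand-written Get, List, Watch, Create, Update, UpdateStatus, Delete, DeleteCollection, Patch, Apply, and ApplyStatus bodies are removed because the embedded gentype.ClientWithListAndApply now supplies them generically (resource "prioritylevelconfigurations", cluster-scoped, hence the empty namespace argument). The exported typed interface is unchanged, so downstream callers keep compiling. A minimal caller-side sketch, assuming a reachable cluster, a hypothetical kubeconfig path, and the cluster's built-in "exempt" priority level:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; adjust for the target environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Get is now served by the embedded generic client, but the call site
	// is identical to the pre-gentype generated clients.
	plc, err := cs.FlowcontrolV1().PriorityLevelConfigurations().Get(context.TODO(), "exempt", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(plc.Name, plc.Spec.Type)
}

The same rewrite repeats for every flowcontrol typed client in the hunks that follow.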
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go
    index be7a7e390f..8b4435a8ad 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go
    @@ -43,20 +43,22 @@ var flowschemasKind = v1beta1.SchemeGroupVersion.WithKind("FlowSchema")
     
     // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
     func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) {
    +	emptyResult := &v1beta1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta1.FlowSchema{})
    +		Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.FlowSchema), err
     }
     
     // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
     func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) {
    +	emptyResult := &v1beta1.FlowSchemaList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta1.FlowSchemaList{})
    +		Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested flowSchemas.
     func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts))
     }
     
     // Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) {
    +	emptyResult := &v1beta1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{})
    +		Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.FlowSchema), err
     }
     
     // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) {
    +	emptyResult := &v1beta1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.FlowSchema), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) {
    +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) {
    +	emptyResult := &v1beta1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta1.FlowSchema{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.FlowSchema), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.FlowSchemaList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched flowSchema.
     func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) {
    +	emptyResult := &v1beta1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta1.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.FlowSchema), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta1.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.FlowSchema), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.FlowSchema), err
     }
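The fake-client hunks above change two observable behaviours: recorded testing actions are now built with the ...WithOptions constructors, so the caller's Get/List/Create/Update/Patch/DeleteCollection options travel with the action, and on a reactor error the fakes return a typed empty object rather than nil. Tests that asserted a nil result on error should key off the returned error instead. A minimal sketch under those assumptions, using a hypothetical forced-NotFound reactor:

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	cs := fake.NewSimpleClientset()
	// Force every flowschemas Get to fail so the error path is exercised.
	cs.PrependReactor("get", "flowschemas", func(action k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewNotFound(
			schema.GroupResource{Group: "flowcontrol.apiserver.k8s.io", Resource: "flowschemas"}, "missing")
	})

	fs, err := cs.FlowcontrolV1beta1().FlowSchemas().Get(context.TODO(), "missing", metav1.GetOptions{ResourceVersion: "0"})

	// New behaviour: err is set and fs is an empty, non-nil FlowSchema.
	fmt.Println(apierrors.IsNotFound(err), fs != nil) // true true

	// The recorded action comes from NewRootGetActionWithOptions, so the
	// GetOptions passed above are attached to it as well.
	for _, a := range cs.Actions() {
		fmt.Println(a.GetVerb(), a.GetResource().Resource)
	}
}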
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go
    index 698a168b37..e139e4dceb 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go
    @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("Prior
     
     // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
     func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityLevelConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
     func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) {
    +	emptyResult := &v1beta1.PriorityLevelConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta1.PriorityLevelConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List
     // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
     func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts))
     }
     
     // Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityLevelConfiguration), err
     }
     
     // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityLevelConfiguration), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) {
    +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityLevelConfiguration), err
     }
    @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.PriorityLevelConfigurationList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context,
     
     // Patch applies the patch and returns the patched priorityLevelConfiguration.
     func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityLevelConfiguration), err
     }
    @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityLevelConfiguration), err
     }
    @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityLevelConfiguration), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go
    index a9d38becf9..3c6805b9bc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // FlowSchemasGetter has a method to return a FlowSchemaInterface.
    @@ -43,6 +40,7 @@ type FlowSchemasGetter interface {
     type FlowSchemaInterface interface {
     	Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error)
     	Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type FlowSchemaInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error)
     	Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error)
     	FlowSchemaExpansion
     }
     
     // flowSchemas implements FlowSchemaInterface
     type flowSchemas struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration]
     }
     
     // newFlowSchemas returns a FlowSchemas
     func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas {
     	return &flowSchemas{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration](
    +			"flowschemas",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} },
    +			func() *v1beta1.FlowSchemaList { return &v1beta1.FlowSchemaList{} }),
     	}
     }
    -
    -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
    -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) {
    -	result = &v1beta1.FlowSchema{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
    -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.FlowSchemaList{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested flowSchemas.
    -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) {
    -	result = &v1beta1.FlowSchema{}
    -	err = c.client.Post().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) {
    -	result = &v1beta1.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) {
    -	result = &v1beta1.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs.
    -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched flowSchema.
    -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) {
    -	result = &v1beta1.FlowSchema{}
    -	err = c.client.Patch(pt).
    -		Resource("flowschemas").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema.
    -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
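As with the v1 client earlier in this patch, Apply and ApplyStatus here are also delegated to the generic client; they still accept the applyconfigurations types and still require a field manager. A minimal server-side-apply sketch, assuming a configured clientset as in the earlier sketch, a cluster that still serves the v1beta1 flowcontrol API, and illustrative "demo-flow-schema"/"demo-manager" names:

package flowcontroldemo

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyDemoFlowSchema declaratively applies a minimal FlowSchema that points
// at the built-in "global-default" priority level; names are illustrative only.
func applyDemoFlowSchema(ctx context.Context, cs kubernetes.Interface) error {
	fs := flowcontrolv1beta1.FlowSchema("demo-flow-schema").
		WithSpec(flowcontrolv1beta1.FlowSchemaSpec().
			WithPriorityLevelConfiguration(
				flowcontrolv1beta1.PriorityLevelConfigurationReference().WithName("global-default")))

	applied, err := cs.FlowcontrolV1beta1().FlowSchemas().Apply(ctx, fs, metav1.ApplyOptions{FieldManager: "demo-manager"})
	if err != nil {
		return err
	}
	fmt.Println("applied", applied.Name)
	return nil
}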
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go
    index 41f35cbccd..049f4049d6 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface.
    @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface {
     type PriorityLevelConfigurationInterface interface {
     	Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error)
     	Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error)
     	Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error)
     	PriorityLevelConfigurationExpansion
     }
     
     // priorityLevelConfigurations implements PriorityLevelConfigurationInterface
     type priorityLevelConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]
     }
     
     // newPriorityLevelConfigurations returns a PriorityLevelConfigurations
     func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations {
     	return &priorityLevelConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration](
    +			"prioritylevelconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.PriorityLevelConfiguration { return &v1beta1.PriorityLevelConfiguration{} },
    +			func() *v1beta1.PriorityLevelConfigurationList { return &v1beta1.PriorityLevelConfigurationList{} }),
     	}
     }
    -
    -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
    -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    -	result = &v1beta1.PriorityLevelConfiguration{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
    -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.PriorityLevelConfigurationList{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
    -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    -	result = &v1beta1.PriorityLevelConfiguration{}
    -	err = c.client.Post().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    -	result = &v1beta1.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    -	result = &v1beta1.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs.
    -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) {
    -	result = &v1beta1.PriorityLevelConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go
    index 7ce6d2116b..41cad9b7a1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go
    @@ -43,20 +43,22 @@ var flowschemasKind = v1beta2.SchemeGroupVersion.WithKind("FlowSchema")
     
     // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
     func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) {
    +	emptyResult := &v1beta2.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta2.FlowSchema{})
    +		Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.FlowSchema), err
     }
     
     // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
     func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) {
    +	emptyResult := &v1beta2.FlowSchemaList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta2.FlowSchemaList{})
    +		Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested flowSchemas.
     func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts))
     }
     
     // Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) {
    +	emptyResult := &v1beta2.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{})
    +		Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.FlowSchema), err
     }
     
     // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) {
    +	emptyResult := &v1beta2.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.FlowSchema), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) {
    +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) {
    +	emptyResult := &v1beta2.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta2.FlowSchema{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.FlowSchema), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta2.FlowSchemaList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched flowSchema.
     func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) {
    +	emptyResult := &v1beta2.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta2.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.FlowSchema), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta2.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.FlowSchema), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.FlowSchema), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go
    index 7340f8a09e..f9eac85d51 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go
    @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta2.SchemeGroupVersion.WithKind("Prior
     
     // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
     func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta2.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta2.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.PriorityLevelConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
     func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) {
    +	emptyResult := &v1beta2.PriorityLevelConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta2.PriorityLevelConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List
     // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
     func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts))
     }
     
     // Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta2.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.PriorityLevelConfiguration), err
     }
     
     // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta2.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.PriorityLevelConfiguration), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) {
    +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta2.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.PriorityLevelConfiguration), err
     }
    @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta2.PriorityLevelConfigurationList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context,
     
     // Patch applies the patch and returns the patched priorityLevelConfiguration.
     func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta2.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta2.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.PriorityLevelConfiguration), err
     }
    @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta2.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.PriorityLevelConfiguration), err
     }
    @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta2.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta2.PriorityLevelConfiguration), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go
    index 3a1f12b6a2..2706157628 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go
    @@ -20,17 +20,14 @@ package v1beta2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // FlowSchemasGetter has a method to return a FlowSchemaInterface.
    @@ -43,6 +40,7 @@ type FlowSchemasGetter interface {
     type FlowSchemaInterface interface {
     	Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error)
     	Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type FlowSchemaInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error)
     	Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error)
     	FlowSchemaExpansion
     }
     
     // flowSchemas implements FlowSchemaInterface
     type flowSchemas struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration]
     }
     
     // newFlowSchemas returns a FlowSchemas
     func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas {
     	return &flowSchemas{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration](
    +			"flowschemas",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta2.FlowSchema { return &v1beta2.FlowSchema{} },
    +			func() *v1beta2.FlowSchemaList { return &v1beta2.FlowSchemaList{} }),
     	}
     }
    -
    -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
    -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) {
    -	result = &v1beta2.FlowSchema{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
    -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta2.FlowSchemaList{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested flowSchemas.
    -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) {
    -	result = &v1beta2.FlowSchema{}
    -	err = c.client.Post().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) {
    -	result = &v1beta2.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) {
    -	result = &v1beta2.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs.
    -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched flowSchema.
    -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) {
    -	result = &v1beta2.FlowSchema{}
    -	err = c.client.Patch(pt).
    -		Resource("flowschemas").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema.
    -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -	result = &v1beta2.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta2.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
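
The non-fake typed clients in this patch are all rewritten the same way: the hand-expanded Get/List/Watch/Create/Update/Patch/Apply bodies are deleted and the struct instead embeds `gentype.ClientWithListAndApply`, parameterized with the resource name, REST client, parameter codec, namespace ("" for cluster-scoped resources like FlowSchema), and constructors for the object and list types. The exported interfaces are unchanged, so callers compile and behave as before. A hedged sketch follows; `listFlowSchemas` is a hypothetical helper and the in-cluster config is an assumption for brevity, not something taken from this diff.

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listFlowSchemas (illustrative): caller-facing usage of the typed client is
// unaffected by the gentype rewrite; List is now served by the embedded
// gentype.ClientWithListAndApply rather than per-resource request-building code.
func listFlowSchemas(ctx context.Context) error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	fsList, err := cs.FlowcontrolV1beta2().FlowSchemas().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("found %d FlowSchemas\n", len(fsList.Items))
	return nil
}
```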
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go
    index f028869f17..00ead4c60d 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go
    @@ -20,17 +20,14 @@ package v1beta2
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface.
    @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface {
     type PriorityLevelConfigurationInterface interface {
     	Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error)
     	Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error)
     	Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error)
     	PriorityLevelConfigurationExpansion
     }
     
     // priorityLevelConfigurations implements PriorityLevelConfigurationInterface
     type priorityLevelConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]
     }
     
     // newPriorityLevelConfigurations returns a PriorityLevelConfigurations
     func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations {
     	return &priorityLevelConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration](
    +			"prioritylevelconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta2.PriorityLevelConfiguration { return &v1beta2.PriorityLevelConfiguration{} },
    +			func() *v1beta2.PriorityLevelConfigurationList { return &v1beta2.PriorityLevelConfigurationList{} }),
     	}
     }
    -
    -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
    -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    -	result = &v1beta2.PriorityLevelConfiguration{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
    -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta2.PriorityLevelConfigurationList{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
    -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    -	result = &v1beta2.PriorityLevelConfiguration{}
    -	err = c.client.Post().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    -	result = &v1beta2.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    -	result = &v1beta2.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs.
    -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) {
    -	result = &v1beta2.PriorityLevelConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1beta2.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta2.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go
    index 1371f6ed67..70dca796a4 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go
    @@ -43,20 +43,22 @@ var flowschemasKind = v1beta3.SchemeGroupVersion.WithKind("FlowSchema")
     
     // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
     func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) {
    +	emptyResult := &v1beta3.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta3.FlowSchema{})
    +		Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.FlowSchema), err
     }
     
     // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
     func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) {
    +	emptyResult := &v1beta3.FlowSchemaList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta3.FlowSchemaList{})
    +		Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested flowSchemas.
     func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts))
     }
     
     // Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) {
    +	emptyResult := &v1beta3.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{})
    +		Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.FlowSchema), err
     }
     
     // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
     func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) {
    +	emptyResult := &v1beta3.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.FlowSchema), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) {
    +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) {
    +	emptyResult := &v1beta3.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta3.FlowSchema{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.FlowSchema), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta3.FlowSchemaList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched flowSchema.
     func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) {
    +	emptyResult := &v1beta3.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta3.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.FlowSchema), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta3.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta3.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.FlowSchema), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr
     	if name == nil {
     		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta3.FlowSchema{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.FlowSchema{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.FlowSchema), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go
    index a0e266fecb..45836a6453 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go
    @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta3.SchemeGroupVersion.WithKind("Prior
     
     // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
     func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta3.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta3.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.PriorityLevelConfiguration), err
     }
     
     // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
     func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) {
    +	emptyResult := &v1beta3.PriorityLevelConfigurationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta3.PriorityLevelConfigurationList{})
    +		Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List
     // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
     func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts))
     }
     
     // Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta3.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.PriorityLevelConfiguration), err
     }
     
     // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
     func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta3.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.PriorityLevelConfiguration), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) {
    +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta3.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.PriorityLevelConfiguration), err
     }
    @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta3.PriorityLevelConfigurationList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context,
     
     // Patch applies the patch and returns the patched priorityLevelConfiguration.
     func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) {
    +	emptyResult := &v1beta3.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta3.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.PriorityLevelConfiguration), err
     }
    @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta3.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta3.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.PriorityLevelConfiguration), err
     }
    @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior
     	if name == nil {
     		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta3.PriorityLevelConfiguration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.PriorityLevelConfiguration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta3.PriorityLevelConfiguration), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go
    index 5fa39d6baf..35f600cdf4 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go
    @@ -20,17 +20,14 @@ package v1beta3
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // FlowSchemasGetter has a method to return a FlowSchemaInterface.
    @@ -43,6 +40,7 @@ type FlowSchemasGetter interface {
     type FlowSchemaInterface interface {
     	Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error)
     	Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type FlowSchemaInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error)
     	Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error)
     	FlowSchemaExpansion
     }
     
     // flowSchemas implements FlowSchemaInterface
     type flowSchemas struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration]
     }
     
     // newFlowSchemas returns a FlowSchemas
     func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas {
     	return &flowSchemas{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration](
    +			"flowschemas",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta3.FlowSchema { return &v1beta3.FlowSchema{} },
    +			func() *v1beta3.FlowSchemaList { return &v1beta3.FlowSchemaList{} }),
     	}
     }
    -
    -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
    -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) {
    -	result = &v1beta3.FlowSchema{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
    -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta3.FlowSchemaList{}
    -	err = c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested flowSchemas.
    -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) {
    -	result = &v1beta3.FlowSchema{}
    -	err = c.client.Post().
    -		Resource("flowschemas").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
    -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) {
    -	result = &v1beta3.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) {
    -	result = &v1beta3.FlowSchema{}
    -	err = c.client.Put().
    -		Resource("flowschemas").
    -		Name(flowSchema.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(flowSchema).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs.
    -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("flowschemas").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched flowSchema.
    -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) {
    -	result = &v1beta3.FlowSchema{}
    -	err = c.client.Patch(pt).
    -		Resource("flowschemas").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema.
    -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -	result = &v1beta3.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) {
    -	if flowSchema == nil {
    -		return nil, fmt.Errorf("flowSchema provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(flowSchema)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := flowSchema.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("flowSchema.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta3.FlowSchema{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("flowschemas").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go
    index 49f05257c9..93842e0cf0 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go
    @@ -20,17 +20,14 @@ package v1beta3
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface.
    @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface {
     type PriorityLevelConfigurationInterface interface {
     	Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error)
     	Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error)
     	Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error)
     	PriorityLevelConfigurationExpansion
     }
     
     // priorityLevelConfigurations implements PriorityLevelConfigurationInterface
     type priorityLevelConfigurations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]
     }
     
     // newPriorityLevelConfigurations returns a PriorityLevelConfigurations
     func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations {
     	return &priorityLevelConfigurations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration](
    +			"prioritylevelconfigurations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta3.PriorityLevelConfiguration { return &v1beta3.PriorityLevelConfiguration{} },
    +			func() *v1beta3.PriorityLevelConfigurationList { return &v1beta3.PriorityLevelConfigurationList{} }),
     	}
     }
    -
    -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
    -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    -	result = &v1beta3.PriorityLevelConfiguration{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
    -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta3.PriorityLevelConfigurationList{}
    -	err = c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
    -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    -	result = &v1beta3.PriorityLevelConfiguration{}
    -	err = c.client.Post().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
    -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    -	result = &v1beta3.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    -	result = &v1beta3.PriorityLevelConfiguration{}
    -	err = c.client.Put().
    -		Resource("prioritylevelconfigurations").
    -		Name(priorityLevelConfiguration.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityLevelConfiguration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs.
    -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("prioritylevelconfigurations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) {
    -	result = &v1beta3.PriorityLevelConfiguration{}
    -	err = c.client.Patch(pt).
    -		Resource("prioritylevelconfigurations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration.
    -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -	result = &v1beta3.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
    -	if priorityLevelConfiguration == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityLevelConfiguration)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := priorityLevelConfiguration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta3.PriorityLevelConfiguration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("prioritylevelconfigurations").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go
    index 002de0dd8a..a9693338b5 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go
    @@ -44,22 +44,24 @@ var ingressesKind = v1.SchemeGroupVersion.WithKind("Ingress")
     
     // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
     func (c *FakeIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) {
    +	emptyResult := &v1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1.Ingress{})
    +		Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Ingress), err
     }
     
     // List takes label and field selectors, and returns the list of Ingresses that match those selectors.
     func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) {
    +	emptyResult := &v1.IngressList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1.IngressList{})
    +		Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (resu
     // Watch returns a watch.Interface that watches the requested ingresses.
     func (c *FakeIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
     func (c *FakeIngresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) {
    +	emptyResult := &v1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1.Ingress{})
    +		Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Ingress), err
     }
     
     // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
     func (c *FakeIngresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
    +	emptyResult := &v1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1.Ingress{})
    +		Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Ingress), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) {
    +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
    +	emptyResult := &v1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1.Ingress{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Ingress), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts metav1.Del
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.IngressList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.Delete
     
     // Patch applies the patch and returns the patched ingress.
     func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) {
    +	emptyResult := &v1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Ingress), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1.Ingress
     	if name == nil {
     		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Ingress), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1.I
     	if name == nil {
     		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Ingress), err
     }
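
The fake clients above now record `*WithOptions` actions and return a typed empty result instead of nil when the tracker misses. A minimal test-side sketch of that surface, assuming the standard `k8s.io/client-go/kubernetes/fake` clientset; the test name, namespace, and ingress name are illustrative only:

package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeIngressGetRecordsAction(t *testing.T) {
	cs := fake.NewSimpleClientset()

	// The object does not exist, so the fake returns a NotFound error together with
	// a non-nil empty *v1.Ingress (the emptyResult change in this diff).
	_, err := cs.NetworkingV1().Ingresses("default").Get(context.TODO(), "example", metav1.GetOptions{})
	if err == nil {
		t.Fatal("expected NotFound error for missing ingress")
	}

	// Every call is recorded as an action; with this client-go version the recorded
	// get action is built with the GetOptions the caller passed.
	actions := cs.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "get" {
		t.Fatalf("unexpected actions: %v", actions)
	}
}
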
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go
    index 208a975082..cdbd594452 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go
    @@ -43,20 +43,22 @@ var ingressclassesKind = v1.SchemeGroupVersion.WithKind("IngressClass")
     
     // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any.
     func (c *FakeIngressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) {
    +	emptyResult := &v1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1.IngressClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.IngressClass), err
     }
     
     // List takes label and field selectors, and returns the list of IngressClasses that match those selectors.
     func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) {
    +	emptyResult := &v1.IngressClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1.IngressClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested ingressClasses.
     func (c *FakeIngressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts))
     }
     
     // Create takes the representation of a ingressClass and creates it.  Returns the server's representation of the ingressClass, and an error, if there is any.
     func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) {
    +	emptyResult := &v1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1.IngressClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.IngressClass), err
     }
     
     // Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any.
     func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) {
    +	emptyResult := &v1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1.IngressClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.IngressClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts metav
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.IngressClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.D
     
     // Patch applies the patch and returns the patched ingressClass.
     func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) {
    +	emptyResult := &v1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1.IngressClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.IngressClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networking
     	if name == nil {
     		return nil, fmt.Errorf("ingressClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1.IngressClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.IngressClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go
    index dde09774c4..9098bf42e3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go
    @@ -44,22 +44,24 @@ var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy")
     
     // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
     func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) {
    +	emptyResult := &v1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1.NetworkPolicy{})
    +		Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.NetworkPolicy), err
     }
     
     // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
     func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) {
    +	emptyResult := &v1.NetworkPolicyList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1.NetworkPolicyList{})
    +		Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested networkPolicies.
     func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a networkPolicy and creates it.  Returns the server's representation of the networkPolicy, and an error, if there is any.
     func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) {
    +	emptyResult := &v1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{})
    +		Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.NetworkPolicy), err
     }
     
     // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
     func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) {
    +	emptyResult := &v1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{})
    +		Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.NetworkPolicy), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts meta
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.
     
     // Patch applies the patch and returns the patched networkPolicy.
     func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) {
    +	emptyResult := &v1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1.NetworkPolicy{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.NetworkPolicy), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *networki
     	if name == nil {
     		return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.NetworkPolicy{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1.NetworkPolicy{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.NetworkPolicy), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go
    index 9923d6cbae..afaff4912a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/networking/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // IngressesGetter has a method to return a IngressInterface.
    @@ -43,6 +40,7 @@ type IngressesGetter interface {
     type IngressInterface interface {
     	Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error)
     	Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type IngressInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error)
     	Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
     	IngressExpansion
     }
     
     // ingresses implements IngressInterface
     type ingresses struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration]
     }
     
     // newIngresses returns a Ingresses
     func newIngresses(c *NetworkingV1Client, namespace string) *ingresses {
     	return &ingresses{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration](
    +			"ingresses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Ingress { return &v1.Ingress{} },
    +			func() *v1.IngressList { return &v1.IngressList{} }),
     	}
     }
    -
    -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
    -func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) {
    -	result = &v1.Ingress{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
    -func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.IngressList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested ingresses.
    -func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
    -func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) {
    -	result = &v1.Ingress{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
    -func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
    -	result = &v1.Ingress{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(ingress.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
    -	result = &v1.Ingress{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(ingress.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
    -func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched ingress.
    -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) {
    -	result = &v1.Ingress{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress.
    -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) {
    -	if ingress == nil {
    -		return nil, fmt.Errorf("ingress provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingress)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := ingress.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
    -	}
    -	result = &v1.Ingress{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) {
    -	if ingress == nil {
    -		return nil, fmt.Errorf("ingress provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingress)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := ingress.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.Ingress{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
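
The typed ingress client above collapses into a `gentype.ClientWithListAndApply` wrapper, but the public `IngressInterface` is unchanged, so call sites compile and behave as before. A minimal caller-side sketch, assuming a kubeconfig in the default location; the namespace and ingress name are placeholders:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load ~/.kube/config; in-cluster config would work the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Get and List now route through the generic gentype client internally,
	// but the typed API and its semantics are the same as before.
	ing, err := cs.NetworkingV1().Ingresses("default").Get(context.TODO(), "example", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(ing.Name, len(ing.Spec.Rules))
}
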
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go
    index 16c8e48bf0..3301e87994 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/networking/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // IngressClassesGetter has a method to return a IngressClassInterface.
    @@ -55,143 +52,18 @@ type IngressClassInterface interface {
     
     // ingressClasses implements IngressClassInterface
     type ingressClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration]
     }
     
     // newIngressClasses returns a IngressClasses
     func newIngressClasses(c *NetworkingV1Client) *ingressClasses {
     	return &ingressClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration](
    +			"ingressclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.IngressClass { return &v1.IngressClass{} },
    +			func() *v1.IngressClassList { return &v1.IngressClassList{} }),
     	}
     }
    -
    -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any.
    -func (c *ingressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) {
    -	result = &v1.IngressClass{}
    -	err = c.client.Get().
    -		Resource("ingressclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors.
    -func (c *ingressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.IngressClassList{}
    -	err = c.client.Get().
    -		Resource("ingressclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested ingressClasses.
    -func (c *ingressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("ingressclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a ingressClass and creates it.  Returns the server's representation of the ingressClass, and an error, if there is any.
    -func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) {
    -	result = &v1.IngressClass{}
    -	err = c.client.Post().
    -		Resource("ingressclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingressClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any.
    -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) {
    -	result = &v1.IngressClass{}
    -	err = c.client.Put().
    -		Resource("ingressclasses").
    -		Name(ingressClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingressClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs.
    -func (c *ingressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("ingressclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("ingressclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched ingressClass.
    -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) {
    -	result = &v1.IngressClass{}
    -	err = c.client.Patch(pt).
    -		Resource("ingressclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass.
    -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) {
    -	if ingressClass == nil {
    -		return nil, fmt.Errorf("ingressClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingressClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := ingressClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingressClass.Name must be provided to Apply")
    -	}
    -	result = &v1.IngressClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("ingressclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
    index d7454ce145..ba2ef32dbd 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/networking/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface.
    @@ -55,154 +52,18 @@ type NetworkPolicyInterface interface {
     
     // networkPolicies implements NetworkPolicyInterface
     type networkPolicies struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration]
     }
     
     // newNetworkPolicies returns a NetworkPolicies
     func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies {
     	return &networkPolicies{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration](
    +			"networkpolicies",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.NetworkPolicy { return &v1.NetworkPolicy{} },
    +			func() *v1.NetworkPolicyList { return &v1.NetworkPolicyList{} }),
     	}
     }
    -
    -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
    -func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) {
    -	result = &v1.NetworkPolicy{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
    -func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.NetworkPolicyList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested networkPolicies.
    -func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a networkPolicy and creates it.  Returns the server's representation of the networkPolicy, and an error, if there is any.
    -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) {
    -	result = &v1.NetworkPolicy{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(networkPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
    -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) {
    -	result = &v1.NetworkPolicy{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(networkPolicy.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(networkPolicy).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
    -func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched networkPolicy.
    -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) {
    -	result = &v1.NetworkPolicy{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy.
    -func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) {
    -	if networkPolicy == nil {
    -		return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(networkPolicy)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := networkPolicy.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply")
    -	}
    -	result = &v1.NetworkPolicy{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("networkpolicies").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
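
The hand-written Apply methods removed above are likewise served by the generic client; callers keep passing apply configurations from `k8s.io/client-go/applyconfigurations`. A minimal server-side-apply sketch, assuming an existing clientset `cs`; the policy name, namespace, and field manager are illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	networkingv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
	"k8s.io/client-go/kubernetes"
)

// applyDenyAllPolicy declares a NetworkPolicy with an empty pod selector (all pods)
// and no ingress rules, then applies it server-side. The client marshals the apply
// configuration and PATCHes it with types.ApplyPatchType, as the removed code did.
func applyDenyAllPolicy(ctx context.Context, cs kubernetes.Interface) error {
	np := networkingv1ac.NetworkPolicy("deny-all-ingress", "default").
		WithSpec(networkingv1ac.NetworkPolicySpec().
			WithPodSelector(metav1ac.LabelSelector()))

	_, err := cs.NetworkingV1().NetworkPolicies("default").Apply(ctx, np,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
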
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go
    index 4db8df68cb..6ce62b3313 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go
    @@ -43,20 +43,22 @@ var ipaddressesKind = v1alpha1.SchemeGroupVersion.WithKind("IPAddress")
     
     // Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any.
     func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) {
    +	emptyResult := &v1alpha1.IPAddress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(ipaddressesResource, name), &v1alpha1.IPAddress{})
    +		Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.IPAddress), err
     }
     
     // List takes label and field selectors, and returns the list of IPAddresses that match those selectors.
     func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) {
    +	emptyResult := &v1alpha1.IPAddressList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(ipaddressesResource, ipaddressesKind, opts), &v1alpha1.IPAddressList{})
    +		Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested iPAddresses.
     func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(ipaddressesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts))
     }
     
     // Create takes the representation of a iPAddress and creates it.  Returns the server's representation of the iPAddress, and an error, if there is any.
     func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) {
    +	emptyResult := &v1alpha1.IPAddress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{})
    +		Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.IPAddress), err
     }
     
     // Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any.
     func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) {
    +	emptyResult := &v1alpha1.IPAddress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.IPAddress), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.Delet
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(ipaddressesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.IPAddressList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOp
     
     // Patch applies the patch and returns the patched iPAddress.
     func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) {
    +	emptyResult := &v1alpha1.IPAddress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, name, pt, data, subresources...), &v1alpha1.IPAddress{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.IPAddress), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alph
     	if name == nil {
     		return nil, fmt.Errorf("iPAddress.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.IPAddress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, *name, types.ApplyPatchType, data), &v1alpha1.IPAddress{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.IPAddress), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go
    index 653ef631af..27a78e1ba6 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go
    @@ -43,20 +43,22 @@ var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR")
     
     // Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any.
     func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) {
    +	emptyResult := &v1alpha1.ServiceCIDR{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(servicecidrsResource, name), &v1alpha1.ServiceCIDR{})
    +		Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ServiceCIDR), err
     }
     
     // List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors.
     func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) {
    +	emptyResult := &v1alpha1.ServiceCIDRList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(servicecidrsResource, servicecidrsKind, opts), &v1alpha1.ServiceCIDRList{})
    +		Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested serviceCIDRs.
     func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(servicecidrsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts))
     }
     
     // Create takes the representation of a serviceCIDR and creates it.  Returns the server's representation of the serviceCIDR, and an error, if there is any.
     func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) {
    +	emptyResult := &v1alpha1.ServiceCIDR{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{})
    +		Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ServiceCIDR), err
     }
     
     // Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any.
     func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) {
    +	emptyResult := &v1alpha1.ServiceCIDR{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ServiceCIDR), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) {
    +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) {
    +	emptyResult := &v1alpha1.ServiceCIDR{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(servicecidrsResource, "status", serviceCIDR), &v1alpha1.ServiceCIDR{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ServiceCIDR), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(servicecidrsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteO
     
     // Patch applies the patch and returns the patched serviceCIDR.
     func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) {
    +	emptyResult := &v1alpha1.ServiceCIDR{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, name, pt, data, subresources...), &v1alpha1.ServiceCIDR{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ServiceCIDR), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1a
     	if name == nil {
     		return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ServiceCIDR{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ServiceCIDR{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ServiceCIDR), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *network
     	if name == nil {
     		return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ServiceCIDR{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ServiceCIDR{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ServiceCIDR), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
    index fff193d68d..33e90d18a3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/networking/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // IPAddressesGetter has a method to return a IPAddressInterface.
    @@ -55,143 +52,18 @@ type IPAddressInterface interface {
     
     // iPAddresses implements IPAddressInterface
     type iPAddresses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration]
     }
     
     // newIPAddresses returns a IPAddresses
     func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses {
     	return &iPAddresses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration](
    +			"ipaddresses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.IPAddress { return &v1alpha1.IPAddress{} },
    +			func() *v1alpha1.IPAddressList { return &v1alpha1.IPAddressList{} }),
     	}
     }
    -
    -// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any.
    -func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) {
    -	result = &v1alpha1.IPAddress{}
    -	err = c.client.Get().
    -		Resource("ipaddresses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of IPAddresses that match those selectors.
    -func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.IPAddressList{}
    -	err = c.client.Get().
    -		Resource("ipaddresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested iPAddresses.
    -func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("ipaddresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a iPAddress and creates it.  Returns the server's representation of the iPAddress, and an error, if there is any.
    -func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) {
    -	result = &v1alpha1.IPAddress{}
    -	err = c.client.Post().
    -		Resource("ipaddresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(iPAddress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any.
    -func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) {
    -	result = &v1alpha1.IPAddress{}
    -	err = c.client.Put().
    -		Resource("ipaddresses").
    -		Name(iPAddress.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(iPAddress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs.
    -func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("ipaddresses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("ipaddresses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched iPAddress.
    -func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) {
    -	result = &v1alpha1.IPAddress{}
    -	err = c.client.Patch(pt).
    -		Resource("ipaddresses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress.
    -func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) {
    -	if iPAddress == nil {
    -		return nil, fmt.Errorf("iPAddress provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(iPAddress)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := iPAddress.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("iPAddress.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.IPAddress{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("ipaddresses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
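With the hand-written methods above deleted, iPAddresses now serves every verb through the embedded gentype.ClientWithListAndApply built in newIPAddresses, so the REST plumbing lives once in that generic type rather than per resource. Call sites should be unaffected; a small sketch, assuming only a standard kubernetes.Interface clientset:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printIPAddresses lists cluster-scoped IPAddress objects; the call compiles
// and behaves the same before and after the gentype refactor.
func printIPAddresses(ctx context.Context, cs kubernetes.Interface) error {
	// Served by the generic client's List, which issues the same
	// GET /apis/networking.k8s.io/v1alpha1/ipaddresses the removed method built by hand.
	list, err := cs.NetworkingV1alpha1().IPAddresses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, ip := range list.Items {
		fmt.Println(ip.Name)
	}
	return nil
}

The serviceCIDRs client in the next file gets the same treatment, swapping its hand-written methods for the identical generic embedding.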
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go
    index 100f290a19..b72fe5b696 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/networking/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ServiceCIDRsGetter has a method to return a ServiceCIDRInterface.
    @@ -43,6 +40,7 @@ type ServiceCIDRsGetter interface {
     type ServiceCIDRInterface interface {
     	Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error)
     	Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type ServiceCIDRInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error)
     	Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error)
     	ServiceCIDRExpansion
     }
     
     // serviceCIDRs implements ServiceCIDRInterface
     type serviceCIDRs struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration]
     }
     
     // newServiceCIDRs returns a ServiceCIDRs
     func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs {
     	return &serviceCIDRs{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration](
    +			"servicecidrs",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.ServiceCIDR { return &v1alpha1.ServiceCIDR{} },
    +			func() *v1alpha1.ServiceCIDRList { return &v1alpha1.ServiceCIDRList{} }),
     	}
     }
    -
    -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any.
    -func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) {
    -	result = &v1alpha1.ServiceCIDR{}
    -	err = c.client.Get().
    -		Resource("servicecidrs").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors.
    -func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.ServiceCIDRList{}
    -	err = c.client.Get().
    -		Resource("servicecidrs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested serviceCIDRs.
    -func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("servicecidrs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a serviceCIDR and creates it.  Returns the server's representation of the serviceCIDR, and an error, if there is any.
    -func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) {
    -	result = &v1alpha1.ServiceCIDR{}
    -	err = c.client.Post().
    -		Resource("servicecidrs").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(serviceCIDR).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any.
    -func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) {
    -	result = &v1alpha1.ServiceCIDR{}
    -	err = c.client.Put().
    -		Resource("servicecidrs").
    -		Name(serviceCIDR.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(serviceCIDR).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) {
    -	result = &v1alpha1.ServiceCIDR{}
    -	err = c.client.Put().
    -		Resource("servicecidrs").
    -		Name(serviceCIDR.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(serviceCIDR).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs.
    -func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("servicecidrs").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("servicecidrs").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched serviceCIDR.
    -func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) {
    -	result = &v1alpha1.ServiceCIDR{}
    -	err = c.client.Patch(pt).
    -		Resource("servicecidrs").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR.
    -func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) {
    -	if serviceCIDR == nil {
    -		return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(serviceCIDR)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := serviceCIDR.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.ServiceCIDR{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("servicecidrs").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) {
    -	if serviceCIDR == nil {
    -		return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(serviceCIDR)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := serviceCIDR.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
    -	}
    -
    -	result = &v1alpha1.ServiceCIDR{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("servicecidrs").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go
    index 7a3b861be0..59bf762a01 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go
    @@ -44,22 +44,24 @@ var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress")
     
     // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
     func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{})
    +		Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
     
     // List takes label and field selectors, and returns the list of Ingresses that match those selectors.
     func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
    +	emptyResult := &v1beta1.IngressList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{})
    +		Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *
     // Watch returns a watch.Interface that watches the requested ingresses.
     func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
     func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{})
    +		Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
     
     // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
     func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{})
    +		Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) {
    +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteO
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.IngressList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOpti
     
     // Patch applies the patch and returns the patched ingress.
     func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) {
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1beta1.In
     	if name == nil {
     		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1be
     	if name == nil {
     		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Ingress{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Ingress), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go
    index 1804e61fc3..3001de8e42 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go
    @@ -43,20 +43,22 @@ var ingressclassesKind = v1beta1.SchemeGroupVersion.WithKind("IngressClass")
     
     // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any.
     func (c *FakeIngressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) {
    +	emptyResult := &v1beta1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1beta1.IngressClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.IngressClass), err
     }
     
     // List takes label and field selectors, and returns the list of IngressClasses that match those selectors.
     func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) {
    +	emptyResult := &v1beta1.IngressClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1beta1.IngressClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested ingressClasses.
     func (c *FakeIngressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts))
     }
     
     // Create takes the representation of a ingressClass and creates it.  Returns the server's representation of the ingressClass, and an error, if there is any.
     func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) {
    +	emptyResult := &v1beta1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.IngressClass), err
     }
     
     // Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any.
     func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) {
    +	emptyResult := &v1beta1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.IngressClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.IngressClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.Delet
     
     // Patch applies the patch and returns the patched ingressClass.
     func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) {
    +	emptyResult := &v1beta1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1beta1.IngressClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.IngressClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networking
     	if name == nil {
     		return nil, fmt.Errorf("ingressClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.IngressClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1beta1.IngressClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.IngressClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go
    new file mode 100644
    index 0000000000..d8352bb79f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go
    @@ -0,0 +1,151 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeIPAddresses implements IPAddressInterface
    +type FakeIPAddresses struct {
    +	Fake *FakeNetworkingV1beta1
    +}
    +
    +var ipaddressesResource = v1beta1.SchemeGroupVersion.WithResource("ipaddresses")
    +
    +var ipaddressesKind = v1beta1.SchemeGroupVersion.WithKind("IPAddress")
    +
    +// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any.
    +func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IPAddress, err error) {
    +	emptyResult := &v1beta1.IPAddress{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.IPAddress), err
    +}
    +
    +// List takes label and field selectors, and returns the list of IPAddresses that match those selectors.
    +func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IPAddressList, err error) {
    +	emptyResult := &v1beta1.IPAddressList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1beta1.IPAddressList{ListMeta: obj.(*v1beta1.IPAddressList).ListMeta}
    +	for _, item := range obj.(*v1beta1.IPAddressList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested iPAddresses.
    +func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts))
    +}
    +
    +// Create takes the representation of a iPAddress and creates it.  Returns the server's representation of the iPAddress, and an error, if there is any.
    +func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (result *v1beta1.IPAddress, err error) {
    +	emptyResult := &v1beta1.IPAddress{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.IPAddress), err
    +}
    +
    +// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any.
    +func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (result *v1beta1.IPAddress, err error) {
    +	emptyResult := &v1beta1.IPAddress{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.IPAddress), err
    +}
    +
    +// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs.
    +func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewRootDeleteActionWithOptions(ipaddressesResource, name, opts), &v1beta1.IPAddress{})
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1beta1.IPAddressList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched iPAddress.
    +func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) {
    +	emptyResult := &v1beta1.IPAddress{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.IPAddress), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress.
    +func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) {
    +	if iPAddress == nil {
    +		return nil, fmt.Errorf("iPAddress provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(iPAddress)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := iPAddress.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("iPAddress.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1beta1.IPAddress{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.IPAddress), err
    +}
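The new fake above follows the *ActionWithOptions pattern applied throughout this update: each verb records the options it was invoked with and returns a typed empty object rather than nil when no reactor produces one. A short illustrative test, assuming the fake clientset from k8s.io/client-go/kubernetes/fake and an arbitrary label selector:

package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func TestIPAddressListRecordsSelector(t *testing.T) {
	cs := fake.NewSimpleClientset()

	if _, err := cs.NetworkingV1beta1().IPAddresses().
		List(context.Background(), metav1.ListOptions{LabelSelector: "app=demo"}); err != nil {
		t.Fatal(err)
	}

	// NewRootListActionWithOptions records the list restrictions on the action,
	// so the selector the component used can be asserted directly.
	for _, action := range cs.Actions() {
		if list, ok := action.(k8stesting.ListAction); ok {
			if got := list.GetListRestrictions().Labels.String(); got != "app=demo" {
				t.Fatalf("unexpected label selector: %q", got)
			}
		}
	}
}

Returning emptyResult instead of nil also means a reactor error no longer leaves callers holding a nil typed pointer.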
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go
    index b8792a3064..bd72d59297 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go
    @@ -28,6 +28,10 @@ type FakeNetworkingV1beta1 struct {
     	*testing.Fake
     }
     
    +func (c *FakeNetworkingV1beta1) IPAddresses() v1beta1.IPAddressInterface {
    +	return &FakeIPAddresses{c}
    +}
    +
     func (c *FakeNetworkingV1beta1) Ingresses(namespace string) v1beta1.IngressInterface {
     	return &FakeIngresses{c, namespace}
     }
    @@ -36,6 +40,10 @@ func (c *FakeNetworkingV1beta1) IngressClasses() v1beta1.IngressClassInterface {
     	return &FakeIngressClasses{c}
     }
     
    +func (c *FakeNetworkingV1beta1) ServiceCIDRs() v1beta1.ServiceCIDRInterface {
    +	return &FakeServiceCIDRs{c}
    +}
    +
     // RESTClient returns a RESTClient that is used to communicate
     // with API server by this client implementation.
     func (c *FakeNetworkingV1beta1) RESTClient() rest.Interface {
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go
    new file mode 100644
    index 0000000000..0eb5b2f2bb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go
    @@ -0,0 +1,186 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeServiceCIDRs implements ServiceCIDRInterface
    +type FakeServiceCIDRs struct {
    +	Fake *FakeNetworkingV1beta1
    +}
    +
    +var servicecidrsResource = v1beta1.SchemeGroupVersion.WithResource("servicecidrs")
    +
    +var servicecidrsKind = v1beta1.SchemeGroupVersion.WithKind("ServiceCIDR")
    +
    +// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any.
    +func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ServiceCIDR, err error) {
    +	emptyResult := &v1beta1.ServiceCIDR{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.ServiceCIDR), err
    +}
    +
    +// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors.
    +func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ServiceCIDRList, err error) {
    +	emptyResult := &v1beta1.ServiceCIDRList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1beta1.ServiceCIDRList{ListMeta: obj.(*v1beta1.ServiceCIDRList).ListMeta}
    +	for _, item := range obj.(*v1beta1.ServiceCIDRList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested serviceCIDRs.
    +func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts))
    +}
    +
    +// Create takes the representation of a serviceCIDR and creates it.  Returns the server's representation of the serviceCIDR, and an error, if there is any.
    +func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (result *v1beta1.ServiceCIDR, err error) {
    +	emptyResult := &v1beta1.ServiceCIDR{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.ServiceCIDR), err
    +}
    +
    +// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any.
    +func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) {
    +	emptyResult := &v1beta1.ServiceCIDR{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.ServiceCIDR), err
    +}
    +
    +// UpdateStatus was generated because the type contains a Status member.
    +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) {
    +	emptyResult := &v1beta1.ServiceCIDR{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.ServiceCIDR), err
    +}
    +
    +// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs.
    +func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1beta1.ServiceCIDR{})
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1beta1.ServiceCIDRList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched serviceCIDR.
    +func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) {
    +	emptyResult := &v1beta1.ServiceCIDR{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.ServiceCIDR), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR.
    +func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) {
    +	if serviceCIDR == nil {
    +		return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(serviceCIDR)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := serviceCIDR.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1beta1.ServiceCIDR{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.ServiceCIDR), err
    +}
    +
    +// ApplyStatus was generated because the type contains a Status member.
    +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    +func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) {
    +	if serviceCIDR == nil {
    +		return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(serviceCIDR)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := serviceCIDR.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1beta1.ServiceCIDR{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.ServiceCIDR), err
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go
    index f74c7257ad..ac1ffbb984 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go
    @@ -18,6 +18,10 @@ limitations under the License.
     
     package v1beta1
     
    +type IPAddressExpansion interface{}
    +
     type IngressExpansion interface{}
     
     type IngressClassExpansion interface{}
    +
    +type ServiceCIDRExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
    index b309281afa..90be275adc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/networking/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // IngressesGetter has a method to return a IngressInterface.
    @@ -43,6 +40,7 @@ type IngressesGetter interface {
     type IngressInterface interface {
     	Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error)
     	Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type IngressInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error)
     	Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
     	IngressExpansion
     }
     
     // ingresses implements IngressInterface
     type ingresses struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration]
     }
     
     // newIngresses returns a Ingresses
     func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses {
     	return &ingresses{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration](
    +			"ingresses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Ingress { return &v1beta1.Ingress{} },
    +			func() *v1beta1.IngressList { return &v1beta1.IngressList{} }),
     	}
     }
    -
    -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
    -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
    -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.IngressList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested ingresses.
    -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
    -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
    -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(ingress.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(ingress.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingress).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
    -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched ingress.
    -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) {
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress.
    -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) {
    -	if ingress == nil {
    -		return nil, fmt.Errorf("ingress provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingress)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := ingress.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) {
    -	if ingress == nil {
    -		return nil, fmt.Errorf("ingress provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingress)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := ingress.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingress.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.Ingress{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("ingresses").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
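
This rewrite drops roughly 200 lines of hand-expanded REST plumbing in favor of the generic `gentype.ClientWithListAndApply`, parameterized on the resource, its list type, and its apply configuration. The exported `IngressInterface` is unchanged, so downstream call sites compile and behave as before. A caller-side sketch with illustrative names (note that `networking.k8s.io/v1beta1` Ingress has long been deprecated server-side; the point here is only that the client surface is untouched):

```go
// Sketch only: downstream call sites are unchanged by the gentype refactor.
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func listIngresses(cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Get/List/Watch/Patch/Apply now come from the embedded
	// gentype.ClientWithListAndApply, but signatures and behavior match the
	// previous hand-written methods.
	ingresses, err := clientset.NetworkingV1beta1().Ingresses("default").List(
		context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, ing := range ingresses.Items {
		fmt.Println(ing.Name)
	}
	return nil
}
```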
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
    index 50ccdfdbba..c55da4168f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/networking/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // IngressClassesGetter has a method to return a IngressClassInterface.
    @@ -55,143 +52,18 @@ type IngressClassInterface interface {
     
     // ingressClasses implements IngressClassInterface
     type ingressClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration]
     }
     
     // newIngressClasses returns a IngressClasses
     func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses {
     	return &ingressClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration](
    +			"ingressclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.IngressClass { return &v1beta1.IngressClass{} },
    +			func() *v1beta1.IngressClassList { return &v1beta1.IngressClassList{} }),
     	}
     }
    -
    -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any.
    -func (c *ingressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) {
    -	result = &v1beta1.IngressClass{}
    -	err = c.client.Get().
    -		Resource("ingressclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors.
    -func (c *ingressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.IngressClassList{}
    -	err = c.client.Get().
    -		Resource("ingressclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested ingressClasses.
    -func (c *ingressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("ingressclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a ingressClass and creates it.  Returns the server's representation of the ingressClass, and an error, if there is any.
    -func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) {
    -	result = &v1beta1.IngressClass{}
    -	err = c.client.Post().
    -		Resource("ingressclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingressClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any.
    -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) {
    -	result = &v1beta1.IngressClass{}
    -	err = c.client.Put().
    -		Resource("ingressclasses").
    -		Name(ingressClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(ingressClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs.
    -func (c *ingressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("ingressclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("ingressclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched ingressClass.
    -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) {
    -	result = &v1beta1.IngressClass{}
    -	err = c.client.Patch(pt).
    -		Resource("ingressclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass.
    -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) {
    -	if ingressClass == nil {
    -		return nil, fmt.Errorf("ingressClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(ingressClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := ingressClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("ingressClass.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.IngressClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("ingressclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go
    new file mode 100644
    index 0000000000..09e4139e74
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go
    @@ -0,0 +1,69 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// IPAddressesGetter has a method to return a IPAddressInterface.
    +// A group's client should implement this interface.
    +type IPAddressesGetter interface {
    +	IPAddresses() IPAddressInterface
    +}
    +
    +// IPAddressInterface has methods to work with IPAddress resources.
    +type IPAddressInterface interface {
    +	Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (*v1beta1.IPAddress, error)
    +	Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (*v1beta1.IPAddress, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IPAddress, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IPAddressList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error)
    +	Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error)
    +	IPAddressExpansion
    +}
    +
    +// iPAddresses implements IPAddressInterface
    +type iPAddresses struct {
    +	*gentype.ClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration]
    +}
    +
    +// newIPAddresses returns a IPAddresses
    +func newIPAddresses(c *NetworkingV1beta1Client) *iPAddresses {
    +	return &iPAddresses{
    +		gentype.NewClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration](
    +			"ipaddresses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.IPAddress { return &v1beta1.IPAddress{} },
    +			func() *v1beta1.IPAddressList { return &v1beta1.IPAddressList{} }),
    +	}
    +}
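
`ipaddress.go` is a new, cluster-scoped typed client built directly on `gentype` (no namespace, hence the empty string passed to `NewClientWithListAndApply`). A short usage sketch, assuming a configured `*kubernetes.Clientset`; the `Spec.ParentRef` access follows the `v1beta1.IPAddressSpec` shape in `k8s.io/api` and is illustrative only:

```go
// Sketch only: reading the new cluster-scoped IPAddress resource.
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func printIPAddresses(clientset *kubernetes.Clientset) error {
	// IPAddresses() takes no namespace: the generated constructor above
	// passes "" to gentype.NewClientWithListAndApply.
	ips, err := clientset.NetworkingV1beta1().IPAddresses().List(
		context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, ip := range ips.Items {
		// ParentRef identifies the object (typically a Service) that
		// claimed this address.
		fmt.Printf("%s -> %+v\n", ip.Name, ip.Spec.ParentRef)
	}
	return nil
}
```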
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
    index 851634ed0f..d35225abd8 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
    @@ -28,8 +28,10 @@ import (
     
     type NetworkingV1beta1Interface interface {
     	RESTClient() rest.Interface
    +	IPAddressesGetter
     	IngressesGetter
     	IngressClassesGetter
    +	ServiceCIDRsGetter
     }
     
     // NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group.
    @@ -37,6 +39,10 @@ type NetworkingV1beta1Client struct {
     	restClient rest.Interface
     }
     
    +func (c *NetworkingV1beta1Client) IPAddresses() IPAddressInterface {
    +	return newIPAddresses(c)
    +}
    +
     func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface {
     	return newIngresses(c, namespace)
     }
    @@ -45,6 +51,10 @@ func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface {
     	return newIngressClasses(c)
     }
     
    +func (c *NetworkingV1beta1Client) ServiceCIDRs() ServiceCIDRInterface {
    +	return newServiceCIDRs(c)
    +}
    +
     // NewForConfig creates a new NetworkingV1beta1Client for the given config.
     // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
     // where httpClient was generated with rest.HTTPClientFor(c).
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go
    new file mode 100644
    index 0000000000..d3336f2ec0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface.
    +// A group's client should implement this interface.
    +type ServiceCIDRsGetter interface {
    +	ServiceCIDRs() ServiceCIDRInterface
    +}
    +
    +// ServiceCIDRInterface has methods to work with ServiceCIDR resources.
    +type ServiceCIDRInterface interface {
    +	Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (*v1beta1.ServiceCIDR, error)
    +	Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    +	UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ServiceCIDR, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ServiceCIDRList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error)
    +	Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    +	ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error)
    +	ServiceCIDRExpansion
    +}
    +
    +// serviceCIDRs implements ServiceCIDRInterface
    +type serviceCIDRs struct {
    +	*gentype.ClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration]
    +}
    +
    +// newServiceCIDRs returns a ServiceCIDRs
    +func newServiceCIDRs(c *NetworkingV1beta1Client) *serviceCIDRs {
    +	return &serviceCIDRs{
    +		gentype.NewClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration](
    +			"servicecidrs",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.ServiceCIDR { return &v1beta1.ServiceCIDR{} },
    +			func() *v1beta1.ServiceCIDRList { return &v1beta1.ServiceCIDRList{} }),
    +	}
    +}
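
The real ServiceCIDR client mirrors the IPAddress one but keeps the status and apply-status verbs because the type carries a `Status` member. A hedged sketch of server-side apply through this client; the `ServiceCIDR(...)` and `ServiceCIDRSpec()` constructors are assumed to follow the standard generated applyconfigurations pattern and are not shown in this patch:

```go
// Sketch only: server-side apply via the new ServiceCIDR client.
// The apply-configuration constructors used here are assumed, not shown
// in this patch.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applynetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
	"k8s.io/client-go/kubernetes"
)

func applyServiceCIDR(clientset *kubernetes.Clientset) error {
	// Declarative intent: ensure "extra-range" exists with this CIDR,
	// with fields owned by the "example-manager" field manager.
	cfg := applynetworkingv1beta1.ServiceCIDR("extra-range").
		WithSpec(applynetworkingv1beta1.ServiceCIDRSpec().
			WithCIDRs("10.112.0.0/16"))

	_, err := clientset.NetworkingV1beta1().ServiceCIDRs().Apply(
		context.Background(), cfg,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
```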
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go
    index 35cfbcae4b..0a52706284 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go
    @@ -43,20 +43,22 @@ var runtimeclassesKind = v1.SchemeGroupVersion.WithKind("RuntimeClass")
     
     // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
     func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) {
    +	emptyResult := &v1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1.RuntimeClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RuntimeClass), err
     }
     
     // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
     func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) {
    +	emptyResult := &v1.RuntimeClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1.RuntimeClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested runtimeClasses.
     func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts))
     }
     
     // Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
     func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) {
    +	emptyResult := &v1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RuntimeClass), err
     }
     
     // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
     func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) {
    +	emptyResult := &v1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RuntimeClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts metav
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.RuntimeClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.D
     
     // Patch applies the patch and returns the patched runtimeClass.
     func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) {
    +	emptyResult := &v1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1.RuntimeClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RuntimeClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.Run
     	if name == nil {
     		return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1.RuntimeClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RuntimeClass), err
     }
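
As with the networking fakes, the node/v1 `FakeRuntimeClasses` now records the caller's options via the `*WithOptions` action constructors and returns typed empty results instead of `nil`. A sketch of a test relying on the recorded action, assuming the standard fake clientset and the `k8s.io/client-go/testing` action interfaces:

```go
// Sketch only: the fake now carries the caller's options on recorded actions.
package example

import (
	"context"
	"testing"

	nodev1 "k8s.io/api/node/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func TestRuntimeClassFake(t *testing.T) {
	client := fake.NewSimpleClientset(&nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gvisor"},
		Handler:    "runsc",
	})

	if _, err := client.NodeV1().RuntimeClasses().Get(
		context.Background(), "gvisor", metav1.GetOptions{}); err != nil {
		t.Fatal(err)
	}

	// The Get was recorded via NewRootGetActionWithOptions; the action still
	// satisfies the GetAction interface, so existing assertions keep working.
	actions := client.Actions()
	if len(actions) != 1 {
		t.Fatalf("want 1 action, got %d", len(actions))
	}
	if get, ok := actions[0].(clienttesting.GetAction); !ok || get.GetName() != "gvisor" {
		t.Fatalf("unexpected action: %#v", actions[0])
	}
}
```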
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go
    index 5ec38b203e..6c8110640d 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/node/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	nodev1 "k8s.io/client-go/applyconfigurations/node/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RuntimeClassesGetter has a method to return a RuntimeClassInterface.
    @@ -55,143 +52,18 @@ type RuntimeClassInterface interface {
     
     // runtimeClasses implements RuntimeClassInterface
     type runtimeClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration]
     }
     
     // newRuntimeClasses returns a RuntimeClasses
     func newRuntimeClasses(c *NodeV1Client) *runtimeClasses {
     	return &runtimeClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration](
    +			"runtimeclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.RuntimeClass { return &v1.RuntimeClass{} },
    +			func() *v1.RuntimeClassList { return &v1.RuntimeClassList{} }),
     	}
     }
    -
    -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
    -func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) {
    -	result = &v1.RuntimeClass{}
    -	err = c.client.Get().
    -		Resource("runtimeclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
    -func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.RuntimeClassList{}
    -	err = c.client.Get().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested runtimeClasses.
    -func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
    -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) {
    -	result = &v1.RuntimeClass{}
    -	err = c.client.Post().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(runtimeClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
    -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) {
    -	result = &v1.RuntimeClass{}
    -	err = c.client.Put().
    -		Resource("runtimeclasses").
    -		Name(runtimeClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(runtimeClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs.
    -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("runtimeclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("runtimeclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched runtimeClass.
    -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) {
    -	result = &v1.RuntimeClass{}
    -	err = c.client.Patch(pt).
    -		Resource("runtimeclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass.
    -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) {
    -	if runtimeClass == nil {
    -		return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(runtimeClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := runtimeClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply")
    -	}
    -	result = &v1.RuntimeClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("runtimeclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go
    index 2ff7d3f973..bcd261d003 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go
    @@ -43,20 +43,22 @@ var runtimeclassesKind = v1alpha1.SchemeGroupVersion.WithKind("RuntimeClass")
     
     // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
     func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) {
    +	emptyResult := &v1alpha1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RuntimeClass), err
     }
     
     // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
     func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) {
    +	emptyResult := &v1alpha1.RuntimeClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1alpha1.RuntimeClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested runtimeClasses.
     func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts))
     }
     
     // Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
     func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) {
    +	emptyResult := &v1alpha1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RuntimeClass), err
     }
     
     // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
     func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) {
    +	emptyResult := &v1alpha1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RuntimeClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.RuntimeClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.Delet
     
     // Patch applies the patch and returns the patched runtimeClass.
     func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) {
    +	emptyResult := &v1alpha1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1alpha1.RuntimeClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RuntimeClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alph
     	if name == nil {
     		return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.RuntimeClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RuntimeClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
    index 039a7ace15..60aa4a213b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/node/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RuntimeClassesGetter has a method to return a RuntimeClassInterface.
    @@ -55,143 +52,18 @@ type RuntimeClassInterface interface {
     
     // runtimeClasses implements RuntimeClassInterface
     type runtimeClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration]
     }
     
     // newRuntimeClasses returns a RuntimeClasses
     func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses {
     	return &runtimeClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration](
    +			"runtimeclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.RuntimeClass { return &v1alpha1.RuntimeClass{} },
    +			func() *v1alpha1.RuntimeClassList { return &v1alpha1.RuntimeClassList{} }),
     	}
     }
    -
    -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
    -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) {
    -	result = &v1alpha1.RuntimeClass{}
    -	err = c.client.Get().
    -		Resource("runtimeclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
    -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.RuntimeClassList{}
    -	err = c.client.Get().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested runtimeClasses.
    -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
    -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) {
    -	result = &v1alpha1.RuntimeClass{}
    -	err = c.client.Post().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(runtimeClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
    -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) {
    -	result = &v1alpha1.RuntimeClass{}
    -	err = c.client.Put().
    -		Resource("runtimeclasses").
    -		Name(runtimeClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(runtimeClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs.
    -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("runtimeclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("runtimeclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched runtimeClass.
    -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) {
    -	result = &v1alpha1.RuntimeClass{}
    -	err = c.client.Patch(pt).
    -		Resource("runtimeclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass.
    -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) {
    -	if runtimeClass == nil {
    -		return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(runtimeClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := runtimeClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.RuntimeClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("runtimeclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go
    index e6552f9aca..a3c8c018c5 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go
    @@ -43,20 +43,22 @@ var runtimeclassesKind = v1beta1.SchemeGroupVersion.WithKind("RuntimeClass")
     
     // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
     func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) {
    +	emptyResult := &v1beta1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RuntimeClass), err
     }
     
     // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
     func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) {
    +	emptyResult := &v1beta1.RuntimeClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1beta1.RuntimeClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested runtimeClasses.
     func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts))
     }
     
     // Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
     func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) {
    +	emptyResult := &v1beta1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RuntimeClass), err
     }
     
     // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
     func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) {
    +	emptyResult := &v1beta1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RuntimeClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.RuntimeClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.Delet
     
     // Patch applies the patch and returns the patched runtimeClass.
     func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) {
    +	emptyResult := &v1beta1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1beta1.RuntimeClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RuntimeClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta
     	if name == nil {
     		return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.RuntimeClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1beta1.RuntimeClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RuntimeClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
    index f8990adf1e..8e15d52889 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/node/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RuntimeClassesGetter has a method to return a RuntimeClassInterface.
    @@ -55,143 +52,18 @@ type RuntimeClassInterface interface {
     
     // runtimeClasses implements RuntimeClassInterface
     type runtimeClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration]
     }
     
     // newRuntimeClasses returns a RuntimeClasses
     func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses {
     	return &runtimeClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration](
    +			"runtimeclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.RuntimeClass { return &v1beta1.RuntimeClass{} },
    +			func() *v1beta1.RuntimeClassList { return &v1beta1.RuntimeClassList{} }),
     	}
     }
    -
    -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
    -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) {
    -	result = &v1beta1.RuntimeClass{}
    -	err = c.client.Get().
    -		Resource("runtimeclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
    -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.RuntimeClassList{}
    -	err = c.client.Get().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested runtimeClasses.
    -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
    -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) {
    -	result = &v1beta1.RuntimeClass{}
    -	err = c.client.Post().
    -		Resource("runtimeclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(runtimeClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
    -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) {
    -	result = &v1beta1.RuntimeClass{}
    -	err = c.client.Put().
    -		Resource("runtimeclasses").
    -		Name(runtimeClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(runtimeClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs.
    -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("runtimeclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("runtimeclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched runtimeClass.
    -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) {
    -	result = &v1beta1.RuntimeClass{}
    -	err = c.client.Patch(pt).
    -		Resource("runtimeclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass.
    -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) {
    -	if runtimeClass == nil {
    -		return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(runtimeClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := runtimeClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.RuntimeClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("runtimeclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
    index cd1aac9c29..22173d36de 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
    @@ -19,7 +19,9 @@ limitations under the License.
     package v1
     
     import (
    -	rest "k8s.io/client-go/rest"
    +	v1 "k8s.io/api/policy/v1"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
     )
     
     // EvictionsGetter has a method to return a EvictionInterface.
    @@ -35,14 +37,17 @@ type EvictionInterface interface {
     
     // evictions implements EvictionInterface
     type evictions struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.Client[*v1.Eviction]
     }
     
     // newEvictions returns a Evictions
     func newEvictions(c *PolicyV1Client, namespace string) *evictions {
     	return &evictions{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClient[*v1.Eviction](
    +			"evictions",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Eviction { return &v1.Eviction{} }),
     	}
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go
    index 853187feb5..2c7e95b720 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go
    @@ -28,7 +28,7 @@ type EvictionExpansion interface {
     }
     
     func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error {
    -	return c.client.Post().
    +	return c.GetClient().Post().
     		AbsPath("/api/v1").
     		Namespace(eviction.Namespace).
     		Resource("pods").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go
    index 7b5f51caf4..de2bcc1b09 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go
    @@ -44,22 +44,24 @@ var poddisruptionbudgetsKind = v1.SchemeGroupVersion.WithKind("PodDisruptionBudg
     
     // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any.
     func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1.PodDisruptionBudget{})
    +		Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodDisruptionBudget), err
     }
     
     // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors.
     func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) {
    +	emptyResult := &v1.PodDisruptionBudgetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1.PodDisruptionBudgetList{})
    +		Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOpt
     // Watch returns a watch.Interface that watches the requested podDisruptionBudgets.
     func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a podDisruptionBudget and creates it.  Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
     func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{})
    +		Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodDisruptionBudget), err
     }
     
     // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
     func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{})
    +		Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodDisruptionBudget), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) {
    +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodDisruptionBudget), err
     }
    @@ -126,7 +131,7 @@ func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.PodDisruptionBudgetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts me
     
     // Patch applies the patch and returns the patched podDisruptionBudget.
     func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1.PodDisruptionBudget{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodDisruptionBudget), err
     }
    @@ -156,11 +162,12 @@ func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudge
     	if name == nil {
     		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodDisruptionBudget{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodDisruptionBudget), err
     }
    @@ -179,11 +186,12 @@ func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptio
     	if name == nil {
     		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PodDisruptionBudget{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PodDisruptionBudget), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go
    index 58db3acf9e..6d011cbce2 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/policy/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	policyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface.
    @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface {
     type PodDisruptionBudgetInterface interface {
     	Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (*v1.PodDisruptionBudget, error)
     	Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error)
     	Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error)
     	PodDisruptionBudgetExpansion
     }
     
     // podDisruptionBudgets implements PodDisruptionBudgetInterface
     type podDisruptionBudgets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration]
     }
     
     // newPodDisruptionBudgets returns a PodDisruptionBudgets
     func newPodDisruptionBudgets(c *PolicyV1Client, namespace string) *podDisruptionBudgets {
     	return &podDisruptionBudgets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration](
    +			"poddisruptionbudgets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.PodDisruptionBudget { return &v1.PodDisruptionBudget{} },
    +			func() *v1.PodDisruptionBudgetList { return &v1.PodDisruptionBudgetList{} }),
     	}
     }
    -
    -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any.
    -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) {
    -	result = &v1.PodDisruptionBudget{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors.
    -func (c *podDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.PodDisruptionBudgetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets.
    -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a podDisruptionBudget and creates it.  Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
    -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) {
    -	result = &v1.PodDisruptionBudget{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podDisruptionBudget).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
    -func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) {
    -	result = &v1.PodDisruptionBudget{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(podDisruptionBudget.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podDisruptionBudget).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) {
    -	result = &v1.PodDisruptionBudget{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(podDisruptionBudget.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podDisruptionBudget).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs.
    -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched podDisruptionBudget.
    -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) {
    -	result = &v1.PodDisruptionBudget{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget.
    -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) {
    -	if podDisruptionBudget == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(podDisruptionBudget)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := podDisruptionBudget.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
    -	}
    -	result = &v1.PodDisruptionBudget{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) {
    -	if podDisruptionBudget == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(podDisruptionBudget)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := podDisruptionBudget.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.PodDisruptionBudget{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
    index 12e8e76edc..e003ece6bd 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
    @@ -19,7 +19,9 @@ limitations under the License.
     package v1beta1
     
     import (
    -	rest "k8s.io/client-go/rest"
    +	v1beta1 "k8s.io/api/policy/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
     )
     
     // EvictionsGetter has a method to return a EvictionInterface.
    @@ -35,14 +37,17 @@ type EvictionInterface interface {
     
     // evictions implements EvictionInterface
     type evictions struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.Client[*v1beta1.Eviction]
     }
     
     // newEvictions returns a Evictions
     func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions {
     	return &evictions{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClient[*v1beta1.Eviction](
    +			"evictions",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Eviction { return &v1beta1.Eviction{} }),
     	}
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
    index c003671f5d..d7c28987cf 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
    @@ -28,7 +28,7 @@ type EvictionExpansion interface {
     }
     
     func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error {
    -	return c.client.Post().
    +	return c.GetClient().Post().
     		AbsPath("/api/v1").
     		Namespace(eviction.Namespace).
     		Resource("pods").
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go
    index bcee8e7774..fbd9d01e07 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go
    @@ -44,22 +44,24 @@ var poddisruptionbudgetsKind = v1beta1.SchemeGroupVersion.WithKind("PodDisruptio
     
     // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any.
     func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1beta1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{})
    +		Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PodDisruptionBudget), err
     }
     
     // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors.
     func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) {
    +	emptyResult := &v1beta1.PodDisruptionBudgetList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{})
    +		Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,40 +80,43 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions
     // Watch returns a watch.Interface that watches the requested podDisruptionBudgets.
     func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a podDisruptionBudget and creates it.  Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
     func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1beta1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{})
    +		Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PodDisruptionBudget), err
     }
     
     // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
     func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1beta1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{})
    +		Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PodDisruptionBudget), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) {
    +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1beta1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{})
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PodDisruptionBudget), err
     }
    @@ -126,7 +131,7 @@ func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{})
     	return err
    @@ -134,11 +139,12 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1
     
     // Patch applies the patch and returns the patched podDisruptionBudget.
     func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) {
    +	emptyResult := &v1beta1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PodDisruptionBudget), err
     }
    @@ -156,11 +162,12 @@ func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudge
     	if name == nil {
     		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.PodDisruptionBudget{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PodDisruptionBudget), err
     }
    @@ -179,11 +186,12 @@ func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptio
     	if name == nil {
     		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.PodDisruptionBudget{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.PodDisruptionBudget{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PodDisruptionBudget), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
    index 1687289921..4111812376 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/policy/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface.
    @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface {
     type PodDisruptionBudgetInterface interface {
     	Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error)
     	Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error)
     	Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error)
     	PodDisruptionBudgetExpansion
     }
     
     // podDisruptionBudgets implements PodDisruptionBudgetInterface
     type podDisruptionBudgets struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration]
     }
     
     // newPodDisruptionBudgets returns a PodDisruptionBudgets
     func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets {
     	return &podDisruptionBudgets{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration](
    +			"poddisruptionbudgets",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.PodDisruptionBudget { return &v1beta1.PodDisruptionBudget{} },
    +			func() *v1beta1.PodDisruptionBudgetList { return &v1beta1.PodDisruptionBudgetList{} }),
     	}
     }
    -
    -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any.
    -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    -	result = &v1beta1.PodDisruptionBudget{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors.
    -func (c *podDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.PodDisruptionBudgetList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets.
    -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a podDisruptionBudget and creates it.  Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
    -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    -	result = &v1beta1.PodDisruptionBudget{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podDisruptionBudget).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
    -func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    -	result = &v1beta1.PodDisruptionBudget{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(podDisruptionBudget.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podDisruptionBudget).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    -	result = &v1beta1.PodDisruptionBudget{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(podDisruptionBudget.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podDisruptionBudget).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs.
    -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched podDisruptionBudget.
    -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) {
    -	result = &v1beta1.PodDisruptionBudget{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget.
    -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    -	if podDisruptionBudget == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(podDisruptionBudget)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := podDisruptionBudget.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.PodDisruptionBudget{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) {
    -	if podDisruptionBudget == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(podDisruptionBudget)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := podDisruptionBudget.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.PodDisruptionBudget{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("poddisruptionbudgets").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
    index 000d737f0f..19fff0ee47 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/rbac/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ClusterRolesGetter has a method to return a ClusterRoleInterface.
    @@ -55,143 +52,18 @@ type ClusterRoleInterface interface {
     
     // clusterRoles implements ClusterRoleInterface
     type clusterRoles struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration]
     }
     
     // newClusterRoles returns a ClusterRoles
     func newClusterRoles(c *RbacV1Client) *clusterRoles {
     	return &clusterRoles{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration](
    +			"clusterroles",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.ClusterRole { return &v1.ClusterRole{} },
    +			func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }),
     	}
     }
    -
    -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
    -func (c *clusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) {
    -	result = &v1.ClusterRole{}
    -	err = c.client.Get().
    -		Resource("clusterroles").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
    -func (c *clusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ClusterRoleList{}
    -	err = c.client.Get().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested clusterRoles.
    -func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
    -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) {
    -	result = &v1.ClusterRole{}
    -	err = c.client.Post().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRole).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
    -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) {
    -	result = &v1.ClusterRole{}
    -	err = c.client.Put().
    -		Resource("clusterroles").
    -		Name(clusterRole.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRole).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
    -func (c *clusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("clusterroles").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("clusterroles").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched clusterRole.
    -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) {
    -	result = &v1.ClusterRole{}
    -	err = c.client.Patch(pt).
    -		Resource("clusterroles").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole.
    -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) {
    -	if clusterRole == nil {
    -		return nil, fmt.Errorf("clusterRole provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(clusterRole)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := clusterRole.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("clusterRole.Name must be provided to Apply")
    -	}
    -	result = &v1.ClusterRole{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("clusterroles").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
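The hand-written REST plumbing removed above is replaced by the embedded generic client, while the exported ClusterRoleInterface is untouched in this hunk, so callers of the typed clientset keep working unchanged across this vendor bump. A minimal caller-side sketch, assuming a reachable cluster and a kubeconfig at the default location (the "cluster-admin" name is just an example), that exercises the same Get path now backed by gentype.ClientWithListAndApply:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes the default kubeconfig location; adjust the path as needed.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same typed call as before the refactor; it now routes through the
	// embedded generic client instead of hand-written REST verbs.
	cr, err := cs.RbacV1().ClusterRoles().Get(context.Background(), "cluster-admin", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(cr.Name, len(cr.Rules))
}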
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
    index 31db43d984..77fb3785e4 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/rbac/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface.
    @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface {
     
     // clusterRoleBindings implements ClusterRoleBindingInterface
     type clusterRoleBindings struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration]
     }
     
     // newClusterRoleBindings returns a ClusterRoleBindings
     func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings {
     	return &clusterRoleBindings{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration](
    +			"clusterrolebindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} },
    +			func() *v1.ClusterRoleBindingList { return &v1.ClusterRoleBindingList{} }),
     	}
     }
    -
    -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
    -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) {
    -	result = &v1.ClusterRoleBinding{}
    -	err = c.client.Get().
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
    -func (c *clusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.ClusterRoleBindingList{}
    -	err = c.client.Get().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
    -func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
    -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) {
    -	result = &v1.ClusterRoleBinding{}
    -	err = c.client.Post().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRoleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
    -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) {
    -	result = &v1.ClusterRoleBinding{}
    -	err = c.client.Put().
    -		Resource("clusterrolebindings").
    -		Name(clusterRoleBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRoleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
    -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched clusterRoleBinding.
    -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) {
    -	result = &v1.ClusterRoleBinding{}
    -	err = c.client.Patch(pt).
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding.
    -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) {
    -	if clusterRoleBinding == nil {
    -		return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(clusterRoleBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := clusterRoleBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply")
    -	}
    -	result = &v1.ClusterRoleBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("clusterrolebindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go
    index 5add33ddfb..6df91b1a86 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go
    @@ -43,20 +43,22 @@ var clusterrolesKind = v1.SchemeGroupVersion.WithKind("ClusterRole")
     
     // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
     func (c *FakeClusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) {
    +	emptyResult := &v1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1.ClusterRole{})
    +		Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRole), err
     }
     
     // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
     func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) {
    +	emptyResult := &v1.ClusterRoleList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1.ClusterRoleList{})
    +		Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (r
     // Watch returns a watch.Interface that watches the requested clusterRoles.
     func (c *FakeClusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts))
     }
     
     // Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
     func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) {
    +	emptyResult := &v1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{})
    +		Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRole), err
     }
     
     // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
     func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) {
    +	emptyResult := &v1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRole), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts metav1.
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ClusterRoleList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.Del
     
     // Patch applies the patch and returns the patched clusterRole.
     func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) {
    +	emptyResult := &v1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1.ClusterRole{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRole), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.Cluste
     	if name == nil {
     		return nil, fmt.Errorf("clusterRole.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1.ClusterRole{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRole), err
     }
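The fake client changes in this file follow one pattern: actions are built with the *WithOptions constructors so the caller-supplied options are recorded, and the error path returns an empty typed object instead of nil. A small test sketch against the fake clientset (resource names and assertions are illustrative, not taken from this repository) showing the updated Get behavior:

package rbac_test

import (
	"context"
	"testing"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeClusterRoleGet(t *testing.T) {
	seed := &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "viewer"}}
	cs := fake.NewSimpleClientset(seed)

	got, err := cs.RbacV1().ClusterRoles().Get(context.Background(), "viewer", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "viewer" {
		t.Fatalf("got %q, want %q", got.Name, "viewer")
	}

	// Per the change above, a miss now yields a non-nil empty object plus the error.
	missing, err := cs.RbacV1().ClusterRoles().Get(context.Background(), "absent", metav1.GetOptions{})
	if err == nil {
		t.Fatal("expected a not-found error")
	}
	if missing == nil {
		t.Fatal("expected an empty (non-nil) result on error")
	}
}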
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
    index d42e93e653..6f3251408c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
    @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1.SchemeGroupVersion.WithKind("ClusterRoleBinding
     
     // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
     func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRoleBinding), err
     }
     
     // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
     func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) {
    +	emptyResult := &v1.ClusterRoleBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1.ClusterRoleBindingList{})
    +		Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOpti
     // Watch returns a watch.Interface that watches the requested clusterRoleBindings.
     func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts))
     }
     
     // Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
     func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRoleBinding), err
     }
     
     // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
     func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRoleBinding), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.ClusterRoleBindingList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts met
     
     // Patch applies the patch and returns the patched clusterRoleBinding.
     func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRoleBinding), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding
     	if name == nil {
     		return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.ClusterRoleBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
    index a3bc5da663..ba9161940b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
    @@ -44,22 +44,24 @@ var rolesKind = v1.SchemeGroupVersion.WithKind("Role")
     
     // Get takes name of the role, and returns the corresponding role object, and an error if there is any.
     func (c *FakeRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) {
    +	emptyResult := &v1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1.Role{})
    +		Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Role), err
     }
     
     // List takes label and field selectors, and returns the list of Roles that match those selectors.
     func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) {
    +	emptyResult := &v1.RoleList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1.RoleList{})
    +		Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result *
     // Watch returns a watch.Interface that watches the requested roles.
     func (c *FakeRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
     func (c *FakeRoles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) {
    +	emptyResult := &v1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1.Role{})
    +		Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Role), err
     }
     
     // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
     func (c *FakeRoles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) {
    +	emptyResult := &v1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1.Role{})
    +		Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Role), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts metav1.DeleteO
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.RoleList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOpti
     
     // Patch applies the patch and returns the patched role.
     func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) {
    +	emptyResult := &v1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1.Role{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Role), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfigurati
     	if name == nil {
     		return nil, fmt.Errorf("role.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Role{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.Role), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
    index eeb37e9db3..6d7d7d1933 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
    @@ -44,22 +44,24 @@ var rolebindingsKind = v1.SchemeGroupVersion.WithKind("RoleBinding")
     
     // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
     func (c *FakeRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) {
    +	emptyResult := &v1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1.RoleBinding{})
    +		Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RoleBinding), err
     }
     
     // List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
     func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) {
    +	emptyResult := &v1.RoleBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1.RoleBindingList{})
    +		Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (r
     // Watch returns a watch.Interface that watches the requested roleBindings.
     func (c *FakeRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
     func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) {
    +	emptyResult := &v1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{})
    +		Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RoleBinding), err
     }
     
     // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
     func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) {
    +	emptyResult := &v1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{})
    +		Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RoleBinding), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts metav1.
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.RoleBindingList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.Del
     
     // Patch applies the patch and returns the patched roleBinding.
     func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) {
    +	emptyResult := &v1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1.RoleBinding{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RoleBinding), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBi
     	if name == nil {
     		return nil, fmt.Errorf("roleBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1.RoleBinding{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.RoleBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
    index 93810a3ffa..b75b055f07 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/rbac/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RolesGetter has a method to return a RoleInterface.
    @@ -55,154 +52,18 @@ type RoleInterface interface {
     
     // roles implements RoleInterface
     type roles struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration]
     }
     
     // newRoles returns a Roles
     func newRoles(c *RbacV1Client, namespace string) *roles {
     	return &roles{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration](
    +			"roles",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.Role { return &v1.Role{} },
    +			func() *v1.RoleList { return &v1.RoleList{} }),
     	}
     }
    -
    -// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
    -func (c *roles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) {
    -	result = &v1.Role{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Roles that match those selectors.
    -func (c *roles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.RoleList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested roles.
    -func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
    -func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) {
    -	result = &v1.Role{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(role).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
    -func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) {
    -	result = &v1.Role{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(role.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(role).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the role and deletes it. Returns an error if one occurs.
    -func (c *roles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *roles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched role.
    -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) {
    -	result = &v1.Role{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied role.
    -func (c *roles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) {
    -	if role == nil {
    -		return nil, fmt.Errorf("role provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(role)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := role.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("role.Name must be provided to Apply")
    -	}
    -	result = &v1.Role{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
    index 2ace938604..fcbb1c0e26 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/rbac/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RoleBindingsGetter has a method to return a RoleBindingInterface.
    @@ -55,154 +52,18 @@ type RoleBindingInterface interface {
     
     // roleBindings implements RoleBindingInterface
     type roleBindings struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration]
     }
     
     // newRoleBindings returns a RoleBindings
     func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings {
     	return &roleBindings{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration](
    +			"rolebindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.RoleBinding { return &v1.RoleBinding{} },
    +			func() *v1.RoleBindingList { return &v1.RoleBindingList{} }),
     	}
     }
    -
    -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
    -func (c *roleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) {
    -	result = &v1.RoleBinding{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
    -func (c *roleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.RoleBindingList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested roleBindings.
    -func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
    -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) {
    -	result = &v1.RoleBinding{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(roleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
    -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) {
    -	result = &v1.RoleBinding{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(roleBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(roleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
    -func (c *roleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *roleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched roleBinding.
    -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) {
    -	result = &v1.RoleBinding{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding.
    -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) {
    -	if roleBinding == nil {
    -		return nil, fmt.Errorf("roleBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(roleBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := roleBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("roleBinding.Name must be provided to Apply")
    -	}
    -	result = &v1.RoleBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
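Across these rbac/v1 files the only wiring difference between cluster-scoped and namespaced resources is the namespace argument passed to the gentype constructor: ClusterRole and ClusterRoleBinding pass the empty string, Role and RoleBinding pass the namespace. A stripped-down, hypothetical sketch of that shape (a toy stand-in, not the vendored gentype implementation) showing how one generic constructor covers both cases:

package main

import "fmt"

// resourceClient is a toy stand-in for the generic client pattern above:
// one type parameterized by the returned object, with the namespace
// captured at construction time ("" means cluster-scoped).
type resourceClient[T any] struct {
	resource  string
	namespace string
	newObj    func() T
}

func newResourceClient[T any](resource, namespace string, newObj func() T) *resourceClient[T] {
	return &resourceClient[T]{resource: resource, namespace: namespace, newObj: newObj}
}

// path mirrors how a namespaced request differs from a cluster-scoped one
// (group/version prefixes omitted for brevity).
func (c *resourceClient[T]) path(name string) string {
	if c.namespace == "" {
		return fmt.Sprintf("/%s/%s", c.resource, name)
	}
	return fmt.Sprintf("/namespaces/%s/%s/%s", c.namespace, c.resource, name)
}

func main() {
	type role struct{ Name string }
	clusterRoles := newResourceClient("clusterroles", "", func() *role { return &role{} })
	roles := newResourceClient("roles", "kube-system", func() *role { return &role{} })
	fmt.Println(clusterRoles.path("cluster-admin")) // /clusterroles/cluster-admin
	fmt.Println(roles.path("example-role"))         // /namespaces/kube-system/roles/example-role
}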
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
    index d6d30e99ef..f91e2c50a1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/rbac/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ClusterRolesGetter has a method to return a ClusterRoleInterface.
    @@ -55,143 +52,18 @@ type ClusterRoleInterface interface {
     
     // clusterRoles implements ClusterRoleInterface
     type clusterRoles struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration]
     }
     
     // newClusterRoles returns a ClusterRoles
     func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles {
     	return &clusterRoles{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration](
    +			"clusterroles",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.ClusterRole { return &v1alpha1.ClusterRole{} },
    +			func() *v1alpha1.ClusterRoleList { return &v1alpha1.ClusterRoleList{} }),
     	}
     }
    -
    -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
    -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) {
    -	result = &v1alpha1.ClusterRole{}
    -	err = c.client.Get().
    -		Resource("clusterroles").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
    -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.ClusterRoleList{}
    -	err = c.client.Get().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested clusterRoles.
    -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
    -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) {
    -	result = &v1alpha1.ClusterRole{}
    -	err = c.client.Post().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRole).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
    -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) {
    -	result = &v1alpha1.ClusterRole{}
    -	err = c.client.Put().
    -		Resource("clusterroles").
    -		Name(clusterRole.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRole).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
    -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("clusterroles").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("clusterroles").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched clusterRole.
    -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
    -	result = &v1alpha1.ClusterRole{}
    -	err = c.client.Patch(pt).
    -		Resource("clusterroles").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole.
    -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) {
    -	if clusterRole == nil {
    -		return nil, fmt.Errorf("clusterRole provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(clusterRole)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := clusterRole.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("clusterRole.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.ClusterRole{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("clusterroles").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
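
The hand-written Get/List/Watch/Create/Update/Delete/DeleteCollection/Patch/Apply plumbing removed above is now supplied by the embedded gentype.ClientWithListAndApply, so the exported ClusterRoleInterface is unchanged for callers. Below is a minimal usage sketch, not part of the diff, assuming a reachable cluster and a hypothetical kubeconfig path; it shows that call sites compile and behave the same against the new generic client.

// Sketch only: caller-side code is unaffected by the gentype refactor.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; adjust for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Get/List/Watch/... are now implemented by the embedded
	// gentype.ClientWithListAndApply, but the call looks exactly as before.
	roles, err := cs.RbacV1alpha1().ClusterRoles().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, r := range roles.Items {
		fmt.Println(r.Name)
	}
}
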
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
    index 2eded92ac2..3f04526f0b 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/rbac/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface.
    @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface {
     
     // clusterRoleBindings implements ClusterRoleBindingInterface
     type clusterRoleBindings struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration]
     }
     
     // newClusterRoleBindings returns a ClusterRoleBindings
     func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings {
     	return &clusterRoleBindings{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration](
    +			"clusterrolebindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.ClusterRoleBinding { return &v1alpha1.ClusterRoleBinding{} },
    +			func() *v1alpha1.ClusterRoleBindingList { return &v1alpha1.ClusterRoleBindingList{} }),
     	}
     }
    -
    -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
    -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
    -	result = &v1alpha1.ClusterRoleBinding{}
    -	err = c.client.Get().
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
    -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.ClusterRoleBindingList{}
    -	err = c.client.Get().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
    -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
    -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
    -	result = &v1alpha1.ClusterRoleBinding{}
    -	err = c.client.Post().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRoleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
    -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
    -	result = &v1alpha1.ClusterRoleBinding{}
    -	err = c.client.Put().
    -		Resource("clusterrolebindings").
    -		Name(clusterRoleBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRoleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
    -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched clusterRoleBinding.
    -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
    -	result = &v1alpha1.ClusterRoleBinding{}
    -	err = c.client.Patch(pt).
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding.
    -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
    -	if clusterRoleBinding == nil {
    -		return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(clusterRoleBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := clusterRoleBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.ClusterRoleBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("clusterrolebindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
    index 534a1990f5..34c9a853ea 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
    @@ -43,20 +43,22 @@ var clusterrolesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRole")
     
     // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
     func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) {
    +	emptyResult := &v1alpha1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{})
    +		Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRole), err
     }
     
     // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
     func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) {
    +	emptyResult := &v1alpha1.ClusterRoleList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1alpha1.ClusterRoleList{})
    +		Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested clusterRoles.
     func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts))
     }
     
     // Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
     func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) {
    +	emptyResult := &v1alpha1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{})
    +		Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRole), err
     }
     
     // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
     func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) {
    +	emptyResult := &v1alpha1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRole), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteO
     
     // Patch applies the patch and returns the patched clusterRole.
     func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
    +	emptyResult := &v1alpha1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRole), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.
     	if name == nil {
     		return nil, fmt.Errorf("clusterRole.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRole{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRole), err
     }
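
Two changes recur in the fake-client hunks above: the test actions now carry the caller's options (NewRootGetActionWithOptions and friends), and on error the fakes return a non-nil zero-value object instead of nil. The sketch below, not part of the diff, illustrates that second point with the fake clientset from k8s.io/client-go/kubernetes/fake, assuming the behavior shown in the hunks.

// Sketch only: the emptyResult behavior of the updated fake clients.
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	cs := fake.NewSimpleClientset() // no objects seeded

	cr, err := cs.RbacV1alpha1().ClusterRoles().Get(context.Background(), "missing", metav1.GetOptions{})

	// After this client-go bump the fake returns a non-nil, zero-value
	// *ClusterRole together with the NotFound error (previously it returned nil),
	// so tests that dereference the result without checking err no longer panic.
	fmt.Println(apierrors.IsNotFound(err)) // true
	fmt.Println(cr != nil)                 // true: empty object, not nil
}
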
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
    index 0a4359392d..d42f763421 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
    @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRoleB
     
     // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
     func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1alpha1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRoleBinding), err
     }
     
     // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
     func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) {
    +	emptyResult := &v1alpha1.ClusterRoleBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1alpha1.ClusterRoleBindingList{})
    +		Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions)
     // Watch returns a watch.Interface that watches the requested clusterRoleBindings.
     func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts))
     }
     
     // Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
     func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1alpha1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRoleBinding), err
     }
     
     // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
     func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1alpha1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRoleBinding), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.
     
     // Patch applies the patch and returns the patched clusterRoleBinding.
     func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1alpha1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRoleBinding), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding
     	if name == nil {
     		return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.ClusterRoleBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
    index a0e28348ae..9b0ba7cac6 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
    @@ -44,22 +44,24 @@ var rolesKind = v1alpha1.SchemeGroupVersion.WithKind("Role")
     
     // Get takes name of the role, and returns the corresponding role object, and an error if there is any.
     func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) {
    +	emptyResult := &v1alpha1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{})
    +		Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.Role), err
     }
     
     // List takes label and field selectors, and returns the list of Roles that match those selectors.
     func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) {
    +	emptyResult := &v1alpha1.RoleList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1alpha1.RoleList{})
    +		Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1al
     // Watch returns a watch.Interface that watches the requested roles.
     func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
     func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) {
    +	emptyResult := &v1alpha1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{})
    +		Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.Role), err
     }
     
     // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
     func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) {
    +	emptyResult := &v1alpha1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{})
    +		Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.Role), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptio
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.RoleList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions,
     
     // Patch applies the patch and returns the patched role.
     func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) {
    +	emptyResult := &v1alpha1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.Role), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfi
     	if name == nil {
     		return nil, fmt.Errorf("role.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.Role{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.Role), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
    index 76649f5c2b..f572945aca 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
    @@ -44,22 +44,24 @@ var rolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("RoleBinding")
     
     // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
     func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
    +	emptyResult := &v1alpha1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{})
    +		Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RoleBinding), err
     }
     
     // List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
     func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
    +	emptyResult := &v1alpha1.RoleBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1alpha1.RoleBindingList{})
    +		Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested roleBindings.
     func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
     func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) {
    +	emptyResult := &v1alpha1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{})
    +		Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RoleBinding), err
     }
     
     // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
     func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) {
    +	emptyResult := &v1alpha1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{})
    +		Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RoleBinding), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteO
     
     // Patch applies the patch and returns the patched roleBinding.
     func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
    +	emptyResult := &v1alpha1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RoleBinding), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.
     	if name == nil {
     		return nil, fmt.Errorf("roleBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.RoleBinding{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.RoleBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
    index 43c16fde74..4a1876a7d5 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/rbac/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RolesGetter has a method to return a RoleInterface.
    @@ -55,154 +52,18 @@ type RoleInterface interface {
     
     // roles implements RoleInterface
     type roles struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration]
     }
     
     // newRoles returns a Roles
     func newRoles(c *RbacV1alpha1Client, namespace string) *roles {
     	return &roles{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration](
    +			"roles",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1alpha1.Role { return &v1alpha1.Role{} },
    +			func() *v1alpha1.RoleList { return &v1alpha1.RoleList{} }),
     	}
     }
    -
    -// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
    -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) {
    -	result = &v1alpha1.Role{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Roles that match those selectors.
    -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.RoleList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested roles.
    -func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
    -func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) {
    -	result = &v1alpha1.Role{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(role).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
    -func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) {
    -	result = &v1alpha1.Role{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(role.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(role).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the role and deletes it. Returns an error if one occurs.
    -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched role.
    -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) {
    -	result = &v1alpha1.Role{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied role.
    -func (c *roles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) {
    -	if role == nil {
    -		return nil, fmt.Errorf("role provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(role)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := role.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("role.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.Role{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
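
role.go above is the namespaced variant of the same refactor: the only structural difference from the cluster-scoped clients is the namespace argument passed to gentype.NewClientWithListAndApply, and the JSON-marshal-plus-ApplyPatchType plumbing of the removed Apply method now lives inside the generic client. The sketch below, not part of the diff, shows a server-side apply through the unchanged RoleInterface; it assumes a clientset built as in the earlier sketch, uses the generated apply-configuration constructors, and the field manager name is illustrative.

// Sketch only: server-side apply via the unchanged public interface.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1alpha1ac "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

func applyPodReader(ctx context.Context, cs kubernetes.Interface) error {
	role := rbacv1alpha1ac.Role("pod-reader", "default").
		WithRules(rbacv1alpha1ac.PolicyRule().
			WithAPIGroups("").
			WithResources("pods").
			WithVerbs("get", "list", "watch"))

	// The marshalling and ApplyPatchType handling removed in the hunks above
	// is now performed inside gentype.ClientWithListAndApply.
	_, err := cs.RbacV1alpha1().Roles("default").Apply(ctx, role, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}
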
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
    index 3129c9b4e8..6473132f1c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/rbac/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RoleBindingsGetter has a method to return a RoleBindingInterface.
    @@ -55,154 +52,18 @@ type RoleBindingInterface interface {
     
     // roleBindings implements RoleBindingInterface
     type roleBindings struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration]
     }
     
     // newRoleBindings returns a RoleBindings
     func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings {
     	return &roleBindings{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration](
    +			"rolebindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1alpha1.RoleBinding { return &v1alpha1.RoleBinding{} },
    +			func() *v1alpha1.RoleBindingList { return &v1alpha1.RoleBindingList{} }),
     	}
     }
    -
    -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
    -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
    -	result = &v1alpha1.RoleBinding{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
    -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.RoleBindingList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested roleBindings.
    -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
    -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) {
    -	result = &v1alpha1.RoleBinding{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(roleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
    -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) {
    -	result = &v1alpha1.RoleBinding{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(roleBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(roleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
    -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched roleBinding.
    -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
    -	result = &v1alpha1.RoleBinding{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding.
    -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) {
    -	if roleBinding == nil {
    -		return nil, fmt.Errorf("roleBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(roleBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := roleBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("roleBinding.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.RoleBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
    index a3d67f0315..ed398333a1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/rbac/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ClusterRolesGetter has a method to return a ClusterRoleInterface.
    @@ -55,143 +52,18 @@ type ClusterRoleInterface interface {
     
     // clusterRoles implements ClusterRoleInterface
     type clusterRoles struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration]
     }
     
     // newClusterRoles returns a ClusterRoles
     func newClusterRoles(c *RbacV1beta1Client) *clusterRoles {
     	return &clusterRoles{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration](
    +			"clusterroles",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.ClusterRole { return &v1beta1.ClusterRole{} },
    +			func() *v1beta1.ClusterRoleList { return &v1beta1.ClusterRoleList{} }),
     	}
     }
    -
    -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
    -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) {
    -	result = &v1beta1.ClusterRole{}
    -	err = c.client.Get().
    -		Resource("clusterroles").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
    -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.ClusterRoleList{}
    -	err = c.client.Get().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested clusterRoles.
    -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
    -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) {
    -	result = &v1beta1.ClusterRole{}
    -	err = c.client.Post().
    -		Resource("clusterroles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRole).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
    -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) {
    -	result = &v1beta1.ClusterRole{}
    -	err = c.client.Put().
    -		Resource("clusterroles").
    -		Name(clusterRole.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRole).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
    -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("clusterroles").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("clusterroles").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched clusterRole.
    -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) {
    -	result = &v1beta1.ClusterRole{}
    -	err = c.client.Patch(pt).
    -		Resource("clusterroles").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole.
    -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) {
    -	if clusterRole == nil {
    -		return nil, fmt.Errorf("clusterRole provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(clusterRole)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := clusterRole.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("clusterRole.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.ClusterRole{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("clusterroles").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
    index ae39cbb9ae..3010a99ae2 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/rbac/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface.
    @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface {
     
     // clusterRoleBindings implements ClusterRoleBindingInterface
     type clusterRoleBindings struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration]
     }
     
     // newClusterRoleBindings returns a ClusterRoleBindings
     func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings {
     	return &clusterRoleBindings{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration](
    +			"clusterrolebindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.ClusterRoleBinding { return &v1beta1.ClusterRoleBinding{} },
    +			func() *v1beta1.ClusterRoleBindingList { return &v1beta1.ClusterRoleBindingList{} }),
     	}
     }
    -
    -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
    -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) {
    -	result = &v1beta1.ClusterRoleBinding{}
    -	err = c.client.Get().
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
    -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.ClusterRoleBindingList{}
    -	err = c.client.Get().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
    -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
    -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
    -	result = &v1beta1.ClusterRoleBinding{}
    -	err = c.client.Post().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRoleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
    -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
    -	result = &v1beta1.ClusterRoleBinding{}
    -	err = c.client.Put().
    -		Resource("clusterrolebindings").
    -		Name(clusterRoleBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(clusterRoleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
    -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("clusterrolebindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched clusterRoleBinding.
    -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
    -	result = &v1beta1.ClusterRoleBinding{}
    -	err = c.client.Patch(pt).
    -		Resource("clusterrolebindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding.
    -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) {
    -	if clusterRoleBinding == nil {
    -		return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(clusterRoleBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := clusterRoleBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.ClusterRoleBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("clusterrolebindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
    index 2a94a4315e..b7996c106f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
    @@ -43,20 +43,22 @@ var clusterrolesKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRole")
     
     // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
     func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) {
    +	emptyResult := &v1beta1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{})
    +		Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRole), err
     }
     
     // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
     func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) {
    +	emptyResult := &v1beta1.ClusterRoleList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1beta1.ClusterRoleList{})
    +		Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested clusterRoles.
     func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts))
     }
     
     // Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
     func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) {
    +	emptyResult := &v1beta1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{})
    +		Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRole), err
     }
     
     // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
     func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) {
    +	emptyResult := &v1beta1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRole), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteO
     
     // Patch applies the patch and returns the patched clusterRole.
     func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) {
    +	emptyResult := &v1beta1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRole), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.C
     	if name == nil {
     		return nil, fmt.Errorf("clusterRole.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ClusterRole{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRole{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRole), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
    index c9fd7c0cdd..8843757ace 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
    @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRoleBi
     
     // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
     func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1beta1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRoleBinding), err
     }
     
     // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
     func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) {
    +	emptyResult := &v1beta1.ClusterRoleBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1beta1.ClusterRoleBindingList{})
    +		Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions)
     // Watch returns a watch.Interface that watches the requested clusterRoleBindings.
     func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts))
     }
     
     // Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
     func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1beta1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRoleBinding), err
     }
     
     // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
     func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1beta1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRoleBinding), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.
     
     // Patch applies the patch and returns the patched clusterRoleBinding.
     func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
    +	emptyResult := &v1beta1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRoleBinding), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding
     	if name == nil {
     		return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.ClusterRoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRoleBinding{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.ClusterRoleBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go
    index 4158cf1d55..aa0fe28a13 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go
    @@ -44,22 +44,24 @@ var rolesKind = v1beta1.SchemeGroupVersion.WithKind("Role")
     
     // Get takes name of the role, and returns the corresponding role object, and an error if there is any.
     func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) {
    +	emptyResult := &v1beta1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{})
    +		Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Role), err
     }
     
     // List takes label and field selectors, and returns the list of Roles that match those selectors.
     func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) {
    +	emptyResult := &v1beta1.RoleList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1beta1.RoleList{})
    +		Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1be
     // Watch returns a watch.Interface that watches the requested roles.
     func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
     func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) {
    +	emptyResult := &v1beta1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{})
    +		Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Role), err
     }
     
     // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
     func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) {
    +	emptyResult := &v1beta1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{})
    +		Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Role), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptio
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.RoleList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions,
     
     // Patch applies the patch and returns the patched role.
     func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) {
    +	emptyResult := &v1beta1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Role), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfig
     	if name == nil {
     		return nil, fmt.Errorf("role.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.Role{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Role{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.Role), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go
    index 4616f0fd10..26c3c80458 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go
    @@ -44,22 +44,24 @@ var rolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("RoleBinding")
     
     // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
     func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
    +	emptyResult := &v1beta1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{})
    +		Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RoleBinding), err
     }
     
     // List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
     func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
    +	emptyResult := &v1beta1.RoleBindingList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1beta1.RoleBindingList{})
    +		Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested roleBindings.
     func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
     func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) {
    +	emptyResult := &v1beta1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{})
    +		Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RoleBinding), err
     }
     
     // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
     func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) {
    +	emptyResult := &v1beta1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{})
    +		Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RoleBinding), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteO
     
     // Patch applies the patch and returns the patched roleBinding.
     func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) {
    +	emptyResult := &v1beta1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RoleBinding), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.R
     	if name == nil {
     		return nil, fmt.Errorf("roleBinding.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.RoleBinding{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.RoleBinding{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.RoleBinding), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
    index e789e42fe7..92e51da1b1 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/rbac/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RolesGetter has a method to return a RoleInterface.
    @@ -55,154 +52,18 @@ type RoleInterface interface {
     
     // roles implements RoleInterface
     type roles struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration]
     }
     
     // newRoles returns a Roles
     func newRoles(c *RbacV1beta1Client, namespace string) *roles {
     	return &roles{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration](
    +			"roles",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.Role { return &v1beta1.Role{} },
    +			func() *v1beta1.RoleList { return &v1beta1.RoleList{} }),
     	}
     }
    -
    -// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
    -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) {
    -	result = &v1beta1.Role{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of Roles that match those selectors.
    -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.RoleList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested roles.
    -func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
    -func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) {
    -	result = &v1beta1.Role{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(role).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
    -func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) {
    -	result = &v1beta1.Role{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(role.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(role).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the role and deletes it. Returns an error if one occurs.
    -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("roles").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched role.
    -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) {
    -	result = &v1beta1.Role{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied role.
    -func (c *roles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) {
    -	if role == nil {
    -		return nil, fmt.Errorf("role provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(role)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := role.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("role.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.Role{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("roles").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
    index 1461ba3b6e..ad31bd051a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/rbac/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // RoleBindingsGetter has a method to return a RoleBindingInterface.
    @@ -55,154 +52,18 @@ type RoleBindingInterface interface {
     
     // roleBindings implements RoleBindingInterface
     type roleBindings struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration]
     }
     
     // newRoleBindings returns a RoleBindings
     func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings {
     	return &roleBindings{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration](
    +			"rolebindings",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.RoleBinding { return &v1beta1.RoleBinding{} },
    +			func() *v1beta1.RoleBindingList { return &v1beta1.RoleBindingList{} }),
     	}
     }
    -
    -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
    -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
    -	result = &v1beta1.RoleBinding{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
    -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.RoleBindingList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested roleBindings.
    -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
    -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) {
    -	result = &v1beta1.RoleBinding{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(roleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
    -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) {
    -	result = &v1beta1.RoleBinding{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(roleBinding.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(roleBinding).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
    -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched roleBinding.
    -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) {
    -	result = &v1beta1.RoleBinding{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding.
    -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) {
    -	if roleBinding == nil {
    -		return nil, fmt.Errorf("roleBinding provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(roleBinding)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := roleBinding.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("roleBinding.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.RoleBinding{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("rolebindings").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go
    deleted file mode 100644
    index baaf2d9853..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go
    +++ /dev/null
    @@ -1,20 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -// This package has the automatically generated typed clients.
    -package v1alpha2
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go
    deleted file mode 100644
    index 54882f8175..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go
    +++ /dev/null
    @@ -1,189 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	labels "k8s.io/apimachinery/pkg/labels"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -// FakePodSchedulingContexts implements PodSchedulingContextInterface
    -type FakePodSchedulingContexts struct {
    -	Fake *FakeResourceV1alpha2
    -	ns   string
    -}
    -
    -var podschedulingcontextsResource = v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts")
    -
    -var podschedulingcontextsKind = v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext")
    -
    -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any.
    -func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(podschedulingcontextsResource, c.ns, name), &v1alpha2.PodSchedulingContext{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.PodSchedulingContext), err
    -}
    -
    -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors.
    -func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), &v1alpha2.PodSchedulingContextList{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -
    -	label, _, _ := testing.ExtractFromListOptions(opts)
    -	if label == nil {
    -		label = labels.Everything()
    -	}
    -	list := &v1alpha2.PodSchedulingContextList{ListMeta: obj.(*v1alpha2.PodSchedulingContextList).ListMeta}
    -	for _, item := range obj.(*v1alpha2.PodSchedulingContextList).Items {
    -		if label.Matches(labels.Set(item.Labels)) {
    -			list.Items = append(list.Items, item)
    -		}
    -	}
    -	return list, err
    -}
    -
    -// Watch returns a watch.Interface that watches the requested podSchedulingContexts.
    -func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(podschedulingcontextsResource, c.ns, opts))
    -
    -}
    -
    -// Create takes the representation of a podSchedulingContext and creates it.  Returns the server's representation of the podSchedulingContext, and an error, if there is any.
    -func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.PodSchedulingContext), err
    -}
    -
    -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any.
    -func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.PodSchedulingContext), err
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(podschedulingcontextsResource, "status", c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.PodSchedulingContext), err
    -}
    -
    -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs.
    -func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	_, err := c.Fake.
    -		Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha2.PodSchedulingContext{})
    -
    -	return err
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(podschedulingcontextsResource, c.ns, listOpts)
    -
    -	_, err := c.Fake.Invokes(action, &v1alpha2.PodSchedulingContextList{})
    -	return err
    -}
    -
    -// Patch applies the patch and returns the patched podSchedulingContext.
    -func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, name, pt, data, subresources...), &v1alpha2.PodSchedulingContext{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.PodSchedulingContext), err
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext.
    -func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	if podSchedulingContext == nil {
    -		return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(podSchedulingContext)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := podSchedulingContext.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.PodSchedulingContext{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.PodSchedulingContext), err
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	if podSchedulingContext == nil {
    -		return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(podSchedulingContext)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := podSchedulingContext.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.PodSchedulingContext{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.PodSchedulingContext), err
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go
    deleted file mode 100644
    index 6f69d0fa79..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go
    +++ /dev/null
    @@ -1,64 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	v1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2"
    -	rest "k8s.io/client-go/rest"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -type FakeResourceV1alpha2 struct {
    -	*testing.Fake
    -}
    -
    -func (c *FakeResourceV1alpha2) PodSchedulingContexts(namespace string) v1alpha2.PodSchedulingContextInterface {
    -	return &FakePodSchedulingContexts{c, namespace}
    -}
    -
    -func (c *FakeResourceV1alpha2) ResourceClaims(namespace string) v1alpha2.ResourceClaimInterface {
    -	return &FakeResourceClaims{c, namespace}
    -}
    -
    -func (c *FakeResourceV1alpha2) ResourceClaimParameters(namespace string) v1alpha2.ResourceClaimParametersInterface {
    -	return &FakeResourceClaimParameters{c, namespace}
    -}
    -
    -func (c *FakeResourceV1alpha2) ResourceClaimTemplates(namespace string) v1alpha2.ResourceClaimTemplateInterface {
    -	return &FakeResourceClaimTemplates{c, namespace}
    -}
    -
    -func (c *FakeResourceV1alpha2) ResourceClasses() v1alpha2.ResourceClassInterface {
    -	return &FakeResourceClasses{c}
    -}
    -
    -func (c *FakeResourceV1alpha2) ResourceClassParameters(namespace string) v1alpha2.ResourceClassParametersInterface {
    -	return &FakeResourceClassParameters{c, namespace}
    -}
    -
    -func (c *FakeResourceV1alpha2) ResourceSlices() v1alpha2.ResourceSliceInterface {
    -	return &FakeResourceSlices{c}
    -}
    -
    -// RESTClient returns a RESTClient that is used to communicate
    -// with API server by this client implementation.
    -func (c *FakeResourceV1alpha2) RESTClient() rest.Interface {
    -	var ret *rest.RESTClient
    -	return ret
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go
    deleted file mode 100644
    index 087e51f714..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go
    +++ /dev/null
    @@ -1,189 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	labels "k8s.io/apimachinery/pkg/labels"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -// FakeResourceClaims implements ResourceClaimInterface
    -type FakeResourceClaims struct {
    -	Fake *FakeResourceV1alpha2
    -	ns   string
    -}
    -
    -var resourceclaimsResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaims")
    -
    -var resourceclaimsKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim")
    -
    -// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any.
    -func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(resourceclaimsResource, c.ns, name), &v1alpha2.ResourceClaim{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaim), err
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors.
    -func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(resourceclaimsResource, resourceclaimsKind, c.ns, opts), &v1alpha2.ResourceClaimList{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -
    -	label, _, _ := testing.ExtractFromListOptions(opts)
    -	if label == nil {
    -		label = labels.Everything()
    -	}
    -	list := &v1alpha2.ResourceClaimList{ListMeta: obj.(*v1alpha2.ResourceClaimList).ListMeta}
    -	for _, item := range obj.(*v1alpha2.ResourceClaimList).Items {
    -		if label.Matches(labels.Set(item.Labels)) {
    -			list.Items = append(list.Items, item)
    -		}
    -	}
    -	return list, err
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClaims.
    -func (c *FakeResourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(resourceclaimsResource, c.ns, opts))
    -
    -}
    -
    -// Create takes the representation of a resourceClaim and creates it.  Returns the server's representation of the resourceClaim, and an error, if there is any.
    -func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaim), err
    -}
    -
    -// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any.
    -func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaim), err
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateSubresourceAction(resourceclaimsResource, "status", c.ns, resourceClaim), &v1alpha2.ResourceClaim{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaim), err
    -}
    -
    -// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs.
    -func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	_, err := c.Fake.
    -		Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha2.ResourceClaim{})
    -
    -	return err
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *FakeResourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(resourceclaimsResource, c.ns, listOpts)
    -
    -	_, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimList{})
    -	return err
    -}
    -
    -// Patch applies the patch and returns the patched resourceClaim.
    -func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaim{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaim), err
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim.
    -func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	if resourceClaim == nil {
    -		return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(resourceClaim)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClaim.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaim{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaim), err
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	if resourceClaim == nil {
    -		return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(resourceClaim)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClaim.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.ResourceClaim{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaim), err
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go
    deleted file mode 100644
    index da32b5caec..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go
    +++ /dev/null
    @@ -1,154 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	labels "k8s.io/apimachinery/pkg/labels"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -// FakeResourceClaimParameters implements ResourceClaimParametersInterface
    -type FakeResourceClaimParameters struct {
    -	Fake *FakeResourceV1alpha2
    -	ns   string
    -}
    -
    -var resourceclaimparametersResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimparameters")
    -
    -var resourceclaimparametersKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimParameters")
    -
    -// Get takes name of the resourceClaimParameters, and returns the corresponding resourceClaimParameters object, and an error if there is any.
    -func (c *FakeResourceClaimParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(resourceclaimparametersResource, c.ns, name), &v1alpha2.ResourceClaimParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimParameters), err
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClaimParameters that match those selectors.
    -func (c *FakeResourceClaimParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimParametersList, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(resourceclaimparametersResource, resourceclaimparametersKind, c.ns, opts), &v1alpha2.ResourceClaimParametersList{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -
    -	label, _, _ := testing.ExtractFromListOptions(opts)
    -	if label == nil {
    -		label = labels.Everything()
    -	}
    -	list := &v1alpha2.ResourceClaimParametersList{ListMeta: obj.(*v1alpha2.ResourceClaimParametersList).ListMeta}
    -	for _, item := range obj.(*v1alpha2.ResourceClaimParametersList).Items {
    -		if label.Matches(labels.Set(item.Labels)) {
    -			list.Items = append(list.Items, item)
    -		}
    -	}
    -	return list, err
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClaimParameters.
    -func (c *FakeResourceClaimParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(resourceclaimparametersResource, c.ns, opts))
    -
    -}
    -
    -// Create takes the representation of a resourceClaimParameters and creates it.  Returns the server's representation of the resourceClaimParameters, and an error, if there is any.
    -func (c *FakeResourceClaimParameters) Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(resourceclaimparametersResource, c.ns, resourceClaimParameters), &v1alpha2.ResourceClaimParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimParameters), err
    -}
    -
    -// Update takes the representation of a resourceClaimParameters and updates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any.
    -func (c *FakeResourceClaimParameters) Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(resourceclaimparametersResource, c.ns, resourceClaimParameters), &v1alpha2.ResourceClaimParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimParameters), err
    -}
    -
    -// Delete takes name of the resourceClaimParameters and deletes it. Returns an error if one occurs.
    -func (c *FakeResourceClaimParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	_, err := c.Fake.
    -		Invokes(testing.NewDeleteActionWithOptions(resourceclaimparametersResource, c.ns, name, opts), &v1alpha2.ResourceClaimParameters{})
    -
    -	return err
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *FakeResourceClaimParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(resourceclaimparametersResource, c.ns, listOpts)
    -
    -	_, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimParametersList{})
    -	return err
    -}
    -
    -// Patch applies the patch and returns the patched resourceClaimParameters.
    -func (c *FakeResourceClaimParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclaimparametersResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimParameters), err
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimParameters.
    -func (c *FakeResourceClaimParameters) Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	if resourceClaimParameters == nil {
    -		return nil, fmt.Errorf("resourceClaimParameters provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(resourceClaimParameters)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClaimParameters.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaimParameters.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclaimparametersResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimParameters), err
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go
    deleted file mode 100644
    index 2a1b4554eb..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go
    +++ /dev/null
    @@ -1,154 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	labels "k8s.io/apimachinery/pkg/labels"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -// FakeResourceClaimTemplates implements ResourceClaimTemplateInterface
    -type FakeResourceClaimTemplates struct {
    -	Fake *FakeResourceV1alpha2
    -	ns   string
    -}
    -
    -var resourceclaimtemplatesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates")
    -
    -var resourceclaimtemplatesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimTemplate")
    -
    -// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any.
    -func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(resourceclaimtemplatesResource, c.ns, name), &v1alpha2.ResourceClaimTemplate{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimTemplate), err
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors.
    -func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), &v1alpha2.ResourceClaimTemplateList{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -
    -	label, _, _ := testing.ExtractFromListOptions(opts)
    -	if label == nil {
    -		label = labels.Everything()
    -	}
    -	list := &v1alpha2.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha2.ResourceClaimTemplateList).ListMeta}
    -	for _, item := range obj.(*v1alpha2.ResourceClaimTemplateList).Items {
    -		if label.Matches(labels.Set(item.Labels)) {
    -			list.Items = append(list.Items, item)
    -		}
    -	}
    -	return list, err
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClaimTemplates.
    -func (c *FakeResourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(resourceclaimtemplatesResource, c.ns, opts))
    -
    -}
    -
    -// Create takes the representation of a resourceClaimTemplate and creates it.  Returns the server's representation of the resourceClaimTemplate, and an error, if there is any.
    -func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimTemplate), err
    -}
    -
    -// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any.
    -func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimTemplate), err
    -}
    -
    -// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs.
    -func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	_, err := c.Fake.
    -		Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha2.ResourceClaimTemplate{})
    -
    -	return err
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *FakeResourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(resourceclaimtemplatesResource, c.ns, listOpts)
    -
    -	_, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimTemplateList{})
    -	return err
    -}
    -
    -// Patch applies the patch and returns the patched resourceClaimTemplate.
    -func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimTemplate{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimTemplate), err
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate.
    -func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	if resourceClaimTemplate == nil {
    -		return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(resourceClaimTemplate)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClaimTemplate.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimTemplate{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClaimTemplate), err
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go
    deleted file mode 100644
    index 4d247c5136..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go
    +++ /dev/null
    @@ -1,145 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	labels "k8s.io/apimachinery/pkg/labels"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -// FakeResourceClasses implements ResourceClassInterface
    -type FakeResourceClasses struct {
    -	Fake *FakeResourceV1alpha2
    -}
    -
    -var resourceclassesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclasses")
    -
    -var resourceclassesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClass")
    -
    -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any.
    -func (c *FakeResourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(resourceclassesResource, name), &v1alpha2.ResourceClass{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClass), err
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors.
    -func (c *FakeResourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(resourceclassesResource, resourceclassesKind, opts), &v1alpha2.ResourceClassList{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -
    -	label, _, _ := testing.ExtractFromListOptions(opts)
    -	if label == nil {
    -		label = labels.Everything()
    -	}
    -	list := &v1alpha2.ResourceClassList{ListMeta: obj.(*v1alpha2.ResourceClassList).ListMeta}
    -	for _, item := range obj.(*v1alpha2.ResourceClassList).Items {
    -		if label.Matches(labels.Set(item.Labels)) {
    -			list.Items = append(list.Items, item)
    -		}
    -	}
    -	return list, err
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClasses.
    -func (c *FakeResourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(resourceclassesResource, opts))
    -}
    -
    -// Create takes the representation of a resourceClass and creates it.  Returns the server's representation of the resourceClass, and an error, if there is any.
    -func (c *FakeResourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClass), err
    -}
    -
    -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any.
    -func (c *FakeResourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClass), err
    -}
    -
    -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs.
    -func (c *FakeResourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	_, err := c.Fake.
    -		Invokes(testing.NewRootDeleteActionWithOptions(resourceclassesResource, name, opts), &v1alpha2.ResourceClass{})
    -	return err
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *FakeResourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(resourceclassesResource, listOpts)
    -
    -	_, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassList{})
    -	return err
    -}
    -
    -// Patch applies the patch and returns the patched resourceClass.
    -func (c *FakeResourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, name, pt, data, subresources...), &v1alpha2.ResourceClass{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClass), err
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass.
    -func (c *FakeResourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) {
    -	if resourceClass == nil {
    -		return nil, fmt.Errorf("resourceClass provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(resourceClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClass.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClass{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClass), err
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go
    deleted file mode 100644
    index c11762963f..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go
    +++ /dev/null
    @@ -1,154 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	labels "k8s.io/apimachinery/pkg/labels"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -// FakeResourceClassParameters implements ResourceClassParametersInterface
    -type FakeResourceClassParameters struct {
    -	Fake *FakeResourceV1alpha2
    -	ns   string
    -}
    -
    -var resourceclassparametersResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclassparameters")
    -
    -var resourceclassparametersKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClassParameters")
    -
    -// Get takes name of the resourceClassParameters, and returns the corresponding resourceClassParameters object, and an error if there is any.
    -func (c *FakeResourceClassParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(resourceclassparametersResource, c.ns, name), &v1alpha2.ResourceClassParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClassParameters), err
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClassParameters that match those selectors.
    -func (c *FakeResourceClassParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassParametersList, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(resourceclassparametersResource, resourceclassparametersKind, c.ns, opts), &v1alpha2.ResourceClassParametersList{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -
    -	label, _, _ := testing.ExtractFromListOptions(opts)
    -	if label == nil {
    -		label = labels.Everything()
    -	}
    -	list := &v1alpha2.ResourceClassParametersList{ListMeta: obj.(*v1alpha2.ResourceClassParametersList).ListMeta}
    -	for _, item := range obj.(*v1alpha2.ResourceClassParametersList).Items {
    -		if label.Matches(labels.Set(item.Labels)) {
    -			list.Items = append(list.Items, item)
    -		}
    -	}
    -	return list, err
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClassParameters.
    -func (c *FakeResourceClassParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(resourceclassparametersResource, c.ns, opts))
    -
    -}
    -
    -// Create takes the representation of a resourceClassParameters and creates it.  Returns the server's representation of the resourceClassParameters, and an error, if there is any.
    -func (c *FakeResourceClassParameters) Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(resourceclassparametersResource, c.ns, resourceClassParameters), &v1alpha2.ResourceClassParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClassParameters), err
    -}
    -
    -// Update takes the representation of a resourceClassParameters and updates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any.
    -func (c *FakeResourceClassParameters) Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(resourceclassparametersResource, c.ns, resourceClassParameters), &v1alpha2.ResourceClassParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClassParameters), err
    -}
    -
    -// Delete takes name of the resourceClassParameters and deletes it. Returns an error if one occurs.
    -func (c *FakeResourceClassParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	_, err := c.Fake.
    -		Invokes(testing.NewDeleteActionWithOptions(resourceclassparametersResource, c.ns, name, opts), &v1alpha2.ResourceClassParameters{})
    -
    -	return err
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *FakeResourceClassParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(resourceclassparametersResource, c.ns, listOpts)
    -
    -	_, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassParametersList{})
    -	return err
    -}
    -
    -// Patch applies the patch and returns the patched resourceClassParameters.
    -func (c *FakeResourceClassParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclassparametersResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClassParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClassParameters), err
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClassParameters.
    -func (c *FakeResourceClassParameters) Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	if resourceClassParameters == nil {
    -		return nil, fmt.Errorf("resourceClassParameters provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(resourceClassParameters)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClassParameters.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClassParameters.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(resourceclassparametersResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClassParameters{})
    -
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceClassParameters), err
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go
    deleted file mode 100644
    index 325e729e92..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go
    +++ /dev/null
    @@ -1,145 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package fake
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	labels "k8s.io/apimachinery/pkg/labels"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	testing "k8s.io/client-go/testing"
    -)
    -
    -// FakeResourceSlices implements ResourceSliceInterface
    -type FakeResourceSlices struct {
    -	Fake *FakeResourceV1alpha2
    -}
    -
    -var resourceslicesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceslices")
    -
    -var resourceslicesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceSlice")
    -
    -// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any.
    -func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(resourceslicesResource, name), &v1alpha2.ResourceSlice{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceSlice), err
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors.
    -func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(resourceslicesResource, resourceslicesKind, opts), &v1alpha2.ResourceSliceList{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -
    -	label, _, _ := testing.ExtractFromListOptions(opts)
    -	if label == nil {
    -		label = labels.Everything()
    -	}
    -	list := &v1alpha2.ResourceSliceList{ListMeta: obj.(*v1alpha2.ResourceSliceList).ListMeta}
    -	for _, item := range obj.(*v1alpha2.ResourceSliceList).Items {
    -		if label.Matches(labels.Set(item.Labels)) {
    -			list.Items = append(list.Items, item)
    -		}
    -	}
    -	return list, err
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceSlices.
    -func (c *FakeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(resourceslicesResource, opts))
    -}
    -
    -// Create takes the representation of a resourceSlice and creates it.  Returns the server's representation of the resourceSlice, and an error, if there is any.
    -func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceSlice), err
    -}
    -
    -// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any.
    -func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceSlice), err
    -}
    -
    -// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs.
    -func (c *FakeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	_, err := c.Fake.
    -		Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha2.ResourceSlice{})
    -	return err
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *FakeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(resourceslicesResource, listOpts)
    -
    -	_, err := c.Fake.Invokes(action, &v1alpha2.ResourceSliceList{})
    -	return err
    -}
    -
    -// Patch applies the patch and returns the patched resourceSlice.
    -func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) {
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, name, pt, data, subresources...), &v1alpha2.ResourceSlice{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceSlice), err
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice.
    -func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	if resourceSlice == nil {
    -		return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil")
    -	}
    -	data, err := json.Marshal(resourceSlice)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceSlice.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply")
    -	}
    -	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceSlice{})
    -	if obj == nil {
    -		return nil, err
    -	}
    -	return obj.(*v1alpha2.ResourceSlice), err
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go
    deleted file mode 100644
    index d11410bb9b..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go
    +++ /dev/null
    @@ -1,33 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -type PodSchedulingContextExpansion interface{}
    -
    -type ResourceClaimExpansion interface{}
    -
    -type ResourceClaimParametersExpansion interface{}
    -
    -type ResourceClaimTemplateExpansion interface{}
    -
    -type ResourceClassExpansion interface{}
    -
    -type ResourceClassParametersExpansion interface{}
    -
    -type ResourceSliceExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go
    deleted file mode 100644
    index 72e81a29e3..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go
    +++ /dev/null
    @@ -1,256 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface.
    -// A group's client should implement this interface.
    -type PodSchedulingContextsGetter interface {
    -	PodSchedulingContexts(namespace string) PodSchedulingContextInterface
    -}
    -
    -// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources.
    -type PodSchedulingContextInterface interface {
    -	Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error)
    -	Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error)
    -	UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error)
    -	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    -	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    -	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error)
    -	List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error)
    -	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    -	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error)
    -	Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error)
    -	ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error)
    -	PodSchedulingContextExpansion
    -}
    -
    -// podSchedulingContexts implements PodSchedulingContextInterface
    -type podSchedulingContexts struct {
    -	client rest.Interface
    -	ns     string
    -}
    -
    -// newPodSchedulingContexts returns a PodSchedulingContexts
    -func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts {
    -	return &podSchedulingContexts{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    -	}
    -}
    -
    -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any.
    -func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	result = &v1alpha2.PodSchedulingContext{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors.
    -func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha2.PodSchedulingContextList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested podSchedulingContexts.
    -func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a podSchedulingContext and creates it.  Returns the server's representation of the podSchedulingContext, and an error, if there is any.
    -func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	result = &v1alpha2.PodSchedulingContext{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podSchedulingContext).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any.
    -func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	result = &v1alpha2.PodSchedulingContext{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		Name(podSchedulingContext.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podSchedulingContext).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	result = &v1alpha2.PodSchedulingContext{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		Name(podSchedulingContext.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(podSchedulingContext).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs.
    -func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched podSchedulingContext.
    -func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) {
    -	result = &v1alpha2.PodSchedulingContext{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext.
    -func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	if podSchedulingContext == nil {
    -		return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(podSchedulingContext)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := podSchedulingContext.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
    -	}
    -	result = &v1alpha2.PodSchedulingContext{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
    -	if podSchedulingContext == nil {
    -		return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(podSchedulingContext)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := podSchedulingContext.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
    -	}
    -
    -	result = &v1alpha2.PodSchedulingContext{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("podschedulingcontexts").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go
    deleted file mode 100644
    index 8e258b3e1c..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go
    +++ /dev/null
    @@ -1,137 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"net/http"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	"k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -type ResourceV1alpha2Interface interface {
    -	RESTClient() rest.Interface
    -	PodSchedulingContextsGetter
    -	ResourceClaimsGetter
    -	ResourceClaimParametersGetter
    -	ResourceClaimTemplatesGetter
    -	ResourceClassesGetter
    -	ResourceClassParametersGetter
    -	ResourceSlicesGetter
    -}
    -
    -// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group.
    -type ResourceV1alpha2Client struct {
    -	restClient rest.Interface
    -}
    -
    -func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface {
    -	return newPodSchedulingContexts(c, namespace)
    -}
    -
    -func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface {
    -	return newResourceClaims(c, namespace)
    -}
    -
    -func (c *ResourceV1alpha2Client) ResourceClaimParameters(namespace string) ResourceClaimParametersInterface {
    -	return newResourceClaimParameters(c, namespace)
    -}
    -
    -func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface {
    -	return newResourceClaimTemplates(c, namespace)
    -}
    -
    -func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface {
    -	return newResourceClasses(c)
    -}
    -
    -func (c *ResourceV1alpha2Client) ResourceClassParameters(namespace string) ResourceClassParametersInterface {
    -	return newResourceClassParameters(c, namespace)
    -}
    -
    -func (c *ResourceV1alpha2Client) ResourceSlices() ResourceSliceInterface {
    -	return newResourceSlices(c)
    -}
    -
    -// NewForConfig creates a new ResourceV1alpha2Client for the given config.
    -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
    -// where httpClient was generated with rest.HTTPClientFor(c).
    -func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) {
    -	config := *c
    -	if err := setConfigDefaults(&config); err != nil {
    -		return nil, err
    -	}
    -	httpClient, err := rest.HTTPClientFor(&config)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return NewForConfigAndClient(&config, httpClient)
    -}
    -
    -// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client.
    -// Note the http client provided takes precedence over the configured transport values.
    -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) {
    -	config := *c
    -	if err := setConfigDefaults(&config); err != nil {
    -		return nil, err
    -	}
    -	client, err := rest.RESTClientForConfigAndClient(&config, h)
    -	if err != nil {
    -		return nil, err
    -	}
    -	return &ResourceV1alpha2Client{client}, nil
    -}
    -
    -// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and
    -// panics if there is an error in the config.
    -func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client {
    -	client, err := NewForConfig(c)
    -	if err != nil {
    -		panic(err)
    -	}
    -	return client
    -}
    -
    -// New creates a new ResourceV1alpha2Client for the given RESTClient.
    -func New(c rest.Interface) *ResourceV1alpha2Client {
    -	return &ResourceV1alpha2Client{c}
    -}
    -
    -func setConfigDefaults(config *rest.Config) error {
    -	gv := v1alpha2.SchemeGroupVersion
    -	config.GroupVersion = &gv
    -	config.APIPath = "/apis"
    -	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
    -
    -	if config.UserAgent == "" {
    -		config.UserAgent = rest.DefaultKubernetesUserAgent()
    -	}
    -
    -	return nil
    -}
    -
    -// RESTClient returns a RESTClient that is used to communicate
    -// with API server by this client implementation.
    -func (c *ResourceV1alpha2Client) RESTClient() rest.Interface {
    -	if c == nil {
    -		return nil
    -	}
    -	return c.restClient
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go
    deleted file mode 100644
    index cfb27c9db6..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go
    +++ /dev/null
    @@ -1,256 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -// ResourceClaimsGetter has a method to return a ResourceClaimInterface.
    -// A group's client should implement this interface.
    -type ResourceClaimsGetter interface {
    -	ResourceClaims(namespace string) ResourceClaimInterface
    -}
    -
    -// ResourceClaimInterface has methods to work with ResourceClaim resources.
    -type ResourceClaimInterface interface {
    -	Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error)
    -	Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error)
    -	UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error)
    -	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    -	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    -	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error)
    -	List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error)
    -	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    -	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error)
    -	Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error)
    -	ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error)
    -	ResourceClaimExpansion
    -}
    -
    -// resourceClaims implements ResourceClaimInterface
    -type resourceClaims struct {
    -	client rest.Interface
    -	ns     string
    -}
    -
    -// newResourceClaims returns a ResourceClaims
    -func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims {
    -	return &resourceClaims{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    -	}
    -}
    -
    -// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any.
    -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	result = &v1alpha2.ResourceClaim{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors.
    -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha2.ResourceClaimList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClaims.
    -func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a resourceClaim and creates it.  Returns the server's representation of the resourceClaim, and an error, if there is any.
    -func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	result = &v1alpha2.ResourceClaim{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClaim).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any.
    -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	result = &v1alpha2.ResourceClaim{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		Name(resourceClaim.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClaim).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	result = &v1alpha2.ResourceClaim{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		Name(resourceClaim.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClaim).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs.
    -func (c *resourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched resourceClaim.
    -func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) {
    -	result = &v1alpha2.ResourceClaim{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim.
    -func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	if resourceClaim == nil {
    -		return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceClaim)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClaim.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply")
    -	}
    -	result = &v1alpha2.ResourceClaim{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) {
    -	if resourceClaim == nil {
    -		return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceClaim)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := resourceClaim.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply")
    -	}
    -
    -	result = &v1alpha2.ResourceClaim{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("resourceclaims").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go
    deleted file mode 100644
    index d08afcb611..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go
    +++ /dev/null
    @@ -1,208 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -// ResourceClaimParametersGetter has a method to return a ResourceClaimParametersInterface.
    -// A group's client should implement this interface.
    -type ResourceClaimParametersGetter interface {
    -	ResourceClaimParameters(namespace string) ResourceClaimParametersInterface
    -}
    -
    -// ResourceClaimParametersInterface has methods to work with ResourceClaimParameters resources.
    -type ResourceClaimParametersInterface interface {
    -	Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (*v1alpha2.ResourceClaimParameters, error)
    -	Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimParameters, error)
    -	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    -	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    -	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimParameters, error)
    -	List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimParametersList, error)
    -	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    -	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error)
    -	Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error)
    -	ResourceClaimParametersExpansion
    -}
    -
    -// resourceClaimParameters implements ResourceClaimParametersInterface
    -type resourceClaimParameters struct {
    -	client rest.Interface
    -	ns     string
    -}
    -
    -// newResourceClaimParameters returns a ResourceClaimParameters
    -func newResourceClaimParameters(c *ResourceV1alpha2Client, namespace string) *resourceClaimParameters {
    -	return &resourceClaimParameters{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    -	}
    -}
    -
    -// Get takes name of the resourceClaimParameters, and returns the corresponding resourceClaimParameters object, and an error if there is any.
    -func (c *resourceClaimParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	result = &v1alpha2.ResourceClaimParameters{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClaimParameters that match those selectors.
    -func (c *resourceClaimParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimParametersList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha2.ResourceClaimParametersList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClaimParameters.
    -func (c *resourceClaimParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a resourceClaimParameters and creates it.  Returns the server's representation of the resourceClaimParameters, and an error, if there is any.
    -func (c *resourceClaimParameters) Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	result = &v1alpha2.ResourceClaimParameters{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClaimParameters).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a resourceClaimParameters and updates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any.
    -func (c *resourceClaimParameters) Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	result = &v1alpha2.ResourceClaimParameters{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		Name(resourceClaimParameters.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClaimParameters).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the resourceClaimParameters and deletes it. Returns an error if one occurs.
    -func (c *resourceClaimParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *resourceClaimParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched resourceClaimParameters.
    -func (c *resourceClaimParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	result = &v1alpha2.ResourceClaimParameters{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimParameters.
    -func (c *resourceClaimParameters) Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) {
    -	if resourceClaimParameters == nil {
    -		return nil, fmt.Errorf("resourceClaimParameters provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceClaimParameters)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClaimParameters.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaimParameters.Name must be provided to Apply")
    -	}
    -	result = &v1alpha2.ResourceClaimParameters{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("resourceclaimparameters").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go
    deleted file mode 100644
    index 3f4e320064..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go
    +++ /dev/null
    @@ -1,208 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface.
    -// A group's client should implement this interface.
    -type ResourceClaimTemplatesGetter interface {
    -	ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface
    -}
    -
    -// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources.
    -type ResourceClaimTemplateInterface interface {
    -	Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error)
    -	Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error)
    -	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    -	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    -	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error)
    -	List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error)
    -	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    -	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error)
    -	Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error)
    -	ResourceClaimTemplateExpansion
    -}
    -
    -// resourceClaimTemplates implements ResourceClaimTemplateInterface
    -type resourceClaimTemplates struct {
    -	client rest.Interface
    -	ns     string
    -}
    -
    -// newResourceClaimTemplates returns a ResourceClaimTemplates
    -func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates {
    -	return &resourceClaimTemplates{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    -	}
    -}
    -
    -// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any.
    -func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	result = &v1alpha2.ResourceClaimTemplate{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors.
    -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha2.ResourceClaimTemplateList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClaimTemplates.
    -func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a resourceClaimTemplate and creates it.  Returns the server's representation of the resourceClaimTemplate, and an error, if there is any.
    -func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	result = &v1alpha2.ResourceClaimTemplate{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClaimTemplate).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any.
    -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	result = &v1alpha2.ResourceClaimTemplate{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		Name(resourceClaimTemplate.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClaimTemplate).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs.
    -func (c *resourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched resourceClaimTemplate.
    -func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	result = &v1alpha2.ResourceClaimTemplate{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate.
    -func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) {
    -	if resourceClaimTemplate == nil {
    -		return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceClaimTemplate)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClaimTemplate.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply")
    -	}
    -	result = &v1alpha2.ResourceClaimTemplate{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("resourceclaimtemplates").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go
    deleted file mode 100644
    index 95a4ac5668..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go
    +++ /dev/null
    @@ -1,197 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -// ResourceClassesGetter has a method to return a ResourceClassInterface.
    -// A group's client should implement this interface.
    -type ResourceClassesGetter interface {
    -	ResourceClasses() ResourceClassInterface
    -}
    -
    -// ResourceClassInterface has methods to work with ResourceClass resources.
    -type ResourceClassInterface interface {
    -	Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error)
    -	Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error)
    -	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    -	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    -	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error)
    -	List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error)
    -	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    -	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error)
    -	Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error)
    -	ResourceClassExpansion
    -}
    -
    -// resourceClasses implements ResourceClassInterface
    -type resourceClasses struct {
    -	client rest.Interface
    -}
    -
    -// newResourceClasses returns a ResourceClasses
    -func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses {
    -	return &resourceClasses{
    -		client: c.RESTClient(),
    -	}
    -}
    -
    -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any.
    -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) {
    -	result = &v1alpha2.ResourceClass{}
    -	err = c.client.Get().
    -		Resource("resourceclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors.
    -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha2.ResourceClassList{}
    -	err = c.client.Get().
    -		Resource("resourceclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClasses.
    -func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("resourceclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a resourceClass and creates it.  Returns the server's representation of the resourceClass, and an error, if there is any.
    -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) {
    -	result = &v1alpha2.ResourceClass{}
    -	err = c.client.Post().
    -		Resource("resourceclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any.
    -func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) {
    -	result = &v1alpha2.ResourceClass{}
    -	err = c.client.Put().
    -		Resource("resourceclasses").
    -		Name(resourceClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs.
    -func (c *resourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("resourceclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("resourceclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched resourceClass.
    -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) {
    -	result = &v1alpha2.ResourceClass{}
    -	err = c.client.Patch(pt).
    -		Resource("resourceclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass.
    -func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) {
    -	if resourceClass == nil {
    -		return nil, fmt.Errorf("resourceClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClass.Name must be provided to Apply")
    -	}
    -	result = &v1alpha2.ResourceClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("resourceclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go
    deleted file mode 100644
    index 8ac9be0784..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go
    +++ /dev/null
    @@ -1,208 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -// ResourceClassParametersGetter has a method to return a ResourceClassParametersInterface.
    -// A group's client should implement this interface.
    -type ResourceClassParametersGetter interface {
    -	ResourceClassParameters(namespace string) ResourceClassParametersInterface
    -}
    -
    -// ResourceClassParametersInterface has methods to work with ResourceClassParameters resources.
    -type ResourceClassParametersInterface interface {
    -	Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (*v1alpha2.ResourceClassParameters, error)
    -	Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (*v1alpha2.ResourceClassParameters, error)
    -	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    -	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    -	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClassParameters, error)
    -	List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassParametersList, error)
    -	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    -	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error)
    -	Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error)
    -	ResourceClassParametersExpansion
    -}
    -
    -// resourceClassParameters implements ResourceClassParametersInterface
    -type resourceClassParameters struct {
    -	client rest.Interface
    -	ns     string
    -}
    -
    -// newResourceClassParameters returns a ResourceClassParameters
    -func newResourceClassParameters(c *ResourceV1alpha2Client, namespace string) *resourceClassParameters {
    -	return &resourceClassParameters{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    -	}
    -}
    -
    -// Get takes name of the resourceClassParameters, and returns the corresponding resourceClassParameters object, and an error if there is any.
    -func (c *resourceClassParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	result = &v1alpha2.ResourceClassParameters{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceClassParameters that match those selectors.
    -func (c *resourceClassParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassParametersList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha2.ResourceClassParametersList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceClassParameters.
    -func (c *resourceClassParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a resourceClassParameters and creates it.  Returns the server's representation of the resourceClassParameters, and an error, if there is any.
    -func (c *resourceClassParameters) Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	result = &v1alpha2.ResourceClassParameters{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClassParameters).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a resourceClassParameters and updates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any.
    -func (c *resourceClassParameters) Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	result = &v1alpha2.ResourceClassParameters{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		Name(resourceClassParameters.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceClassParameters).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the resourceClassParameters and deletes it. Returns an error if one occurs.
    -func (c *resourceClassParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *resourceClassParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched resourceClassParameters.
    -func (c *resourceClassParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) {
    -	result = &v1alpha2.ResourceClassParameters{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClassParameters.
    -func (c *resourceClassParameters) Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) {
    -	if resourceClassParameters == nil {
    -		return nil, fmt.Errorf("resourceClassParameters provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceClassParameters)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceClassParameters.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceClassParameters.Name must be provided to Apply")
    -	}
    -	result = &v1alpha2.ResourceClassParameters{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("resourceclassparameters").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go
    deleted file mode 100644
    index 302f370d52..0000000000
    --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go
    +++ /dev/null
    @@ -1,197 +0,0 @@
    -/*
    -Copyright The Kubernetes Authors.
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -*/
    -
    -// Code generated by client-gen. DO NOT EDIT.
    -
    -package v1alpha2
    -
    -import (
    -	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
    -
    -	v1alpha2 "k8s.io/api/resource/v1alpha2"
    -	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    -	types "k8s.io/apimachinery/pkg/types"
    -	watch "k8s.io/apimachinery/pkg/watch"
    -	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
    -	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
    -)
    -
    -// ResourceSlicesGetter has a method to return a ResourceSliceInterface.
    -// A group's client should implement this interface.
    -type ResourceSlicesGetter interface {
    -	ResourceSlices() ResourceSliceInterface
    -}
    -
    -// ResourceSliceInterface has methods to work with ResourceSlice resources.
    -type ResourceSliceInterface interface {
    -	Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (*v1alpha2.ResourceSlice, error)
    -	Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (*v1alpha2.ResourceSlice, error)
    -	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    -	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    -	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceSlice, error)
    -	List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceSliceList, error)
    -	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    -	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error)
    -	Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error)
    -	ResourceSliceExpansion
    -}
    -
    -// resourceSlices implements ResourceSliceInterface
    -type resourceSlices struct {
    -	client rest.Interface
    -}
    -
    -// newResourceSlices returns a ResourceSlices
    -func newResourceSlices(c *ResourceV1alpha2Client) *resourceSlices {
    -	return &resourceSlices{
    -		client: c.RESTClient(),
    -	}
    -}
    -
    -// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any.
    -func (c *resourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	result = &v1alpha2.ResourceSlice{}
    -	err = c.client.Get().
    -		Resource("resourceslices").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors.
    -func (c *resourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha2.ResourceSliceList{}
    -	err = c.client.Get().
    -		Resource("resourceslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested resourceSlices.
    -func (c *resourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("resourceslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a resourceSlice and creates it.  Returns the server's representation of the resourceSlice, and an error, if there is any.
    -func (c *resourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	result = &v1alpha2.ResourceSlice{}
    -	err = c.client.Post().
    -		Resource("resourceslices").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceSlice).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any.
    -func (c *resourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	result = &v1alpha2.ResourceSlice{}
    -	err = c.client.Put().
    -		Resource("resourceslices").
    -		Name(resourceSlice.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(resourceSlice).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs.
    -func (c *resourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("resourceslices").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *resourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("resourceslices").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched resourceSlice.
    -func (c *resourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) {
    -	result = &v1alpha2.ResourceSlice{}
    -	err = c.client.Patch(pt).
    -		Resource("resourceslices").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice.
    -func (c *resourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) {
    -	if resourceSlice == nil {
    -		return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(resourceSlice)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := resourceSlice.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply")
    -	}
    -	result = &v1alpha2.ResourceSlice{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("resourceslices").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go
    new file mode 100644
    index 0000000000..35455dfa35
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go
    @@ -0,0 +1,69 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// DeviceClassesGetter has a method to return a DeviceClassInterface.
    +// A group's client should implement this interface.
    +type DeviceClassesGetter interface {
    +	DeviceClasses() DeviceClassInterface
    +}
    +
    +// DeviceClassInterface has methods to work with DeviceClass resources.
    +type DeviceClassInterface interface {
    +	Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (*v1alpha3.DeviceClass, error)
    +	Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (*v1alpha3.DeviceClass, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.DeviceClass, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.DeviceClassList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error)
    +	Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error)
    +	DeviceClassExpansion
    +}
    +
    +// deviceClasses implements DeviceClassInterface
    +type deviceClasses struct {
    +	*gentype.ClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration]
    +}
    +
    +// newDeviceClasses returns a DeviceClasses
    +func newDeviceClasses(c *ResourceV1alpha3Client) *deviceClasses {
    +	return &deviceClasses{
    +		gentype.NewClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration](
    +			"deviceclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha3.DeviceClass { return &v1alpha3.DeviceClass{} },
    +			func() *v1alpha3.DeviceClassList { return &v1alpha3.DeviceClassList{} }),
    +	}
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go
    new file mode 100644
    index 0000000000..fdb23fd37c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go
    @@ -0,0 +1,20 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +// This package has the automatically generated typed clients.
    +package v1alpha3
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go
    new file mode 100644
    index 0000000000..16f4439906
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go
    @@ -0,0 +1,20 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +// Package fake has the automatically generated clients.
    +package fake
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go
    new file mode 100644
    index 0000000000..d96cbd2219
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go
    @@ -0,0 +1,151 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeDeviceClasses implements DeviceClassInterface
    +type FakeDeviceClasses struct {
    +	Fake *FakeResourceV1alpha3
    +}
    +
    +var deviceclassesResource = v1alpha3.SchemeGroupVersion.WithResource("deviceclasses")
    +
    +var deviceclassesKind = v1alpha3.SchemeGroupVersion.WithKind("DeviceClass")
    +
    +// Get takes name of the deviceClass, and returns the corresponding deviceClass object, and an error if there is any.
    +func (c *FakeDeviceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.DeviceClass, err error) {
    +	emptyResult := &v1alpha3.DeviceClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootGetActionWithOptions(deviceclassesResource, name, options), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.DeviceClass), err
    +}
    +
    +// List takes label and field selectors, and returns the list of DeviceClasses that match those selectors.
    +func (c *FakeDeviceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.DeviceClassList, err error) {
    +	emptyResult := &v1alpha3.DeviceClassList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootListActionWithOptions(deviceclassesResource, deviceclassesKind, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1alpha3.DeviceClassList{ListMeta: obj.(*v1alpha3.DeviceClassList).ListMeta}
    +	for _, item := range obj.(*v1alpha3.DeviceClassList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested deviceClasses.
    +func (c *FakeDeviceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(deviceclassesResource, opts))
    +}
    +
    +// Create takes the representation of a deviceClass and creates it.  Returns the server's representation of the deviceClass, and an error, if there is any.
    +func (c *FakeDeviceClasses) Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (result *v1alpha3.DeviceClass, err error) {
    +	emptyResult := &v1alpha3.DeviceClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootCreateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.DeviceClass), err
    +}
    +
    +// Update takes the representation of a deviceClass and updates it. Returns the server's representation of the deviceClass, and an error, if there is any.
    +func (c *FakeDeviceClasses) Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (result *v1alpha3.DeviceClass, err error) {
    +	emptyResult := &v1alpha3.DeviceClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootUpdateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.DeviceClass), err
    +}
    +
    +// Delete takes name of the deviceClass and deletes it. Returns an error if one occurs.
    +func (c *FakeDeviceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewRootDeleteActionWithOptions(deviceclassesResource, name, opts), &v1alpha3.DeviceClass{})
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeDeviceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewRootDeleteCollectionActionWithOptions(deviceclassesResource, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1alpha3.DeviceClassList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched deviceClass.
    +func (c *FakeDeviceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) {
    +	emptyResult := &v1alpha3.DeviceClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, name, pt, data, opts, subresources...), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.DeviceClass), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied deviceClass.
    +func (c *FakeDeviceClasses) Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) {
    +	if deviceClass == nil {
    +		return nil, fmt.Errorf("deviceClass provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(deviceClass)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := deviceClass.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("deviceClass.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha3.DeviceClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.DeviceClass), err
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go
    new file mode 100644
    index 0000000000..54898993e5
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go
    @@ -0,0 +1,197 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakePodSchedulingContexts implements PodSchedulingContextInterface
    +type FakePodSchedulingContexts struct {
    +	Fake *FakeResourceV1alpha3
    +	ns   string
    +}
    +
    +var podschedulingcontextsResource = v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts")
    +
    +var podschedulingcontextsKind = v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext")
    +
    +// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any.
    +func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.PodSchedulingContext, err error) {
    +	emptyResult := &v1alpha3.PodSchedulingContext{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewGetActionWithOptions(podschedulingcontextsResource, c.ns, name, options), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.PodSchedulingContext), err
    +}
    +
    +// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors.
    +func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.PodSchedulingContextList, err error) {
    +	emptyResult := &v1alpha3.PodSchedulingContextList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewListActionWithOptions(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1alpha3.PodSchedulingContextList{ListMeta: obj.(*v1alpha3.PodSchedulingContextList).ListMeta}
    +	for _, item := range obj.(*v1alpha3.PodSchedulingContextList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested podSchedulingContexts.
    +func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewWatchActionWithOptions(podschedulingcontextsResource, c.ns, opts))
    +
    +}
    +
    +// Create takes the representation of a podSchedulingContext and creates it.  Returns the server's representation of the podSchedulingContext, and an error, if there is any.
    +func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha3.PodSchedulingContext, err error) {
    +	emptyResult := &v1alpha3.PodSchedulingContext{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewCreateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.PodSchedulingContext), err
    +}
    +
    +// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any.
    +func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) {
    +	emptyResult := &v1alpha3.PodSchedulingContext{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewUpdateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.PodSchedulingContext), err
    +}
    +
    +// UpdateStatus was generated because the type contains a Status member.
    +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    +func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) {
    +	emptyResult := &v1alpha3.PodSchedulingContext{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(podschedulingcontextsResource, "status", c.ns, podSchedulingContext, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.PodSchedulingContext), err
    +}
    +
    +// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs.
    +func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha3.PodSchedulingContext{})
    +
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewDeleteCollectionActionWithOptions(podschedulingcontextsResource, c.ns, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1alpha3.PodSchedulingContextList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched podSchedulingContext.
    +func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) {
    +	emptyResult := &v1alpha3.PodSchedulingContext{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.PodSchedulingContext), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext.
    +func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) {
    +	if podSchedulingContext == nil {
    +		return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(podSchedulingContext)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := podSchedulingContext.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha3.PodSchedulingContext{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.PodSchedulingContext), err
    +}
    +
    +// ApplyStatus was generated because the type contains a Status member.
    +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    +func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) {
    +	if podSchedulingContext == nil {
    +		return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(podSchedulingContext)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := podSchedulingContext.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha3.PodSchedulingContext{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.PodSchedulingContext), err
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go
    new file mode 100644
    index 0000000000..4523d9f09c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go
    @@ -0,0 +1,56 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	v1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3"
    +	rest "k8s.io/client-go/rest"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +type FakeResourceV1alpha3 struct {
    +	*testing.Fake
    +}
    +
    +func (c *FakeResourceV1alpha3) DeviceClasses() v1alpha3.DeviceClassInterface {
    +	return &FakeDeviceClasses{c}
    +}
    +
    +func (c *FakeResourceV1alpha3) PodSchedulingContexts(namespace string) v1alpha3.PodSchedulingContextInterface {
    +	return &FakePodSchedulingContexts{c, namespace}
    +}
    +
    +func (c *FakeResourceV1alpha3) ResourceClaims(namespace string) v1alpha3.ResourceClaimInterface {
    +	return &FakeResourceClaims{c, namespace}
    +}
    +
    +func (c *FakeResourceV1alpha3) ResourceClaimTemplates(namespace string) v1alpha3.ResourceClaimTemplateInterface {
    +	return &FakeResourceClaimTemplates{c, namespace}
    +}
    +
    +func (c *FakeResourceV1alpha3) ResourceSlices() v1alpha3.ResourceSliceInterface {
    +	return &FakeResourceSlices{c}
    +}
    +
    +// RESTClient returns a RESTClient that is used to communicate
    +// with API server by this client implementation.
    +func (c *FakeResourceV1alpha3) RESTClient() rest.Interface {
    +	var ret *rest.RESTClient
    +	return ret
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go
    new file mode 100644
    index 0000000000..db38b3d60c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go
    @@ -0,0 +1,197 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeResourceClaims implements ResourceClaimInterface
    +type FakeResourceClaims struct {
    +	Fake *FakeResourceV1alpha3
    +	ns   string
    +}
    +
    +var resourceclaimsResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaims")
    +
    +var resourceclaimsKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim")
    +
    +// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any.
    +func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaim, err error) {
    +	emptyResult := &v1alpha3.ResourceClaim{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewGetActionWithOptions(resourceclaimsResource, c.ns, name, options), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaim), err
    +}
    +
    +// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors.
    +func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimList, err error) {
    +	emptyResult := &v1alpha3.ResourceClaimList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewListActionWithOptions(resourceclaimsResource, resourceclaimsKind, c.ns, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1alpha3.ResourceClaimList{ListMeta: obj.(*v1alpha3.ResourceClaimList).ListMeta}
    +	for _, item := range obj.(*v1alpha3.ResourceClaimList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested resourceClaims.
    +func (c *FakeResourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimsResource, c.ns, opts))
    +
    +}
    +
    +// Create takes the representation of a resourceClaim and creates it.  Returns the server's representation of the resourceClaim, and an error, if there is any.
    +func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (result *v1alpha3.ResourceClaim, err error) {
    +	emptyResult := &v1alpha3.ResourceClaim{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewCreateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaim), err
    +}
    +
    +// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any.
    +func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) {
    +	emptyResult := &v1alpha3.ResourceClaim{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewUpdateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaim), err
    +}
    +
    +// UpdateStatus was generated because the type contains a Status member.
    +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    +func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) {
    +	emptyResult := &v1alpha3.ResourceClaim{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewUpdateSubresourceActionWithOptions(resourceclaimsResource, "status", c.ns, resourceClaim, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaim), err
    +}
    +
    +// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs.
    +func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha3.ResourceClaim{})
    +
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeResourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewDeleteCollectionActionWithOptions(resourceclaimsResource, c.ns, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched resourceClaim.
    +func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) {
    +	emptyResult := &v1alpha3.ResourceClaim{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaim), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim.
    +func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) {
    +	if resourceClaim == nil {
    +		return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(resourceClaim)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := resourceClaim.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha3.ResourceClaim{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaim), err
    +}
    +
    +// ApplyStatus was generated because the type contains a Status member.
    +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    +func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) {
    +	if resourceClaim == nil {
    +		return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(resourceClaim)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := resourceClaim.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha3.ResourceClaim{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaim), err
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go
    new file mode 100644
    index 0000000000..28db7261f9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go
    @@ -0,0 +1,160 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeResourceClaimTemplates implements ResourceClaimTemplateInterface
    +type FakeResourceClaimTemplates struct {
    +	Fake *FakeResourceV1alpha3
    +	ns   string
    +}
    +
    +var resourceclaimtemplatesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates")
    +
    +var resourceclaimtemplatesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate")
    +
    +// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any.
    +func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaimTemplate, err error) {
    +	emptyResult := &v1alpha3.ResourceClaimTemplate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewGetActionWithOptions(resourceclaimtemplatesResource, c.ns, name, options), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaimTemplate), err
    +}
    +
    +// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors.
    +func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimTemplateList, err error) {
    +	emptyResult := &v1alpha3.ResourceClaimTemplateList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewListActionWithOptions(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1alpha3.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha3.ResourceClaimTemplateList).ListMeta}
    +	for _, item := range obj.(*v1alpha3.ResourceClaimTemplateList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested resourceClaimTemplates.
    +func (c *FakeResourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimtemplatesResource, c.ns, opts))
    +
    +}
    +
    +// Create takes the representation of a resourceClaimTemplate and creates it.  Returns the server's representation of the resourceClaimTemplate, and an error, if there is any.
    +func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) {
    +	emptyResult := &v1alpha3.ResourceClaimTemplate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewCreateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaimTemplate), err
    +}
    +
    +// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any.
    +func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) {
    +	emptyResult := &v1alpha3.ResourceClaimTemplate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewUpdateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaimTemplate), err
    +}
    +
    +// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs.
    +func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha3.ResourceClaimTemplate{})
    +
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeResourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewDeleteCollectionActionWithOptions(resourceclaimtemplatesResource, c.ns, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimTemplateList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched resourceClaimTemplate.
    +func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) {
    +	emptyResult := &v1alpha3.ResourceClaimTemplate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaimTemplate), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate.
    +func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) {
    +	if resourceClaimTemplate == nil {
    +		return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(resourceClaimTemplate)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := resourceClaimTemplate.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha3.ResourceClaimTemplate{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceClaimTemplate), err
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go
    new file mode 100644
    index 0000000000..c355fc454a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go
    @@ -0,0 +1,151 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeResourceSlices implements ResourceSliceInterface
    +type FakeResourceSlices struct {
    +	Fake *FakeResourceV1alpha3
    +}
    +
    +var resourceslicesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceslices")
    +
    +var resourceslicesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice")
    +
    +// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any.
    +func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceSlice, err error) {
    +	emptyResult := &v1alpha3.ResourceSlice{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootGetActionWithOptions(resourceslicesResource, name, options), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceSlice), err
    +}
    +
    +// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors.
    +func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceSliceList, err error) {
    +	emptyResult := &v1alpha3.ResourceSliceList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootListActionWithOptions(resourceslicesResource, resourceslicesKind, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1alpha3.ResourceSliceList{ListMeta: obj.(*v1alpha3.ResourceSliceList).ListMeta}
    +	for _, item := range obj.(*v1alpha3.ResourceSliceList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested resourceSlices.
    +func (c *FakeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(resourceslicesResource, opts))
    +}
    +
    +// Create takes the representation of a resourceSlice and creates it.  Returns the server's representation of the resourceSlice, and an error, if there is any.
    +func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (result *v1alpha3.ResourceSlice, err error) {
    +	emptyResult := &v1alpha3.ResourceSlice{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootCreateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceSlice), err
    +}
    +
    +// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any.
    +func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha3.ResourceSlice, err error) {
    +	emptyResult := &v1alpha3.ResourceSlice{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootUpdateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceSlice), err
    +}
    +
    +// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs.
    +func (c *FakeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha3.ResourceSlice{})
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewRootDeleteCollectionActionWithOptions(resourceslicesResource, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1alpha3.ResourceSliceList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched resourceSlice.
    +func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) {
    +	emptyResult := &v1alpha3.ResourceSlice{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, name, pt, data, opts, subresources...), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceSlice), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice.
    +func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) {
    +	if resourceSlice == nil {
    +		return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(resourceSlice)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := resourceSlice.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1alpha3.ResourceSlice{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1alpha3.ResourceSlice), err
    +}
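
Editor's note: not part of the vendored change — a minimal sketch of how the regenerated fake client above might be exercised in a test. It assumes the aggregated fake clientset in this client-go version exposes ResourceV1alpha3(), and it relies on the fake object tracker not validating ResourceSlice spec fields; the test and object names are made up for illustration.

package example

import (
	"context"
	"testing"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeResourceSlices(t *testing.T) {
	ctx := context.Background()
	cs := fake.NewSimpleClientset()

	// Create is routed through the new *WithOptions action constructors shown above.
	slice := &resourcev1alpha3.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "node-1-gpu"}}
	if _, err := cs.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// The regenerated fakes return a non-nil empty object together with the error
	// instead of a bare nil, so callers can always dereference the result.
	got, err := cs.ResourceV1alpha3().ResourceSlices().Get(ctx, "node-1-gpu", metav1.GetOptions{})
	if err != nil || got.Name != "node-1-gpu" {
		t.Fatalf("unexpected result: %+v, %v", got, err)
	}
}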
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go
    new file mode 100644
    index 0000000000..747e564b76
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go
    @@ -0,0 +1,29 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +type DeviceClassExpansion interface{}
    +
    +type PodSchedulingContextExpansion interface{}
    +
    +type ResourceClaimExpansion interface{}
    +
    +type ResourceClaimTemplateExpansion interface{}
    +
    +type ResourceSliceExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go
    new file mode 100644
    index 0000000000..af59843212
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface.
    +// A group's client should implement this interface.
    +type PodSchedulingContextsGetter interface {
    +	PodSchedulingContexts(namespace string) PodSchedulingContextInterface
    +}
    +
    +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources.
    +type PodSchedulingContextInterface interface {
    +	Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha3.PodSchedulingContext, error)
    +	Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    +	UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.PodSchedulingContext, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.PodSchedulingContextList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error)
    +	Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    +	ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error)
    +	PodSchedulingContextExpansion
    +}
    +
    +// podSchedulingContexts implements PodSchedulingContextInterface
    +type podSchedulingContexts struct {
    +	*gentype.ClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration]
    +}
    +
    +// newPodSchedulingContexts returns a PodSchedulingContexts
    +func newPodSchedulingContexts(c *ResourceV1alpha3Client, namespace string) *podSchedulingContexts {
    +	return &podSchedulingContexts{
    +		gentype.NewClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration](
    +			"podschedulingcontexts",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1alpha3.PodSchedulingContext { return &v1alpha3.PodSchedulingContext{} },
    +			func() *v1alpha3.PodSchedulingContextList { return &v1alpha3.PodSchedulingContextList{} }),
    +	}
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go
    new file mode 100644
    index 0000000000..879f0990d7
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go
    @@ -0,0 +1,127 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"net/http"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	"k8s.io/client-go/kubernetes/scheme"
    +	rest "k8s.io/client-go/rest"
    +)
    +
    +type ResourceV1alpha3Interface interface {
    +	RESTClient() rest.Interface
    +	DeviceClassesGetter
    +	PodSchedulingContextsGetter
    +	ResourceClaimsGetter
    +	ResourceClaimTemplatesGetter
    +	ResourceSlicesGetter
    +}
    +
    +// ResourceV1alpha3Client is used to interact with features provided by the resource.k8s.io group.
    +type ResourceV1alpha3Client struct {
    +	restClient rest.Interface
    +}
    +
    +func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface {
    +	return newDeviceClasses(c)
    +}
    +
    +func (c *ResourceV1alpha3Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface {
    +	return newPodSchedulingContexts(c, namespace)
    +}
    +
    +func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface {
    +	return newResourceClaims(c, namespace)
    +}
    +
    +func (c *ResourceV1alpha3Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface {
    +	return newResourceClaimTemplates(c, namespace)
    +}
    +
    +func (c *ResourceV1alpha3Client) ResourceSlices() ResourceSliceInterface {
    +	return newResourceSlices(c)
    +}
    +
    +// NewForConfig creates a new ResourceV1alpha3Client for the given config.
    +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
    +// where httpClient was generated with rest.HTTPClientFor(c).
    +func NewForConfig(c *rest.Config) (*ResourceV1alpha3Client, error) {
    +	config := *c
    +	if err := setConfigDefaults(&config); err != nil {
    +		return nil, err
    +	}
    +	httpClient, err := rest.HTTPClientFor(&config)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return NewForConfigAndClient(&config, httpClient)
    +}
    +
    +// NewForConfigAndClient creates a new ResourceV1alpha3Client for the given config and http client.
    +// Note the http client provided takes precedence over the configured transport values.
    +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha3Client, error) {
    +	config := *c
    +	if err := setConfigDefaults(&config); err != nil {
    +		return nil, err
    +	}
    +	client, err := rest.RESTClientForConfigAndClient(&config, h)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return &ResourceV1alpha3Client{client}, nil
    +}
    +
    +// NewForConfigOrDie creates a new ResourceV1alpha3Client for the given config and
    +// panics if there is an error in the config.
    +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha3Client {
    +	client, err := NewForConfig(c)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return client
    +}
    +
    +// New creates a new ResourceV1alpha3Client for the given RESTClient.
    +func New(c rest.Interface) *ResourceV1alpha3Client {
    +	return &ResourceV1alpha3Client{c}
    +}
    +
    +func setConfigDefaults(config *rest.Config) error {
    +	gv := v1alpha3.SchemeGroupVersion
    +	config.GroupVersion = &gv
    +	config.APIPath = "/apis"
    +	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
    +
    +	if config.UserAgent == "" {
    +		config.UserAgent = rest.DefaultKubernetesUserAgent()
    +	}
    +
    +	return nil
    +}
    +
    +// RESTClient returns a RESTClient that is used to communicate
    +// with API server by this client implementation.
    +func (c *ResourceV1alpha3Client) RESTClient() rest.Interface {
    +	if c == nil {
    +		return nil
    +	}
    +	return c.restClient
    +}
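
Editor's note: for orientation only, a sketch (not part of the vendored code) of constructing and using this new group client. It uses the NewForConfig constructor and ResourceSlices getter defined above plus clientcmd from client-go; the kubeconfig path is an assumption for the example.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceclient "k8s.io/client-go/kubernetes/typed/resource/v1alpha3"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a local kubeconfig (path assumed for the example).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}

	// NewForConfig applies the resource.k8s.io/v1alpha3 defaults and returns the typed client.
	client, err := resourceclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// ResourceSlices is cluster-scoped, so the getter takes no namespace argument.
	slices, err := client.ResourceSlices().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("resource slices:", len(slices.Items))
}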
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go
    new file mode 100644
    index 0000000000..2ac65c005e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go
    @@ -0,0 +1,73 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// ResourceClaimsGetter has a method to return a ResourceClaimInterface.
    +// A group's client should implement this interface.
    +type ResourceClaimsGetter interface {
    +	ResourceClaims(namespace string) ResourceClaimInterface
    +}
    +
    +// ResourceClaimInterface has methods to work with ResourceClaim resources.
    +type ResourceClaimInterface interface {
    +	Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (*v1alpha3.ResourceClaim, error)
    +	Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    +	UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaim, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error)
    +	Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    +	ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error)
    +	ResourceClaimExpansion
    +}
    +
    +// resourceClaims implements ResourceClaimInterface
    +type resourceClaims struct {
    +	*gentype.ClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration]
    +}
    +
    +// newResourceClaims returns a ResourceClaims
    +func newResourceClaims(c *ResourceV1alpha3Client, namespace string) *resourceClaims {
    +	return &resourceClaims{
    +		gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration](
    +			"resourceclaims",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1alpha3.ResourceClaim { return &v1alpha3.ResourceClaim{} },
    +			func() *v1alpha3.ResourceClaimList { return &v1alpha3.ResourceClaimList{} }),
    +	}
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go
    new file mode 100644
    index 0000000000..87997bfee5
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go
    @@ -0,0 +1,69 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface.
    +// A group's client should implement this interface.
    +type ResourceClaimTemplatesGetter interface {
    +	ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface
    +}
    +
    +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources.
    +type ResourceClaimTemplateInterface interface {
    +	Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha3.ResourceClaimTemplate, error)
    +	Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha3.ResourceClaimTemplate, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaimTemplate, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimTemplateList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error)
    +	Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error)
    +	ResourceClaimTemplateExpansion
    +}
    +
    +// resourceClaimTemplates implements ResourceClaimTemplateInterface
    +type resourceClaimTemplates struct {
    +	*gentype.ClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration]
    +}
    +
    +// newResourceClaimTemplates returns a ResourceClaimTemplates
    +func newResourceClaimTemplates(c *ResourceV1alpha3Client, namespace string) *resourceClaimTemplates {
    +	return &resourceClaimTemplates{
    +		gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration](
    +			"resourceclaimtemplates",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1alpha3.ResourceClaimTemplate { return &v1alpha3.ResourceClaimTemplate{} },
    +			func() *v1alpha3.ResourceClaimTemplateList { return &v1alpha3.ResourceClaimTemplateList{} }),
    +	}
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go
    new file mode 100644
    index 0000000000..0819041408
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go
    @@ -0,0 +1,69 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	"context"
    +
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// ResourceSlicesGetter has a method to return a ResourceSliceInterface.
    +// A group's client should implement this interface.
    +type ResourceSlicesGetter interface {
    +	ResourceSlices() ResourceSliceInterface
    +}
    +
    +// ResourceSliceInterface has methods to work with ResourceSlice resources.
    +type ResourceSliceInterface interface {
    +	Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (*v1alpha3.ResourceSlice, error)
    +	Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (*v1alpha3.ResourceSlice, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceSlice, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceSliceList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error)
    +	Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error)
    +	ResourceSliceExpansion
    +}
    +
    +// resourceSlices implements ResourceSliceInterface
    +type resourceSlices struct {
    +	*gentype.ClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration]
    +}
    +
    +// newResourceSlices returns a ResourceSlices
    +func newResourceSlices(c *ResourceV1alpha3Client) *resourceSlices {
    +	return &resourceSlices{
    +		gentype.NewClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration](
    +			"resourceslices",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha3.ResourceSlice { return &v1alpha3.ResourceSlice{} },
    +			func() *v1alpha3.ResourceSliceList { return &v1alpha3.ResourceSliceList{} }),
    +	}
    +}
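
Editor's note: a hedged sketch (again not part of the vendored code) of consuming the Watch method from the ResourceSliceInterface above. The function takes the group interface as a parameter and assumes the caller built it with NewForConfig as in the previous sketch.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	resourceclient "k8s.io/client-go/kubernetes/typed/resource/v1alpha3"
)

// watchSlices drains ResourceSlice events until the watch or the context ends.
func watchSlices(ctx context.Context, client resourceclient.ResourceV1alpha3Interface) error {
	w, err := client.ResourceSlices().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		if ev.Type == watch.Error {
			return fmt.Errorf("watch error: %v", ev.Object)
		}
		fmt.Printf("%s: %T\n", ev.Type, ev.Object)
	}
	return nil
}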
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go
    index 40ab9fb407..92847184bc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go
    @@ -43,20 +43,22 @@ var priorityclassesKind = v1.SchemeGroupVersion.WithKind("PriorityClass")
     
     // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
     func (c *FakePriorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) {
    +	emptyResult := &v1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1.PriorityClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityClass), err
     }
     
     // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
     func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) {
    +	emptyResult := &v1.PriorityClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1.PriorityClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested priorityClasses.
     func (c *FakePriorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts))
     }
     
     // Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
     func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) {
    +	emptyResult := &v1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityClass), err
     }
     
     // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
     func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) {
    +	emptyResult := &v1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts meta
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.PriorityClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1.
     
     // Patch applies the patch and returns the patched priorityClass.
     func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) {
    +	emptyResult := &v1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1.PriorityClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli
     	if name == nil {
     		return nil, fmt.Errorf("priorityClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1.PriorityClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.PriorityClass), err
     }
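
Editor's note: a minimal test sketch (not part of this change) showing that existing fake-client tests keep working against the updated FakePriorityClasses; names are illustrative and the fake clientset is assumed from k8s.io/client-go/kubernetes/fake.

package example

import (
	"context"
	"testing"

	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakePriorityClassCreate(t *testing.T) {
	ctx := context.Background()
	cs := fake.NewSimpleClientset()

	pc := &schedulingv1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "high-priority"},
		Value:      1000000,
	}
	if _, err := cs.SchedulingV1().PriorityClasses().Create(ctx, pc, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// The fake records each call as an action that tests can assert on.
	actions := cs.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "create" {
		t.Fatalf("unexpected actions: %v", actions)
	}
}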
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
    index c68ec5da41..a28ef2fd4a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/scheduling/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PriorityClassesGetter has a method to return a PriorityClassInterface.
    @@ -55,143 +52,18 @@ type PriorityClassInterface interface {
     
     // priorityClasses implements PriorityClassInterface
     type priorityClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration]
     }
     
     // newPriorityClasses returns a PriorityClasses
     func newPriorityClasses(c *SchedulingV1Client) *priorityClasses {
     	return &priorityClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration](
    +			"priorityclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.PriorityClass { return &v1.PriorityClass{} },
    +			func() *v1.PriorityClassList { return &v1.PriorityClassList{} }),
     	}
     }
    -
    -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
    -func (c *priorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) {
    -	result = &v1.PriorityClass{}
    -	err = c.client.Get().
    -		Resource("priorityclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
    -func (c *priorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.PriorityClassList{}
    -	err = c.client.Get().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested priorityClasses.
    -func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
    -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) {
    -	result = &v1.PriorityClass{}
    -	err = c.client.Post().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
    -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) {
    -	result = &v1.PriorityClass{}
    -	err = c.client.Put().
    -		Resource("priorityclasses").
    -		Name(priorityClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
    -func (c *priorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("priorityclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("priorityclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched priorityClass.
    -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) {
    -	result = &v1.PriorityClass{}
    -	err = c.client.Patch(pt).
    -		Resource("priorityclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass.
    -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) {
    -	if priorityClass == nil {
    -		return nil, fmt.Errorf("priorityClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := priorityClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityClass.Name must be provided to Apply")
    -	}
    -	result = &v1.PriorityClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("priorityclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
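
Editor's note: the refactor above swaps the hand-rolled REST plumbing for the gentype helper, but the caller-facing PriorityClassInterface is unchanged. A hedged sketch of server-side apply through the typed client; it assumes an already-constructed kubernetes.Interface named cs, and the field manager name is made up for the example.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulingv1ac "k8s.io/client-go/applyconfigurations/scheduling/v1"
	"k8s.io/client-go/kubernetes"
)

// ensurePriorityClass declaratively applies the desired PriorityClass.
func ensurePriorityClass(ctx context.Context, cs kubernetes.Interface) error {
	desired := schedulingv1ac.PriorityClass("high-priority").
		WithValue(1000000).
		WithGlobalDefault(false).
		WithDescription("example priority class")

	_, err := cs.SchedulingV1().PriorityClasses().Apply(ctx, desired,
		metav1.ApplyOptions{FieldManager: "prometheus-engine-example", Force: true})
	return err
}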
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
    index 3c8404a725..055d458a3c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
    @@ -43,20 +43,22 @@ var priorityclassesKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityClass")
     
     // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
     func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) {
    +	emptyResult := &v1alpha1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1alpha1.PriorityClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.PriorityClass), err
     }
     
     // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
     func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) {
    +	emptyResult := &v1alpha1.PriorityClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1alpha1.PriorityClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested priorityClasses.
     func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts))
     }
     
     // Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
     func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) {
    +	emptyResult := &v1alpha1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.PriorityClass), err
     }
     
     // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
     func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) {
    +	emptyResult := &v1alpha1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.PriorityClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.Dele
     
     // Patch applies the patch and returns the patched priorityClass.
     func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
    +	emptyResult := &v1alpha1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.PriorityClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli
     	if name == nil {
     		return nil, fmt.Errorf("priorityClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.PriorityClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.PriorityClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
    index a9b8c19c78..5c78f3de9f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/scheduling/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PriorityClassesGetter has a method to return a PriorityClassInterface.
    @@ -55,143 +52,18 @@ type PriorityClassInterface interface {
     
     // priorityClasses implements PriorityClassInterface
     type priorityClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration]
     }
     
     // newPriorityClasses returns a PriorityClasses
     func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses {
     	return &priorityClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration](
    +			"priorityclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.PriorityClass { return &v1alpha1.PriorityClass{} },
    +			func() *v1alpha1.PriorityClassList { return &v1alpha1.PriorityClassList{} }),
     	}
     }
    -
    -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
    -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) {
    -	result = &v1alpha1.PriorityClass{}
    -	err = c.client.Get().
    -		Resource("priorityclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
    -func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.PriorityClassList{}
    -	err = c.client.Get().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested priorityClasses.
    -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
    -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) {
    -	result = &v1alpha1.PriorityClass{}
    -	err = c.client.Post().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
    -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) {
    -	result = &v1alpha1.PriorityClass{}
    -	err = c.client.Put().
    -		Resource("priorityclasses").
    -		Name(priorityClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
    -func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("priorityclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("priorityclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched priorityClass.
    -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
    -	result = &v1alpha1.PriorityClass{}
    -	err = c.client.Patch(pt).
    -		Resource("priorityclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass.
    -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) {
    -	if priorityClass == nil {
    -		return nil, fmt.Errorf("priorityClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := priorityClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityClass.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.PriorityClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("priorityclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
    index 4cf2e26c77..49d82a7ed9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
    @@ -43,20 +43,22 @@ var priorityclassesKind = v1beta1.SchemeGroupVersion.WithKind("PriorityClass")
     
     // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
     func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) {
    +	emptyResult := &v1beta1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1beta1.PriorityClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityClass), err
     }
     
     // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
     func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) {
    +	emptyResult := &v1beta1.PriorityClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1beta1.PriorityClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re
     // Watch returns a watch.Interface that watches the requested priorityClasses.
     func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts))
     }
     
     // Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
     func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) {
    +	emptyResult := &v1beta1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityClass), err
     }
     
     // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
     func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) {
    +	emptyResult := &v1beta1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.D
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.Dele
     
     // Patch applies the patch and returns the patched priorityClass.
     func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) {
    +	emptyResult := &v1beta1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli
     	if name == nil {
     		return nil, fmt.Errorf("priorityClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.PriorityClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.PriorityClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
    index 155476e4c7..9fef1d7596 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/scheduling/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // PriorityClassesGetter has a method to return a PriorityClassInterface.
    @@ -55,143 +52,18 @@ type PriorityClassInterface interface {
     
     // priorityClasses implements PriorityClassInterface
     type priorityClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration]
     }
     
     // newPriorityClasses returns a PriorityClasses
     func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses {
     	return &priorityClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration](
    +			"priorityclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.PriorityClass { return &v1beta1.PriorityClass{} },
    +			func() *v1beta1.PriorityClassList { return &v1beta1.PriorityClassList{} }),
     	}
     }
    -
    -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
    -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) {
    -	result = &v1beta1.PriorityClass{}
    -	err = c.client.Get().
    -		Resource("priorityclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
    -func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.PriorityClassList{}
    -	err = c.client.Get().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested priorityClasses.
    -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
    -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) {
    -	result = &v1beta1.PriorityClass{}
    -	err = c.client.Post().
    -		Resource("priorityclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
    -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) {
    -	result = &v1beta1.PriorityClass{}
    -	err = c.client.Put().
    -		Resource("priorityclasses").
    -		Name(priorityClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(priorityClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
    -func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("priorityclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("priorityclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched priorityClass.
    -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) {
    -	result = &v1beta1.PriorityClass{}
    -	err = c.client.Patch(pt).
    -		Resource("priorityclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass.
    -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) {
    -	if priorityClass == nil {
    -		return nil, fmt.Errorf("priorityClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(priorityClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := priorityClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("priorityClass.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.PriorityClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("priorityclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
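
Note on the pattern above (it repeats for every typed client in this vendor bump): the generated clients now embed the generic gentype.ClientWithListAndApply, which supplies Get/List/Watch/Create/Update/Delete/DeleteCollection/Patch/Apply from the resource name, parameter codec, namespace ("" for cluster-scoped resources), and object constructors, so the hand-written REST methods are deleted. The public interfaces are unchanged, so callers compile as before. A minimal usage sketch, not part of the diff; it assumes a kubeconfig at the default path and uses an illustrative main package:

	// priorityclass_sketch.go -- illustrative only, not part of the vendored change.
	package main

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		// Assumption: a kubeconfig exists at the default location.
		cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
		if err != nil {
			panic(err)
		}
		clientset := kubernetes.NewForConfigOrDie(cfg)

		// The PriorityClassInterface surface is unchanged; Get is now served by
		// the embedded gentype client instead of hand-written REST plumbing.
		pc, err := clientset.SchedulingV1beta1().PriorityClasses().Get(
			context.Background(), "system-cluster-critical", metav1.GetOptions{})
		if err != nil {
			panic(err)
		}
		fmt.Println(pc.Name, pc.Value)
	}
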
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
    index d9dc4151e2..2e14db6000 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/storage/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CSIDriversGetter has a method to return a CSIDriverInterface.
    @@ -55,143 +52,18 @@ type CSIDriverInterface interface {
     
     // cSIDrivers implements CSIDriverInterface
     type cSIDrivers struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration]
     }
     
     // newCSIDrivers returns a CSIDrivers
     func newCSIDrivers(c *StorageV1Client) *cSIDrivers {
     	return &cSIDrivers{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration](
    +			"csidrivers",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.CSIDriver { return &v1.CSIDriver{} },
    +			func() *v1.CSIDriverList { return &v1.CSIDriverList{} }),
     	}
     }
    -
    -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any.
    -func (c *cSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) {
    -	result = &v1.CSIDriver{}
    -	err = c.client.Get().
    -		Resource("csidrivers").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors.
    -func (c *cSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.CSIDriverList{}
    -	err = c.client.Get().
    -		Resource("csidrivers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cSIDrivers.
    -func (c *cSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("csidrivers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cSIDriver and creates it.  Returns the server's representation of the cSIDriver, and an error, if there is any.
    -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) {
    -	result = &v1.CSIDriver{}
    -	err = c.client.Post().
    -		Resource("csidrivers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIDriver).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any.
    -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) {
    -	result = &v1.CSIDriver{}
    -	err = c.client.Put().
    -		Resource("csidrivers").
    -		Name(cSIDriver.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIDriver).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs.
    -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("csidrivers").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("csidrivers").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cSIDriver.
    -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) {
    -	result = &v1.CSIDriver{}
    -	err = c.client.Patch(pt).
    -		Resource("csidrivers").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver.
    -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) {
    -	if cSIDriver == nil {
    -		return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cSIDriver)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cSIDriver.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply")
    -	}
    -	result = &v1.CSIDriver{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("csidrivers").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
    index 17dbc8c1c8..6d28d7ed11 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/storage/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CSINodesGetter has a method to return a CSINodeInterface.
    @@ -55,143 +52,18 @@ type CSINodeInterface interface {
     
     // cSINodes implements CSINodeInterface
     type cSINodes struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration]
     }
     
     // newCSINodes returns a CSINodes
     func newCSINodes(c *StorageV1Client) *cSINodes {
     	return &cSINodes{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration](
    +			"csinodes",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.CSINode { return &v1.CSINode{} },
    +			func() *v1.CSINodeList { return &v1.CSINodeList{} }),
     	}
     }
    -
    -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any.
    -func (c *cSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) {
    -	result = &v1.CSINode{}
    -	err = c.client.Get().
    -		Resource("csinodes").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CSINodes that match those selectors.
    -func (c *cSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.CSINodeList{}
    -	err = c.client.Get().
    -		Resource("csinodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cSINodes.
    -func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("csinodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cSINode and creates it.  Returns the server's representation of the cSINode, and an error, if there is any.
    -func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) {
    -	result = &v1.CSINode{}
    -	err = c.client.Post().
    -		Resource("csinodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSINode).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
    -func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) {
    -	result = &v1.CSINode{}
    -	err = c.client.Put().
    -		Resource("csinodes").
    -		Name(cSINode.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSINode).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs.
    -func (c *cSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("csinodes").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("csinodes").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cSINode.
    -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) {
    -	result = &v1.CSINode{}
    -	err = c.client.Patch(pt).
    -		Resource("csinodes").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode.
    -func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) {
    -	if cSINode == nil {
    -		return nil, fmt.Errorf("cSINode provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cSINode)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cSINode.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cSINode.Name must be provided to Apply")
    -	}
    -	result = &v1.CSINode{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("csinodes").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go
    index 6bb50e0da9..8a762b9fff 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/storage/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface.
    @@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface {
     
     // cSIStorageCapacities implements CSIStorageCapacityInterface
     type cSIStorageCapacities struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration]
     }
     
     // newCSIStorageCapacities returns a CSIStorageCapacities
     func newCSIStorageCapacities(c *StorageV1Client, namespace string) *cSIStorageCapacities {
     	return &cSIStorageCapacities{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration](
    +			"csistoragecapacities",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1.CSIStorageCapacity { return &v1.CSIStorageCapacity{} },
    +			func() *v1.CSIStorageCapacityList { return &v1.CSIStorageCapacityList{} }),
     	}
     }
    -
    -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any.
    -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) {
    -	result = &v1.CSIStorageCapacity{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors.
    -func (c *cSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.CSIStorageCapacityList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities.
    -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cSIStorageCapacity and creates it.  Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
    -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) {
    -	result = &v1.CSIStorageCapacity{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIStorageCapacity).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
    -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) {
    -	result = &v1.CSIStorageCapacity{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(cSIStorageCapacity.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIStorageCapacity).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs.
    -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cSIStorageCapacity.
    -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) {
    -	result = &v1.CSIStorageCapacity{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity.
    -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) {
    -	if cSIStorageCapacity == nil {
    -		return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cSIStorageCapacity)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cSIStorageCapacity.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply")
    -	}
    -	result = &v1.CSIStorageCapacity{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
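
For namespaced resources such as CSIStorageCapacity, the constructor passes the namespace through to gentype (cluster-scoped resources like CSIDriver, CSINode, and StorageClass pass "" instead), so the per-namespace accessor on the interface behaves as before. A short sketch under that assumption; the package and function names are illustrative only:

	// capacity_sketch.go -- illustrative only, not part of the vendored change.
	package sketch

	import (
		"context"

		storagev1 "k8s.io/api/storage/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// listCapacities lists CSIStorageCapacities in one namespace; the call is
	// served by a gentype client constructed with that namespace.
	func listCapacities(ctx context.Context, cs kubernetes.Interface, ns string) (*storagev1.CSIStorageCapacityList, error) {
		return cs.StorageV1().CSIStorageCapacities(ns).List(ctx, metav1.ListOptions{})
	}
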
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go
    index 4983227376..1df7c034bb 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go
    @@ -43,20 +43,22 @@ var csidriversKind = v1.SchemeGroupVersion.WithKind("CSIDriver")
     
     // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any.
     func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) {
    +	emptyResult := &v1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(csidriversResource, name), &v1.CSIDriver{})
    +		Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIDriver), err
     }
     
     // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors.
     func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) {
    +	emptyResult := &v1.CSIDriverList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1.CSIDriverList{})
    +		Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested cSIDrivers.
     func (c *FakeCSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts))
     }
     
     // Create takes the representation of a cSIDriver and creates it.  Returns the server's representation of the cSIDriver, and an error, if there is any.
     func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) {
    +	emptyResult := &v1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1.CSIDriver{})
    +		Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIDriver), err
     }
     
     // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any.
     func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) {
    +	emptyResult := &v1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1.CSIDriver{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIDriver), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts metav1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.CSIDriverList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.Delet
     
     // Patch applies the patch and returns the patched cSIDriver.
     func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) {
    +	emptyResult := &v1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1.CSIDriver{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIDriver), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriv
     	if name == nil {
     		return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1.CSIDriver{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIDriver), err
     }
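
The fake clients follow a parallel pattern throughout this bump: actions are built with the ...WithOptions constructors, so the caller's Get/List/Create/Update/Patch/Delete options are recorded on the action, and a typed empty object is returned instead of nil when the tracker yields no object. Existing tests against the fake clientset keep working. A minimal hedged test sketch, assuming the standard client-go fake package; the test name and driver name are illustrative:

	// fake_csidriver_sketch_test.go -- illustrative only, not part of the vendored change.
	package sketch

	import (
		"context"
		"testing"

		storagev1 "k8s.io/api/storage/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes/fake"
	)

	func TestCreateCSIDriver(t *testing.T) {
		cs := fake.NewSimpleClientset()

		// Create is routed through NewRootCreateActionWithOptions, so the
		// CreateOptions travel with the recorded action.
		_, err := cs.StorageV1().CSIDrivers().Create(
			context.Background(),
			&storagev1.CSIDriver{ObjectMeta: metav1.ObjectMeta{Name: "example.csi.driver"}},
			metav1.CreateOptions{FieldManager: "sketch"},
		)
		if err != nil {
			t.Fatal(err)
		}
		if got := len(cs.Actions()); got != 1 {
			t.Fatalf("expected 1 recorded action, got %d", got)
		}
	}
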
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go
    index 0271a20f3d..e2b8e8cc8d 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go
    @@ -43,20 +43,22 @@ var csinodesKind = v1.SchemeGroupVersion.WithKind("CSINode")
     
     // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any.
     func (c *FakeCSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) {
    +	emptyResult := &v1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(csinodesResource, name), &v1.CSINode{})
    +		Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSINode), err
     }
     
     // List takes label and field selectors, and returns the list of CSINodes that match those selectors.
     func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) {
    +	emptyResult := &v1.CSINodeList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1.CSINodeList{})
    +		Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (resul
     // Watch returns a watch.Interface that watches the requested cSINodes.
     func (c *FakeCSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts))
     }
     
     // Create takes the representation of a cSINode and creates it.  Returns the server's representation of the cSINode, and an error, if there is any.
     func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) {
    +	emptyResult := &v1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1.CSINode{})
    +		Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSINode), err
     }
     
     // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
     func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) {
    +	emptyResult := &v1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1.CSINode{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSINode), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts metav1.Dele
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.CSINodeList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteO
     
     // Patch applies the patch and returns the patched cSINode.
     func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) {
    +	emptyResult := &v1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1.CSINode{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSINode), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeAppl
     	if name == nil {
     		return nil, fmt.Errorf("cSINode.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1.CSINode{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSINode), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go
    index b12bbe3c15..a86014855e 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go
    @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1.SchemeGroupVersion.WithKind("CSIStorageCapacit
     
     // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any.
     func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1.CSIStorageCapacity{})
    +		Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIStorageCapacity), err
     }
     
     // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors.
     func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) {
    +	emptyResult := &v1.CSIStorageCapacityList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1.CSIStorageCapacityList{})
    +		Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOpt
     // Watch returns a watch.Interface that watches the requested cSIStorageCapacities.
     func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a cSIStorageCapacity and creates it.  Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
     func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{})
    +		Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIStorageCapacity), err
     }
     
     // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
     func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{})
    +		Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIStorageCapacity), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.CSIStorageCapacityList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts me
     
     // Patch applies the patch and returns the patched cSIStorageCapacity.
     func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1.CSIStorageCapacity{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIStorageCapacity), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity
     	if name == nil {
     		return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1.CSIStorageCapacity{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.CSIStorageCapacity), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
    index e232f4c8d7..8910be1db9 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
    @@ -43,20 +43,22 @@ var storageclassesKind = v1.SchemeGroupVersion.WithKind("StorageClass")
     
     // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
     func (c *FakeStorageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) {
    +	emptyResult := &v1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1.StorageClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StorageClass), err
     }
     
     // List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
     func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) {
    +	emptyResult := &v1.StorageClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1.StorageClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions)
     // Watch returns a watch.Interface that watches the requested storageClasses.
     func (c *FakeStorageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts))
     }
     
     // Create takes the representation of a storageClass and creates it.  Returns the server's representation of the storageClass, and an error, if there is any.
     func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) {
    +	emptyResult := &v1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1.StorageClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StorageClass), err
     }
     
     // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
     func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) {
    +	emptyResult := &v1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1.StorageClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StorageClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts metav
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.StorageClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.D
     
     // Patch applies the patch and returns the patched storageClass.
     func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) {
    +	emptyResult := &v1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1.StorageClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StorageClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1.
     	if name == nil {
     		return nil, fmt.Errorf("storageClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1.StorageClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.StorageClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go
    index 3f5f2aec57..3d3d71ec50 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go
    @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1.SchemeGroupVersion.WithKind("VolumeAttachment")
     
     // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
     func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) {
    +	emptyResult := &v1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1.VolumeAttachment{})
    +		Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.VolumeAttachment), err
     }
     
     // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
     func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) {
    +	emptyResult := &v1.VolumeAttachmentList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1.VolumeAttachmentList{})
    +		Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOption
     // Watch returns a watch.Interface that watches the requested volumeAttachments.
     func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts))
     }
     
     // Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
     func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) {
    +	emptyResult := &v1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{})
    +		Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.VolumeAttachment), err
     }
     
     // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
     func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
    +	emptyResult := &v1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.VolumeAttachment), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) {
    +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
    +	emptyResult := &v1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1.VolumeAttachment{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.VolumeAttachment), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts me
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1.VolumeAttachmentList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav
     
     // Patch applies the patch and returns the patched volumeAttachment.
     func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) {
    +	emptyResult := &v1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.VolumeAttachment), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto
     	if name == nil {
     		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.VolumeAttachment), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen
     	if name == nil {
     		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1.VolumeAttachment), err
     }
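
Aside: the fake-client hunks above all follow one pattern in this vendor bump: each verb now builds a `*ActionWithOptions` action so the caller's options are handed to the recorded action, and a failed invocation returns an empty typed object instead of nil. A minimal test sketch of the (unchanged) call site, assuming the public fake clientset from k8s.io/client-go/kubernetes/fake; the name "va-1" and field manager "example" are placeholders:

```go
package storagev1fake_test

import (
	"context"
	"testing"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

// The call site is unchanged by this diff; internally the fake now builds a
// NewRootCreateActionWithOptions, so the caller's CreateOptions are passed to
// the recorded action, and a failed call returns an empty typed object
// rather than nil.
func TestFakeVolumeAttachmentCreate(t *testing.T) {
	cs := fake.NewSimpleClientset()
	va := &storagev1.VolumeAttachment{ObjectMeta: metav1.ObjectMeta{Name: "va-1"}}

	if _, err := cs.StorageV1().VolumeAttachments().
		Create(context.Background(), va, metav1.CreateOptions{FieldManager: "example"}); err != nil {
		t.Fatal(err)
	}

	actions := cs.Actions()
	if len(actions) != 1 {
		t.Fatalf("expected 1 recorded action, got %d", len(actions))
	}
	create, ok := actions[0].(k8stesting.CreateAction)
	if !ok {
		t.Fatalf("expected a create action, got %T", actions[0])
	}
	if got := create.GetObject().(*storagev1.VolumeAttachment).Name; got != "va-1" {
		t.Fatalf("unexpected object recorded on the create action: %q", got)
	}
}
```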
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
    index 8e97d90a0f..d7b6ff68aa 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/storage/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // StorageClassesGetter has a method to return a StorageClassInterface.
    @@ -55,143 +52,18 @@ type StorageClassInterface interface {
     
     // storageClasses implements StorageClassInterface
     type storageClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration]
     }
     
     // newStorageClasses returns a StorageClasses
     func newStorageClasses(c *StorageV1Client) *storageClasses {
     	return &storageClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration](
    +			"storageclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.StorageClass { return &v1.StorageClass{} },
    +			func() *v1.StorageClassList { return &v1.StorageClassList{} }),
     	}
     }
    -
    -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
    -func (c *storageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) {
    -	result = &v1.StorageClass{}
    -	err = c.client.Get().
    -		Resource("storageclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
    -func (c *storageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.StorageClassList{}
    -	err = c.client.Get().
    -		Resource("storageclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested storageClasses.
    -func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("storageclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a storageClass and creates it.  Returns the server's representation of the storageClass, and an error, if there is any.
    -func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) {
    -	result = &v1.StorageClass{}
    -	err = c.client.Post().
    -		Resource("storageclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
    -func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) {
    -	result = &v1.StorageClass{}
    -	err = c.client.Put().
    -		Resource("storageclasses").
    -		Name(storageClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
    -func (c *storageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("storageclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *storageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("storageclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched storageClass.
    -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) {
    -	result = &v1.StorageClass{}
    -	err = c.client.Patch(pt).
    -		Resource("storageclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass.
    -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) {
    -	if storageClass == nil {
    -		return nil, fmt.Errorf("storageClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(storageClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := storageClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("storageClass.Name must be provided to Apply")
    -	}
    -	result = &v1.StorageClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("storageclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
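
Aside: after this hunk the storageClasses type simply embeds gentype.ClientWithListAndApply, which supplies the Get/List/Watch/Create/Update/Delete/Patch/Apply methods that the removed hand-written REST calls used to provide; the exported StorageClassInterface is unchanged, so existing callers compile as before. A minimal consumer sketch under that assumption (in-cluster config and the printed fields are illustrative only):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listStorageClasses goes through the same StorageClassInterface as before;
// after this diff the verbs are served by the embedded generic client rather
// than the hand-written REST calls removed above.
func listStorageClasses(ctx context.Context, cs kubernetes.Interface) error {
	scs, err := cs.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, sc := range scs.Items {
		fmt.Println(sc.Name, sc.Provisioner)
	}
	return nil
}

func main() {
	cfg, err := rest.InClusterConfig() // assumption: running inside a cluster
	if err != nil {
		panic(err)
	}
	if err := listStorageClasses(context.Background(), kubernetes.NewForConfigOrDie(cfg)); err != nil {
		panic(err)
	}
}
```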
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
    index c1dbec84f4..3a0404284f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
    @@ -20,17 +20,14 @@ package v1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1 "k8s.io/api/storage/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface.
    @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface {
     type VolumeAttachmentInterface interface {
     	Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error)
     	Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
     	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
    @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface {
     	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error)
     	Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error)
     	VolumeAttachmentExpansion
     }
     
     // volumeAttachments implements VolumeAttachmentInterface
     type volumeAttachments struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration]
     }
     
     // newVolumeAttachments returns a VolumeAttachments
     func newVolumeAttachments(c *StorageV1Client) *volumeAttachments {
     	return &volumeAttachments{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration](
    +			"volumeattachments",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1.VolumeAttachment { return &v1.VolumeAttachment{} },
    +			func() *v1.VolumeAttachmentList { return &v1.VolumeAttachmentList{} }),
     	}
     }
    -
    -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
    -func (c *volumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) {
    -	result = &v1.VolumeAttachment{}
    -	err = c.client.Get().
    -		Resource("volumeattachments").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
    -func (c *volumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1.VolumeAttachmentList{}
    -	err = c.client.Get().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested volumeAttachments.
    -func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
    -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) {
    -	result = &v1.VolumeAttachment{}
    -	err = c.client.Post().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
    -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
    -	result = &v1.VolumeAttachment{}
    -	err = c.client.Put().
    -		Resource("volumeattachments").
    -		Name(volumeAttachment.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
    -	result = &v1.VolumeAttachment{}
    -	err = c.client.Put().
    -		Resource("volumeattachments").
    -		Name(volumeAttachment.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
    -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("volumeattachments").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("volumeattachments").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched volumeAttachment.
    -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) {
    -	result = &v1.VolumeAttachment{}
    -	err = c.client.Patch(pt).
    -		Resource("volumeattachments").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment.
    -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) {
    -	if volumeAttachment == nil {
    -		return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(volumeAttachment)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := volumeAttachment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
    -	}
    -	result = &v1.VolumeAttachment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("volumeattachments").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) {
    -	if volumeAttachment == nil {
    -		return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(volumeAttachment)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := volumeAttachment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
    -	}
    -
    -	result = &v1.VolumeAttachment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("volumeattachments").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go
    index bf5d64dddc..6819deff62 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/storage/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface.
    @@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface {
     
     // cSIStorageCapacities implements CSIStorageCapacityInterface
     type cSIStorageCapacities struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration]
     }
     
     // newCSIStorageCapacities returns a CSIStorageCapacities
     func newCSIStorageCapacities(c *StorageV1alpha1Client, namespace string) *cSIStorageCapacities {
     	return &cSIStorageCapacities{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration](
    +			"csistoragecapacities",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1alpha1.CSIStorageCapacity { return &v1alpha1.CSIStorageCapacity{} },
    +			func() *v1alpha1.CSIStorageCapacityList { return &v1alpha1.CSIStorageCapacityList{} }),
     	}
     }
    -
    -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any.
    -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) {
    -	result = &v1alpha1.CSIStorageCapacity{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors.
    -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.CSIStorageCapacityList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities.
    -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cSIStorageCapacity and creates it.  Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
    -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) {
    -	result = &v1alpha1.CSIStorageCapacity{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIStorageCapacity).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
    -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) {
    -	result = &v1alpha1.CSIStorageCapacity{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(cSIStorageCapacity.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIStorageCapacity).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs.
    -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cSIStorageCapacity.
    -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) {
    -	result = &v1alpha1.CSIStorageCapacity{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity.
    -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) {
    -	if cSIStorageCapacity == nil {
    -		return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cSIStorageCapacity)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cSIStorageCapacity.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.CSIStorageCapacity{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
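
Aside: CSIStorageCapacity is the namespaced case of the same migration: the namespace that previously lived in the struct's ns field is now the fourth argument to gentype.NewClientWithListAndApply, while cluster-scoped types pass "". A short hedged sketch of the unchanged namespaced call path (the function name and parameters are illustrative):

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// countCapacities uses the namespaced getter; the namespace given here is the
// same value newCSIStorageCapacities now forwards to the generic client,
// whereas cluster-scoped storage types forward "" instead.
func countCapacities(ctx context.Context, cs kubernetes.Interface, ns string) (int, error) {
	list, err := cs.StorageV1alpha1().CSIStorageCapacities(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(list.Items), nil
}
```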
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go
    index c1614cda7d..0bcaccd208 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go
    @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1alpha1.SchemeGroupVersion.WithKind("CSIStorageC
     
     // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any.
     func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1alpha1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1alpha1.CSIStorageCapacity{})
    +		Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.CSIStorageCapacity), err
     }
     
     // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors.
     func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) {
    +	emptyResult := &v1alpha1.CSIStorageCapacityList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1alpha1.CSIStorageCapacityList{})
    +		Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions
     // Watch returns a watch.Interface that watches the requested cSIStorageCapacities.
     func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a cSIStorageCapacity and creates it.  Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
     func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1alpha1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{})
    +		Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.CSIStorageCapacity), err
     }
     
     // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
     func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1alpha1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{})
    +		Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.CSIStorageCapacity), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.CSIStorageCapacityList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1
     
     // Patch applies the patch and returns the patched cSIStorageCapacity.
     func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1alpha1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1alpha1.CSIStorageCapacity{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.CSIStorageCapacity), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity
     	if name == nil {
     		return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.CSIStorageCapacity{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.CSIStorageCapacity), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
    index 9725d6d10b..a07247f8f3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
    @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttachme
     
     // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
     func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) {
    +	emptyResult := &v1alpha1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{})
    +		Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttachment), err
     }
     
     // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
     func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) {
    +	emptyResult := &v1alpha1.VolumeAttachmentList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1alpha1.VolumeAttachmentList{})
    +		Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (
     // Watch returns a watch.Interface that watches the requested volumeAttachments.
     func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts))
     }
     
     // Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
     func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) {
    +	emptyResult := &v1alpha1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{})
    +		Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttachment), err
     }
     
     // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
     func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) {
    +	emptyResult := &v1alpha1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttachment), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) {
    +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) {
    +	emptyResult := &v1alpha1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttachment), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.De
     
     // Patch applies the patch and returns the patched volumeAttachment.
     func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) {
    +	emptyResult := &v1alpha1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttachment), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto
     	if name == nil {
     		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttachment), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen
     	if name == nil {
     		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttachment), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go
    index d25263df48..0d7fe9aa8c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go
    @@ -43,20 +43,22 @@ var volumeattributesclassesKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAt
     
     // Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any.
     func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1alpha1.VolumeAttributesClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(volumeattributesclassesResource, name), &v1alpha1.VolumeAttributesClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttributesClass), err
     }
     
     // List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors.
     func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) {
    +	emptyResult := &v1alpha1.VolumeAttributesClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(volumeattributesclassesResource, volumeattributesclassesKind, opts), &v1alpha1.VolumeAttributesClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOpti
     // Watch returns a watch.Interface that watches the requested volumeAttributesClasses.
     func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(volumeattributesclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts))
     }
     
     // Create takes the representation of a volumeAttributesClass and creates it.  Returns the server's representation of the volumeAttributesClass, and an error, if there is any.
     func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1alpha1.VolumeAttributesClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttributesClass), err
     }
     
     // Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any.
     func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1alpha1.VolumeAttributesClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttributesClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, o
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(volumeattributesclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttributesClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts
     
     // Patch applies the patch and returns the patched volumeAttributesClass.
     func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1alpha1.VolumeAttributesClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeAttributesClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttributesClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttribute
     	if name == nil {
     		return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.VolumeAttributesClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttributesClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.VolumeAttributesClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
    index 58abb748f9..0982d5568a 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/storage/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface.
    @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface {
     type VolumeAttachmentInterface interface {
     	Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error)
     	Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error)
     	Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error)
     	VolumeAttachmentExpansion
     }
     
     // volumeAttachments implements VolumeAttachmentInterface
     type volumeAttachments struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration]
     }
     
     // newVolumeAttachments returns a VolumeAttachments
     func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments {
     	return &volumeAttachments{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration](
    +			"volumeattachments",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.VolumeAttachment { return &v1alpha1.VolumeAttachment{} },
    +			func() *v1alpha1.VolumeAttachmentList { return &v1alpha1.VolumeAttachmentList{} }),
     	}
     }
    -
    -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
    -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) {
    -	result = &v1alpha1.VolumeAttachment{}
    -	err = c.client.Get().
    -		Resource("volumeattachments").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
    -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.VolumeAttachmentList{}
    -	err = c.client.Get().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested volumeAttachments.
    -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
    -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) {
    -	result = &v1alpha1.VolumeAttachment{}
    -	err = c.client.Post().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
    -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) {
    -	result = &v1alpha1.VolumeAttachment{}
    -	err = c.client.Put().
    -		Resource("volumeattachments").
    -		Name(volumeAttachment.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) {
    -	result = &v1alpha1.VolumeAttachment{}
    -	err = c.client.Put().
    -		Resource("volumeattachments").
    -		Name(volumeAttachment.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
    -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("volumeattachments").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("volumeattachments").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched volumeAttachment.
    -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) {
    -	result = &v1alpha1.VolumeAttachment{}
    -	err = c.client.Patch(pt).
    -		Resource("volumeattachments").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment.
    -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) {
    -	if volumeAttachment == nil {
    -		return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(volumeAttachment)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := volumeAttachment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.VolumeAttachment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("volumeattachments").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) {
    -	if volumeAttachment == nil {
    -		return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(volumeAttachment)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := volumeAttachment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
    -	}
    -
    -	result = &v1alpha1.VolumeAttachment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("volumeattachments").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go
    index 6633a4dc15..40cff75883 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/storage/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface.
    @@ -55,143 +52,18 @@ type VolumeAttributesClassInterface interface {
     
     // volumeAttributesClasses implements VolumeAttributesClassInterface
     type volumeAttributesClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration]
     }
     
     // newVolumeAttributesClasses returns a VolumeAttributesClasses
     func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses {
     	return &volumeAttributesClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration](
    +			"volumeattributesclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.VolumeAttributesClass { return &v1alpha1.VolumeAttributesClass{} },
    +			func() *v1alpha1.VolumeAttributesClassList { return &v1alpha1.VolumeAttributesClassList{} }),
     	}
     }
    -
    -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any.
    -func (c *volumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) {
    -	result = &v1alpha1.VolumeAttributesClass{}
    -	err = c.client.Get().
    -		Resource("volumeattributesclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors.
    -func (c *volumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.VolumeAttributesClassList{}
    -	err = c.client.Get().
    -		Resource("volumeattributesclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses.
    -func (c *volumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("volumeattributesclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a volumeAttributesClass and creates it.  Returns the server's representation of the volumeAttributesClass, and an error, if there is any.
    -func (c *volumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) {
    -	result = &v1alpha1.VolumeAttributesClass{}
    -	err = c.client.Post().
    -		Resource("volumeattributesclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttributesClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any.
    -func (c *volumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) {
    -	result = &v1alpha1.VolumeAttributesClass{}
    -	err = c.client.Put().
    -		Resource("volumeattributesclasses").
    -		Name(volumeAttributesClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttributesClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs.
    -func (c *volumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("volumeattributesclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *volumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("volumeattributesclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched volumeAttributesClass.
    -func (c *volumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) {
    -	result = &v1alpha1.VolumeAttributesClass{}
    -	err = c.client.Patch(pt).
    -		Resource("volumeattributesclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass.
    -func (c *volumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) {
    -	if volumeAttributesClass == nil {
    -		return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(volumeAttributesClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := volumeAttributesClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.VolumeAttributesClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("volumeattributesclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
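Note (illustrative annotation, not part of the patch): the hunks above swap the hand-written REST plumbing of each typed storage client for an embedded gentype.ClientWithListAndApply, while the exported interfaces (VolumeAttachmentInterface, VolumeAttributesClassInterface, ...) stay unchanged, so existing call sites keep compiling. A minimal sketch of such an unaffected call site, assuming a caller that already holds a kubernetes.Interface:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// countVolumeAttributesClasses lists the cluster-scoped VolumeAttributesClasses through
// the regenerated v1alpha1 client. Get/List/Watch/Apply are now served by the embedded
// gentype client, but the typed interface and call pattern are identical to before.
func countVolumeAttributesClasses(ctx context.Context, cs kubernetes.Interface) (int, error) {
	list, err := cs.StorageV1alpha1().VolumeAttributesClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(list.Items), nil
}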
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
    index 04e677db05..2748919b40 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/storage/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CSIDriversGetter has a method to return a CSIDriverInterface.
    @@ -55,143 +52,18 @@ type CSIDriverInterface interface {
     
     // cSIDrivers implements CSIDriverInterface
     type cSIDrivers struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration]
     }
     
     // newCSIDrivers returns a CSIDrivers
     func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers {
     	return &cSIDrivers{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration](
    +			"csidrivers",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.CSIDriver { return &v1beta1.CSIDriver{} },
    +			func() *v1beta1.CSIDriverList { return &v1beta1.CSIDriverList{} }),
     	}
     }
    -
    -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any.
    -func (c *cSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) {
    -	result = &v1beta1.CSIDriver{}
    -	err = c.client.Get().
    -		Resource("csidrivers").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors.
    -func (c *cSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.CSIDriverList{}
    -	err = c.client.Get().
    -		Resource("csidrivers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cSIDrivers.
    -func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("csidrivers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cSIDriver and creates it.  Returns the server's representation of the cSIDriver, and an error, if there is any.
    -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) {
    -	result = &v1beta1.CSIDriver{}
    -	err = c.client.Post().
    -		Resource("csidrivers").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIDriver).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any.
    -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) {
    -	result = &v1beta1.CSIDriver{}
    -	err = c.client.Put().
    -		Resource("csidrivers").
    -		Name(cSIDriver.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIDriver).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs.
    -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("csidrivers").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("csidrivers").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cSIDriver.
    -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) {
    -	result = &v1beta1.CSIDriver{}
    -	err = c.client.Patch(pt).
    -		Resource("csidrivers").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver.
    -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) {
    -	if cSIDriver == nil {
    -		return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cSIDriver)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cSIDriver.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.CSIDriver{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("csidrivers").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
    index c3760b5ce5..fe6fe228e3 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/storage/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CSINodesGetter has a method to return a CSINodeInterface.
    @@ -55,143 +52,18 @@ type CSINodeInterface interface {
     
     // cSINodes implements CSINodeInterface
     type cSINodes struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration]
     }
     
     // newCSINodes returns a CSINodes
     func newCSINodes(c *StorageV1beta1Client) *cSINodes {
     	return &cSINodes{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration](
    +			"csinodes",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.CSINode { return &v1beta1.CSINode{} },
    +			func() *v1beta1.CSINodeList { return &v1beta1.CSINodeList{} }),
     	}
     }
    -
    -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any.
    -func (c *cSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) {
    -	result = &v1beta1.CSINode{}
    -	err = c.client.Get().
    -		Resource("csinodes").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CSINodes that match those selectors.
    -func (c *cSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.CSINodeList{}
    -	err = c.client.Get().
    -		Resource("csinodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cSINodes.
    -func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("csinodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cSINode and creates it.  Returns the server's representation of the cSINode, and an error, if there is any.
    -func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) {
    -	result = &v1beta1.CSINode{}
    -	err = c.client.Post().
    -		Resource("csinodes").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSINode).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
    -func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) {
    -	result = &v1beta1.CSINode{}
    -	err = c.client.Put().
    -		Resource("csinodes").
    -		Name(cSINode.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSINode).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs.
    -func (c *cSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("csinodes").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("csinodes").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cSINode.
    -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) {
    -	result = &v1beta1.CSINode{}
    -	err = c.client.Patch(pt).
    -		Resource("csinodes").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode.
    -func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) {
    -	if cSINode == nil {
    -		return nil, fmt.Errorf("cSINode provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cSINode)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cSINode.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cSINode.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.CSINode{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("csinodes").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go
    index 98ba936dc4..e9ffc1df92 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/storage/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface.
    @@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface {
     
     // cSIStorageCapacities implements CSIStorageCapacityInterface
     type cSIStorageCapacities struct {
    -	client rest.Interface
    -	ns     string
    +	*gentype.ClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration]
     }
     
     // newCSIStorageCapacities returns a CSIStorageCapacities
     func newCSIStorageCapacities(c *StorageV1beta1Client, namespace string) *cSIStorageCapacities {
     	return &cSIStorageCapacities{
    -		client: c.RESTClient(),
    -		ns:     namespace,
    +		gentype.NewClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration](
    +			"csistoragecapacities",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			namespace,
    +			func() *v1beta1.CSIStorageCapacity { return &v1beta1.CSIStorageCapacity{} },
    +			func() *v1beta1.CSIStorageCapacityList { return &v1beta1.CSIStorageCapacityList{} }),
     	}
     }
    -
    -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any.
    -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) {
    -	result = &v1beta1.CSIStorageCapacity{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors.
    -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.CSIStorageCapacityList{}
    -	err = c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities.
    -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a cSIStorageCapacity and creates it.  Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
    -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) {
    -	result = &v1beta1.CSIStorageCapacity{}
    -	err = c.client.Post().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIStorageCapacity).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
    -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) {
    -	result = &v1beta1.CSIStorageCapacity{}
    -	err = c.client.Put().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(cSIStorageCapacity.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(cSIStorageCapacity).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs.
    -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched cSIStorageCapacity.
    -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) {
    -	result = &v1beta1.CSIStorageCapacity{}
    -	err = c.client.Patch(pt).
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity.
    -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) {
    -	if cSIStorageCapacity == nil {
    -		return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(cSIStorageCapacity)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := cSIStorageCapacity.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.CSIStorageCapacity{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Namespace(c.ns).
    -		Resource("csistoragecapacities").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
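Note (illustrative annotation, not part of the patch): CSIStorageCapacity is the one namespaced resource in this group, so its constructor hands the caller's namespace to gentype.NewClientWithListAndApply where the cluster-scoped clients above pass an empty string; the generic client then scopes every request instead of the old ns field. Usage is unchanged; a minimal sketch, assuming a caller with a kubernetes.Interface:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listCapacityNames lists CSIStorageCapacity objects in a single namespace; the namespace
// passed to CSIStorageCapacities() is the value the generic client now carries internally.
func listCapacityNames(ctx context.Context, cs kubernetes.Interface, namespace string) ([]string, error) {
	list, err := cs.StorageV1beta1().CSIStorageCapacities(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, item := range list.Items {
		names = append(names, item.Name)
	}
	return names, nil
}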
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go
    index 4257aa6183..2b230707fe 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go
    @@ -43,20 +43,22 @@ var csidriversKind = v1beta1.SchemeGroupVersion.WithKind("CSIDriver")
     
     // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any.
     func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) {
    +	emptyResult := &v1beta1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(csidriversResource, name), &v1beta1.CSIDriver{})
    +		Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIDriver), err
     }
     
     // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors.
     func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) {
    +	emptyResult := &v1beta1.CSIDriverList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1beta1.CSIDriverList{})
    +		Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result
     // Watch returns a watch.Interface that watches the requested cSIDrivers.
     func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts))
     }
     
     // Create takes the representation of a cSIDriver and creates it.  Returns the server's representation of the cSIDriver, and an error, if there is any.
     func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) {
    +	emptyResult := &v1beta1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{})
    +		Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIDriver), err
     }
     
     // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any.
     func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) {
    +	emptyResult := &v1beta1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIDriver), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.Delete
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.CSIDriverList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOpt
     
     // Patch applies the patch and returns the patched cSIDriver.
     func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) {
    +	emptyResult := &v1beta1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1beta1.CSIDriver{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIDriver), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CS
     	if name == nil {
     		return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.CSIDriver{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1beta1.CSIDriver{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIDriver), err
     }
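Note (illustrative annotation, not part of the patch): the fake clients in the hunks above move to the *ActionWithOptions constructors, so the options a caller passes are forwarded into the recorded action, and when the tracker yields no object the methods now return a typed empty result rather than a nil pointer. A minimal test sketch against the standard client-go fake clientset, under those assumptions:

package example_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeCSIDriverGetRecordsAction(t *testing.T) {
	cs := fake.NewSimpleClientset()

	// The driver does not exist, so a not-found error is expected; under the updated
	// fakes the accompanying object is an empty &v1beta1.CSIDriver{} rather than nil.
	if _, err := cs.StorageV1beta1().CSIDrivers().Get(context.TODO(), "missing", metav1.GetOptions{}); err == nil {
		t.Fatal("expected a not-found error")
	}

	// The fake records one "get" action on the csidrivers resource.
	actions := cs.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "get" || actions[0].GetResource().Resource != "csidrivers" {
		t.Fatalf("unexpected recorded actions: %v", actions)
	}
}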
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go
    index d38c104bc1..c5c2b58250 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go
    @@ -43,20 +43,22 @@ var csinodesKind = v1beta1.SchemeGroupVersion.WithKind("CSINode")
     
     // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any.
     func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) {
    +	emptyResult := &v1beta1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(csinodesResource, name), &v1beta1.CSINode{})
    +		Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSINode), err
     }
     
     // List takes label and field selectors, and returns the list of CSINodes that match those selectors.
     func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) {
    +	emptyResult := &v1beta1.CSINodeList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1beta1.CSINodeList{})
    +		Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v
     // Watch returns a watch.Interface that watches the requested cSINodes.
     func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts))
     }
     
     // Create takes the representation of a cSINode and creates it.  Returns the server's representation of the cSINode, and an error, if there is any.
     func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) {
    +	emptyResult := &v1beta1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1beta1.CSINode{})
    +		Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSINode), err
     }
     
     // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
     func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) {
    +	emptyResult := &v1beta1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1beta1.CSINode{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSINode), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOp
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.CSINodeList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptio
     
     // Patch applies the patch and returns the patched cSINode.
     func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) {
    +	emptyResult := &v1beta1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1beta1.CSINode{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSINode), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINod
     	if name == nil {
     		return nil, fmt.Errorf("cSINode.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.CSINode{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1beta1.CSINode{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSINode), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go
    index d7bbb614b2..59a9aaf9df 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go
    @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1beta1.SchemeGroupVersion.WithKind("CSIStorageCa
     
     // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any.
     func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1beta1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1beta1.CSIStorageCapacity{})
    +		Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIStorageCapacity), err
     }
     
     // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors.
     func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) {
    +	emptyResult := &v1beta1.CSIStorageCapacityList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1beta1.CSIStorageCapacityList{})
    +		Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions
     // Watch returns a watch.Interface that watches the requested cSIStorageCapacities.
     func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts))
    +		InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts))
     
     }
     
     // Create takes the representation of a cSIStorageCapacity and creates it.  Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
     func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1beta1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{})
    +		Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIStorageCapacity), err
     }
     
     // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any.
     func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1beta1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{})
    +		Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIStorageCapacity), err
     }
    @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts)
    +	action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.CSIStorageCapacityList{})
     	return err
    @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1
     
     // Patch applies the patch and returns the patched cSIStorageCapacity.
     func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) {
    +	emptyResult := &v1beta1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1beta1.CSIStorageCapacity{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIStorageCapacity), err
     }
    @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity
     	if name == nil {
     		return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.CSIStorageCapacity{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CSIStorageCapacity{})
    +		Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.CSIStorageCapacity), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go
    index 6b5bb02fda..470281607f 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go
    @@ -48,6 +48,10 @@ func (c *FakeStorageV1beta1) VolumeAttachments() v1beta1.VolumeAttachmentInterfa
     	return &FakeVolumeAttachments{c}
     }
     
    +func (c *FakeStorageV1beta1) VolumeAttributesClasses() v1beta1.VolumeAttributesClassInterface {
    +	return &FakeVolumeAttributesClasses{c}
    +}
    +
     // RESTClient returns a RESTClient that is used to communicate
     // with API server by this client implementation.
     func (c *FakeStorageV1beta1) RESTClient() rest.Interface {
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go
    index 869e58b4f7..954a346081 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go
    @@ -43,20 +43,22 @@ var storageclassesKind = v1beta1.SchemeGroupVersion.WithKind("StorageClass")
     
     // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
     func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) {
    +	emptyResult := &v1beta1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{})
    +		Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StorageClass), err
     }
     
     // List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
     func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) {
    +	emptyResult := &v1beta1.StorageClassList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1beta1.StorageClassList{})
    +		Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,25 +77,27 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (res
     // Watch returns a watch.Interface that watches the requested storageClasses.
     func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts))
     }
     
     // Create takes the representation of a storageClass and creates it.  Returns the server's representation of the storageClass, and an error, if there is any.
     func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) {
    +	emptyResult := &v1beta1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{})
    +		Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StorageClass), err
     }
     
     // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
     func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) {
    +	emptyResult := &v1beta1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StorageClass), err
     }
    @@ -107,7 +111,7 @@ func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.De
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{})
     	return err
    @@ -115,10 +119,11 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.Delet
     
     // Patch applies the patch and returns the patched storageClass.
     func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) {
    +	emptyResult := &v1beta1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StorageClass), err
     }
    @@ -136,10 +141,11 @@ func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1b
     	if name == nil {
     		return nil, fmt.Errorf("storageClass.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.StorageClass{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1beta1.StorageClass{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.StorageClass), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go
    index e2b4a2eb1b..247f7ca627 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go
    @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttachmen
     
     // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
     func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) {
    +	emptyResult := &v1beta1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{})
    +		Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.VolumeAttachment), err
     }
     
     // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
     func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) {
    +	emptyResult := &v1beta1.VolumeAttachmentList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1beta1.VolumeAttachmentList{})
    +		Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (
     // Watch returns a watch.Interface that watches the requested volumeAttachments.
     func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts))
     }
     
     // Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
     func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) {
    +	emptyResult := &v1beta1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{})
    +		Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.VolumeAttachment), err
     }
     
     // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
     func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) {
    +	emptyResult := &v1beta1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.VolumeAttachment), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) {
    +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) {
    +	emptyResult := &v1beta1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.VolumeAttachment), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.De
     
     // Patch applies the patch and returns the patched volumeAttachment.
     func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) {
    +	emptyResult := &v1beta1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.VolumeAttachment), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto
     	if name == nil {
     		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1beta1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.VolumeAttachment), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen
     	if name == nil {
     		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
     	}
    +	emptyResult := &v1beta1.VolumeAttachment{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.VolumeAttachment{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1beta1.VolumeAttachment), err
     }
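
Illustrative note (not part of the patch): the regenerated fake clients above now build their recorded actions with the `*WithOptions` constructors, so the caller's Get/Create/Update/Patch options are preserved in the action log, and a nil invocation result returns a typed empty object instead of `nil`. The sketch below is a hypothetical test, not code from this change, showing how a consumer of the fake clientset would observe that; the test name and field-manager string are made up for illustration.

```go
package example

import (
	"context"
	"testing"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestVolumeAttachmentFakeRecordsOptions(t *testing.T) {
	ctx := context.Background()
	clientset := fake.NewSimpleClientset()

	va := &storagev1beta1.VolumeAttachment{ObjectMeta: metav1.ObjectMeta{Name: "va-1"}}
	// With the regenerated fakes, the CreateOptions passed here are carried on
	// the recorded action rather than being dropped.
	if _, err := clientset.StorageV1beta1().VolumeAttachments().Create(ctx, va, metav1.CreateOptions{FieldManager: "example"}); err != nil {
		t.Fatal(err)
	}

	// Inspect the actions the fake recorded for this call.
	for _, action := range clientset.Actions() {
		t.Logf("recorded action: %s %s", action.GetVerb(), action.GetResource().Resource)
	}
}
```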
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go
    new file mode 100644
    index 0000000000..3cef7291ab
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go
    @@ -0,0 +1,151 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package fake
    +
    +import (
    +	"context"
    +	json "encoding/json"
    +	"fmt"
    +
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	labels "k8s.io/apimachinery/pkg/labels"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	testing "k8s.io/client-go/testing"
    +)
    +
    +// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface
    +type FakeVolumeAttributesClasses struct {
    +	Fake *FakeStorageV1beta1
    +}
    +
    +var volumeattributesclassesResource = v1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses")
    +
    +var volumeattributesclassesKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass")
    +
    +// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any.
    +func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1beta1.VolumeAttributesClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.VolumeAttributesClass), err
    +}
    +
    +// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors.
    +func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttributesClassList, err error) {
    +	emptyResult := &v1beta1.VolumeAttributesClassList{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +
    +	label, _, _ := testing.ExtractFromListOptions(opts)
    +	if label == nil {
    +		label = labels.Everything()
    +	}
    +	list := &v1beta1.VolumeAttributesClassList{ListMeta: obj.(*v1beta1.VolumeAttributesClassList).ListMeta}
    +	for _, item := range obj.(*v1beta1.VolumeAttributesClassList).Items {
    +		if label.Matches(labels.Set(item.Labels)) {
    +			list.Items = append(list.Items, item)
    +		}
    +	}
    +	return list, err
    +}
    +
    +// Watch returns a watch.Interface that watches the requested volumeAttributesClasses.
    +func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    +	return c.Fake.
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts))
    +}
    +
    +// Create takes the representation of a volumeAttributesClass and creates it.  Returns the server's representation of the volumeAttributesClass, and an error, if there is any.
    +func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1beta1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1beta1.VolumeAttributesClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.VolumeAttributesClass), err
    +}
    +
    +// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any.
    +func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1beta1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1beta1.VolumeAttributesClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.VolumeAttributesClass), err
    +}
    +
    +// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs.
    +func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    +	_, err := c.Fake.
    +		Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1beta1.VolumeAttributesClass{})
    +	return err
    +}
    +
    +// DeleteCollection deletes a collection of objects.
    +func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    +	action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts)
    +
    +	_, err := c.Fake.Invokes(action, &v1beta1.VolumeAttributesClassList{})
    +	return err
    +}
    +
    +// Patch applies the patch and returns the patched volumeAttributesClass.
    +func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) {
    +	emptyResult := &v1beta1.VolumeAttributesClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.VolumeAttributesClass), err
    +}
    +
    +// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass.
    +func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) {
    +	if volumeAttributesClass == nil {
    +		return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil")
    +	}
    +	data, err := json.Marshal(volumeAttributesClass)
    +	if err != nil {
    +		return nil, err
    +	}
    +	name := volumeAttributesClass.Name
    +	if name == nil {
    +		return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply")
    +	}
    +	emptyResult := &v1beta1.VolumeAttributesClass{}
    +	obj, err := c.Fake.
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
    +	if obj == nil {
    +		return emptyResult, err
    +	}
    +	return obj.(*v1beta1.VolumeAttributesClass), err
    +}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go
    index 1a202a928e..ebf78e10bc 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go
    @@ -27,3 +27,5 @@ type CSIStorageCapacityExpansion interface{}
     type StorageClassExpansion interface{}
     
     type VolumeAttachmentExpansion interface{}
    +
    +type VolumeAttributesClassExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
    index 4c7604bd29..3d1b59e36c 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
    @@ -33,6 +33,7 @@ type StorageV1beta1Interface interface {
     	CSIStorageCapacitiesGetter
     	StorageClassesGetter
     	VolumeAttachmentsGetter
    +	VolumeAttributesClassesGetter
     }
     
     // StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group.
    @@ -60,6 +61,10 @@ func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface {
     	return newVolumeAttachments(c)
     }
     
    +func (c *StorageV1beta1Client) VolumeAttributesClasses() VolumeAttributesClassInterface {
    +	return newVolumeAttributesClasses(c)
    +}
    +
     // NewForConfig creates a new StorageV1beta1Client for the given config.
     // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
     // where httpClient was generated with rest.HTTPClientFor(c).
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
    index 9b4ef231c8..fed699cc85 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/storage/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // StorageClassesGetter has a method to return a StorageClassInterface.
    @@ -55,143 +52,18 @@ type StorageClassInterface interface {
     
     // storageClasses implements StorageClassInterface
     type storageClasses struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration]
     }
     
     // newStorageClasses returns a StorageClasses
     func newStorageClasses(c *StorageV1beta1Client) *storageClasses {
     	return &storageClasses{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration](
    +			"storageclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.StorageClass { return &v1beta1.StorageClass{} },
    +			func() *v1beta1.StorageClassList { return &v1beta1.StorageClassList{} }),
     	}
     }
    -
    -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
    -func (c *storageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) {
    -	result = &v1beta1.StorageClass{}
    -	err = c.client.Get().
    -		Resource("storageclasses").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
    -func (c *storageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.StorageClassList{}
    -	err = c.client.Get().
    -		Resource("storageclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested storageClasses.
    -func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("storageclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a storageClass and creates it.  Returns the server's representation of the storageClass, and an error, if there is any.
    -func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) {
    -	result = &v1beta1.StorageClass{}
    -	err = c.client.Post().
    -		Resource("storageclasses").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
    -func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) {
    -	result = &v1beta1.StorageClass{}
    -	err = c.client.Put().
    -		Resource("storageclasses").
    -		Name(storageClass.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageClass).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
    -func (c *storageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("storageclasses").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *storageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("storageclasses").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched storageClass.
    -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) {
    -	result = &v1beta1.StorageClass{}
    -	err = c.client.Patch(pt).
    -		Resource("storageclasses").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass.
    -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) {
    -	if storageClass == nil {
    -		return nil, fmt.Errorf("storageClass provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(storageClass)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := storageClass.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("storageClass.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.StorageClass{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("storageclasses").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
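
Illustrative note (not part of the patch): the hand-written REST plumbing removed above is replaced by the shared generic `gentype.ClientWithListAndApply`, so the public surface of the typed client is unchanged. A minimal sketch of a caller, assuming a kubeconfig path is available; the function name is hypothetical:

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func listStorageClasses(kubeconfig string) error {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	// StorageClasses() is now backed by the gentype implementation, but call
	// sites look exactly as they did with the previous hand-written client.
	scs, err := clientset.StorageV1beta1().StorageClasses().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, sc := range scs.Items {
		fmt.Println(sc.Name, sc.Provisioner)
	}
	return nil
}
```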
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
    index 35a8b64fcc..01024ce485 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
    @@ -20,17 +20,14 @@ package v1beta1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1beta1 "k8s.io/api/storage/v1beta1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface.
    @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface {
     type VolumeAttachmentInterface interface {
     	Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error)
     	Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error)
     	Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error)
     	VolumeAttachmentExpansion
     }
     
     // volumeAttachments implements VolumeAttachmentInterface
     type volumeAttachments struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration]
     }
     
     // newVolumeAttachments returns a VolumeAttachments
     func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments {
     	return &volumeAttachments{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration](
    +			"volumeattachments",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.VolumeAttachment { return &v1beta1.VolumeAttachment{} },
    +			func() *v1beta1.VolumeAttachmentList { return &v1beta1.VolumeAttachmentList{} }),
     	}
     }
    -
    -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
    -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) {
    -	result = &v1beta1.VolumeAttachment{}
    -	err = c.client.Get().
    -		Resource("volumeattachments").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
    -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1beta1.VolumeAttachmentList{}
    -	err = c.client.Get().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested volumeAttachments.
    -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
    -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) {
    -	result = &v1beta1.VolumeAttachment{}
    -	err = c.client.Post().
    -		Resource("volumeattachments").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
    -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) {
    -	result = &v1beta1.VolumeAttachment{}
    -	err = c.client.Put().
    -		Resource("volumeattachments").
    -		Name(volumeAttachment.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) {
    -	result = &v1beta1.VolumeAttachment{}
    -	err = c.client.Put().
    -		Resource("volumeattachments").
    -		Name(volumeAttachment.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(volumeAttachment).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
    -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("volumeattachments").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("volumeattachments").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched volumeAttachment.
    -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) {
    -	result = &v1beta1.VolumeAttachment{}
    -	err = c.client.Patch(pt).
    -		Resource("volumeattachments").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment.
    -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) {
    -	if volumeAttachment == nil {
    -		return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(volumeAttachment)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := volumeAttachment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
    -	}
    -	result = &v1beta1.VolumeAttachment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("volumeattachments").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) {
    -	if volumeAttachment == nil {
    -		return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(volumeAttachment)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := volumeAttachment.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply")
    -	}
    -
    -	result = &v1beta1.VolumeAttachment{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("volumeattachments").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go
    new file mode 100644
    index 0000000000..47eadcac63
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go
    @@ -0,0 +1,69 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by client-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	"context"
    +
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	types "k8s.io/apimachinery/pkg/types"
    +	watch "k8s.io/apimachinery/pkg/watch"
    +	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
    +	gentype "k8s.io/client-go/gentype"
    +	scheme "k8s.io/client-go/kubernetes/scheme"
    +)
    +
    +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface.
    +// A group's client should implement this interface.
    +type VolumeAttributesClassesGetter interface {
    +	VolumeAttributesClasses() VolumeAttributesClassInterface
    +}
    +
    +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources.
    +type VolumeAttributesClassInterface interface {
    +	Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*v1beta1.VolumeAttributesClass, error)
    +	Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1beta1.VolumeAttributesClass, error)
    +	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    +	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    +	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttributesClass, error)
    +	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttributesClassList, error)
    +	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    +	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error)
    +	Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error)
    +	VolumeAttributesClassExpansion
    +}
    +
    +// volumeAttributesClasses implements VolumeAttributesClassInterface
    +type volumeAttributesClasses struct {
    +	*gentype.ClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration]
    +}
    +
    +// newVolumeAttributesClasses returns a VolumeAttributesClasses
    +func newVolumeAttributesClasses(c *StorageV1beta1Client) *volumeAttributesClasses {
    +	return &volumeAttributesClasses{
    +		gentype.NewClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration](
    +			"volumeattributesclasses",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1beta1.VolumeAttributesClass { return &v1beta1.VolumeAttributesClass{} },
    +			func() *v1beta1.VolumeAttributesClassList { return &v1beta1.VolumeAttributesClassList{} }),
    +	}
    +}
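
Illustrative note (not part of the patch): the new file above wires a `VolumeAttributesClass` client into `StorageV1beta1Interface`. A minimal sketch of using it, assuming a cluster that serves storage.k8s.io/v1beta1 with the VolumeAttributesClass feature enabled; the helper name is hypothetical:

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func printVolumeAttributesClasses(ctx context.Context, clientset kubernetes.Interface) error {
	// VolumeAttributesClasses() is the accessor added by this change.
	vacs, err := clientset.StorageV1beta1().VolumeAttributesClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, vac := range vacs.Items {
		fmt.Println(vac.Name, vac.DriverName)
	}
	return nil
}
```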
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go
    index 9b5da88c72..c3ff235912 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go
    @@ -43,20 +43,22 @@ var storageversionmigrationsKind = v1alpha1.SchemeGroupVersion.WithKind("Storage
     
     // Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any.
     func (c *FakeStorageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    +	emptyResult := &v1alpha1.StorageVersionMigration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootGetAction(storageversionmigrationsResource, name), &v1alpha1.StorageVersionMigration{})
    +		Invokes(testing.NewRootGetActionWithOptions(storageversionmigrationsResource, name, options), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersionMigration), err
     }
     
     // List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors.
     func (c *FakeStorageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) {
    +	emptyResult := &v1alpha1.StorageVersionMigrationList{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootListAction(storageversionmigrationsResource, storageversionmigrationsKind, opts), &v1alpha1.StorageVersionMigrationList{})
    +		Invokes(testing.NewRootListActionWithOptions(storageversionmigrationsResource, storageversionmigrationsKind, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     
     	label, _, _ := testing.ExtractFromListOptions(opts)
    @@ -75,36 +77,39 @@ func (c *FakeStorageVersionMigrations) List(ctx context.Context, opts v1.ListOpt
     // Watch returns a watch.Interface that watches the requested storageVersionMigrations.
     func (c *FakeStorageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
     	return c.Fake.
    -		InvokesWatch(testing.NewRootWatchAction(storageversionmigrationsResource, opts))
    +		InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionmigrationsResource, opts))
     }
     
     // Create takes the representation of a storageVersionMigration and creates it.  Returns the server's representation of the storageVersionMigration, and an error, if there is any.
     func (c *FakeStorageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    +	emptyResult := &v1alpha1.StorageVersionMigration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootCreateAction(storageversionmigrationsResource, storageVersionMigration), &v1alpha1.StorageVersionMigration{})
    +		Invokes(testing.NewRootCreateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersionMigration), err
     }
     
     // Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any.
     func (c *FakeStorageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    +	emptyResult := &v1alpha1.StorageVersionMigration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateAction(storageversionmigrationsResource, storageVersionMigration), &v1alpha1.StorageVersionMigration{})
    +		Invokes(testing.NewRootUpdateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersionMigration), err
     }
     
     // UpdateStatus was generated because the type contains a Status member.
     // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *FakeStorageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) {
    +func (c *FakeStorageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    +	emptyResult := &v1alpha1.StorageVersionMigration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootUpdateSubresourceAction(storageversionmigrationsResource, "status", storageVersionMigration), &v1alpha1.StorageVersionMigration{})
    +		Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionmigrationsResource, "status", storageVersionMigration, opts), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersionMigration), err
     }
    @@ -118,7 +123,7 @@ func (c *FakeStorageVersionMigrations) Delete(ctx context.Context, name string,
     
     // DeleteCollection deletes a collection of objects.
     func (c *FakeStorageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	action := testing.NewRootDeleteCollectionAction(storageversionmigrationsResource, listOpts)
    +	action := testing.NewRootDeleteCollectionActionWithOptions(storageversionmigrationsResource, opts, listOpts)
     
     	_, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionMigrationList{})
     	return err
    @@ -126,10 +131,11 @@ func (c *FakeStorageVersionMigrations) DeleteCollection(ctx context.Context, opt
     
     // Patch applies the patch and returns the patched storageVersionMigration.
     func (c *FakeStorageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) {
    +	emptyResult := &v1alpha1.StorageVersionMigration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, name, pt, data, subresources...), &v1alpha1.StorageVersionMigration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, name, pt, data, opts, subresources...), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersionMigration), err
     }
    @@ -147,10 +153,11 @@ func (c *FakeStorageVersionMigrations) Apply(ctx context.Context, storageVersion
     	if name == nil {
     		return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.StorageVersionMigration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, *name, types.ApplyPatchType, data), &v1alpha1.StorageVersionMigration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersionMigration), err
     }
    @@ -169,10 +176,11 @@ func (c *FakeStorageVersionMigrations) ApplyStatus(ctx context.Context, storageV
     	if name == nil {
     		return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply")
     	}
    +	emptyResult := &v1alpha1.StorageVersionMigration{}
     	obj, err := c.Fake.
    -		Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.StorageVersionMigration{})
    +		Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
     	if obj == nil {
    -		return nil, err
    +		return emptyResult, err
     	}
     	return obj.(*v1alpha1.StorageVersionMigration), err
     }
    diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
    index be66a5b946..5fc0fd5197 100644
    --- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
    +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
    @@ -20,17 +20,14 @@ package v1alpha1
     
     import (
     	"context"
    -	json "encoding/json"
    -	"fmt"
    -	"time"
     
     	v1alpha1 "k8s.io/api/storagemigration/v1alpha1"
     	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	types "k8s.io/apimachinery/pkg/types"
     	watch "k8s.io/apimachinery/pkg/watch"
     	storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
    +	gentype "k8s.io/client-go/gentype"
     	scheme "k8s.io/client-go/kubernetes/scheme"
    -	rest "k8s.io/client-go/rest"
     )
     
     // StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface.
    @@ -43,6 +40,7 @@ type StorageVersionMigrationsGetter interface {
     type StorageVersionMigrationInterface interface {
     	Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error)
     	Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
     	UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error)
     	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
     	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    @@ -51,193 +49,25 @@ type StorageVersionMigrationInterface interface {
     	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
     	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error)
     	Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error)
    +	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
     	ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error)
     	StorageVersionMigrationExpansion
     }
     
     // storageVersionMigrations implements StorageVersionMigrationInterface
     type storageVersionMigrations struct {
    -	client rest.Interface
    +	*gentype.ClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]
     }
     
     // newStorageVersionMigrations returns a StorageVersionMigrations
     func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations {
     	return &storageVersionMigrations{
    -		client: c.RESTClient(),
    +		gentype.NewClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration](
    +			"storageversionmigrations",
    +			c.RESTClient(),
    +			scheme.ParameterCodec,
    +			"",
    +			func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} },
    +			func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} }),
     	}
     }
    -
    -// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any.
    -func (c *storageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    -	result = &v1alpha1.StorageVersionMigration{}
    -	err = c.client.Get().
    -		Resource("storageversionmigrations").
    -		Name(name).
    -		VersionedParams(&options, scheme.ParameterCodec).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors.
    -func (c *storageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	result = &v1alpha1.StorageVersionMigrationList{}
    -	err = c.client.Get().
    -		Resource("storageversionmigrations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Watch returns a watch.Interface that watches the requested storageVersionMigrations.
    -func (c *storageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    -	var timeout time.Duration
    -	if opts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    -	}
    -	opts.Watch = true
    -	return c.client.Get().
    -		Resource("storageversionmigrations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Watch(ctx)
    -}
    -
    -// Create takes the representation of a storageVersionMigration and creates it.  Returns the server's representation of the storageVersionMigration, and an error, if there is any.
    -func (c *storageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    -	result = &v1alpha1.StorageVersionMigration{}
    -	err = c.client.Post().
    -		Resource("storageversionmigrations").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageVersionMigration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any.
    -func (c *storageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    -	result = &v1alpha1.StorageVersionMigration{}
    -	err = c.client.Put().
    -		Resource("storageversionmigrations").
    -		Name(storageVersionMigration.Name).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageVersionMigration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// UpdateStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
    -func (c *storageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    -	result = &v1alpha1.StorageVersionMigration{}
    -	err = c.client.Put().
    -		Resource("storageversionmigrations").
    -		Name(storageVersionMigration.Name).
    -		SubResource("status").
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(storageVersionMigration).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs.
    -func (c *storageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    -	return c.client.Delete().
    -		Resource("storageversionmigrations").
    -		Name(name).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// DeleteCollection deletes a collection of objects.
    -func (c *storageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    -	var timeout time.Duration
    -	if listOpts.TimeoutSeconds != nil {
    -		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    -	}
    -	return c.client.Delete().
    -		Resource("storageversionmigrations").
    -		VersionedParams(&listOpts, scheme.ParameterCodec).
    -		Timeout(timeout).
    -		Body(&opts).
    -		Do(ctx).
    -		Error()
    -}
    -
    -// Patch applies the patch and returns the patched storageVersionMigration.
    -func (c *storageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) {
    -	result = &v1alpha1.StorageVersionMigration{}
    -	err = c.client.Patch(pt).
    -		Resource("storageversionmigrations").
    -		Name(name).
    -		SubResource(subresources...).
    -		VersionedParams(&opts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersionMigration.
    -func (c *storageVersionMigrations) Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    -	if storageVersionMigration == nil {
    -		return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(storageVersionMigration)
    -	if err != nil {
    -		return nil, err
    -	}
    -	name := storageVersionMigration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply")
    -	}
    -	result = &v1alpha1.StorageVersionMigration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("storageversionmigrations").
    -		Name(*name).
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    -
    -// ApplyStatus was generated because the type contains a Status member.
    -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
    -func (c *storageVersionMigrations) ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) {
    -	if storageVersionMigration == nil {
    -		return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil")
    -	}
    -	patchOpts := opts.ToPatchOptions()
    -	data, err := json.Marshal(storageVersionMigration)
    -	if err != nil {
    -		return nil, err
    -	}
    -
    -	name := storageVersionMigration.Name
    -	if name == nil {
    -		return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply")
    -	}
    -
    -	result = &v1alpha1.StorageVersionMigration{}
    -	err = c.client.Patch(types.ApplyPatchType).
    -		Resource("storageversionmigrations").
    -		Name(*name).
    -		SubResource("status").
    -		VersionedParams(&patchOpts, scheme.ParameterCodec).
    -		Body(data).
    -		Do(ctx).
    -		Into(result)
    -	return
    -}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..9002ad6ea3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go
    @@ -0,0 +1,35 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to
    +// MutatingWebhookConfigurationLister.
    +type MutatingWebhookConfigurationListerExpansion interface{}
    +
    +// ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to
    +// ValidatingAdmissionPolicyLister.
    +type ValidatingAdmissionPolicyListerExpansion interface{}
    +
    +// ValidatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to
    +// ValidatingAdmissionPolicyBindingLister.
    +type ValidatingAdmissionPolicyBindingListerExpansion interface{}
    +
    +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to
    +// ValidatingWebhookConfigurationLister.
    +type ValidatingWebhookConfigurationListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..4ab267e42e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/admissionregistration/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations.
    +// All objects returned here must be treated as read-only.
    +type MutatingWebhookConfigurationLister interface {
    +	// List lists all MutatingWebhookConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error)
    +	// Get retrieves the MutatingWebhookConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.MutatingWebhookConfiguration, error)
    +	MutatingWebhookConfigurationListerExpansion
    +}
    +
    +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface.
    +type mutatingWebhookConfigurationLister struct {
    +	listers.ResourceIndexer[*v1.MutatingWebhookConfiguration]
    +}
    +
    +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister.
    +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister {
    +	return &mutatingWebhookConfigurationLister{listers.New[*v1.MutatingWebhookConfiguration](indexer, v1.Resource("mutatingwebhookconfiguration"))}
    +}
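
The generated listers above only read from a local cache.Indexer. As a minimal sketch (not part of the vendored diff; the indexer contents and webhook name are hypothetical), this is roughly how the cluster-scoped MutatingWebhookConfiguration lister can be exercised against a hand-filled indexer:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	admissionlisters "k8s.io/client-go/listers/admissionregistration/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// In real code the indexer is owned by a shared informer; here it is
	// filled by hand purely for illustration.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&admissionregistrationv1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-webhook"}, // hypothetical name
	})

	lister := admissionlisters.NewMutatingWebhookConfigurationLister(indexer)

	// List and Get read straight from the cache; results must be treated as
	// read-only, as the generated doc comments state.
	cfgs, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, cfg := range cfgs {
		fmt.Println(cfg.Name)
	}
}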
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go
    new file mode 100644
    index 0000000000..f233cdbe80
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/admissionregistration/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies.
    +// All objects returned here must be treated as read-only.
    +type ValidatingAdmissionPolicyLister interface {
    +	// List lists all ValidatingAdmissionPolicies in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicy, err error)
    +	// Get retrieves the ValidatingAdmissionPolicy from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ValidatingAdmissionPolicy, error)
    +	ValidatingAdmissionPolicyListerExpansion
    +}
    +
    +// validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface.
    +type validatingAdmissionPolicyLister struct {
    +	listers.ResourceIndexer[*v1.ValidatingAdmissionPolicy]
    +}
    +
    +// NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister.
    +func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister {
    +	return &validatingAdmissionPolicyLister{listers.New[*v1.ValidatingAdmissionPolicy](indexer, v1.Resource("validatingadmissionpolicy"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go
    new file mode 100644
    index 0000000000..450a066725
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/admissionregistration/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings.
    +// All objects returned here must be treated as read-only.
    +type ValidatingAdmissionPolicyBindingLister interface {
    +	// List lists all ValidatingAdmissionPolicyBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicyBinding, err error)
    +	// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ValidatingAdmissionPolicyBinding, error)
    +	ValidatingAdmissionPolicyBindingListerExpansion
    +}
    +
    +// validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface.
    +type validatingAdmissionPolicyBindingLister struct {
    +	listers.ResourceIndexer[*v1.ValidatingAdmissionPolicyBinding]
    +}
    +
    +// NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister.
    +func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister {
    +	return &validatingAdmissionPolicyBindingLister{listers.New[*v1.ValidatingAdmissionPolicyBinding](indexer, v1.Resource("validatingadmissionpolicybinding"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..99045a6752
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/admissionregistration/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations.
    +// All objects returned here must be treated as read-only.
    +type ValidatingWebhookConfigurationLister interface {
    +	// List lists all ValidatingWebhookConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error)
    +	// Get retrieves the ValidatingWebhookConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ValidatingWebhookConfiguration, error)
    +	ValidatingWebhookConfigurationListerExpansion
    +}
    +
    +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface.
    +type validatingWebhookConfigurationLister struct {
    +	listers.ResourceIndexer[*v1.ValidatingWebhookConfiguration]
    +}
    +
    +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister.
    +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister {
    +	return &validatingWebhookConfigurationLister{listers.New[*v1.ValidatingWebhookConfiguration](indexer, v1.Resource("validatingwebhookconfiguration"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..3f8b7819ce
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to
    +// ValidatingAdmissionPolicyLister.
    +type ValidatingAdmissionPolicyListerExpansion interface{}
    +
    +// ValidatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to
    +// ValidatingAdmissionPolicyBindingLister.
    +type ValidatingAdmissionPolicyBindingListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    new file mode 100644
    index 0000000000..c3aec2d736
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies.
    +// All objects returned here must be treated as read-only.
    +type ValidatingAdmissionPolicyLister interface {
    +	// List lists all ValidatingAdmissionPolicies in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error)
    +	// Get retrieves the ValidatingAdmissionPolicy from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error)
    +	ValidatingAdmissionPolicyListerExpansion
    +}
    +
    +// validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface.
    +type validatingAdmissionPolicyLister struct {
    +	listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicy]
    +}
    +
    +// NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister.
    +func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister {
    +	return &validatingAdmissionPolicyLister{listers.New[*v1alpha1.ValidatingAdmissionPolicy](indexer, v1alpha1.Resource("validatingadmissionpolicy"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    new file mode 100644
    index 0000000000..5a2cf79c53
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings.
    +// All objects returned here must be treated as read-only.
    +type ValidatingAdmissionPolicyBindingLister interface {
    +	// List lists all ValidatingAdmissionPolicyBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error)
    +	// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error)
    +	ValidatingAdmissionPolicyBindingListerExpansion
    +}
    +
    +// validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface.
    +type validatingAdmissionPolicyBindingLister struct {
    +	listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicyBinding]
    +}
    +
    +// NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister.
    +func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister {
    +	return &validatingAdmissionPolicyBindingLister{listers.New[*v1alpha1.ValidatingAdmissionPolicyBinding](indexer, v1alpha1.Resource("validatingadmissionpolicybinding"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..7148781f42
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go
    @@ -0,0 +1,35 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to
    +// MutatingWebhookConfigurationLister.
    +type MutatingWebhookConfigurationListerExpansion interface{}
    +
    +// ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to
    +// ValidatingAdmissionPolicyLister.
    +type ValidatingAdmissionPolicyListerExpansion interface{}
    +
    +// ValidatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to
    +// ValidatingAdmissionPolicyBindingLister.
    +type ValidatingAdmissionPolicyBindingListerExpansion interface{}
    +
    +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to
    +// ValidatingWebhookConfigurationLister.
    +type ValidatingWebhookConfigurationListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..3bad49ac0a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations.
    +// All objects returned here must be treated as read-only.
    +type MutatingWebhookConfigurationLister interface {
    +	// List lists all MutatingWebhookConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error)
    +	// Get retrieves the MutatingWebhookConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.MutatingWebhookConfiguration, error)
    +	MutatingWebhookConfigurationListerExpansion
    +}
    +
    +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface.
    +type mutatingWebhookConfigurationLister struct {
    +	listers.ResourceIndexer[*v1beta1.MutatingWebhookConfiguration]
    +}
    +
    +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister.
    +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister {
    +	return &mutatingWebhookConfigurationLister{listers.New[*v1beta1.MutatingWebhookConfiguration](indexer, v1beta1.Resource("mutatingwebhookconfiguration"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go
    new file mode 100644
    index 0000000000..74d7c6ce3e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies.
    +// All objects returned here must be treated as read-only.
    +type ValidatingAdmissionPolicyLister interface {
    +	// List lists all ValidatingAdmissionPolicies in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error)
    +	// Get retrieves the ValidatingAdmissionPolicy from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error)
    +	ValidatingAdmissionPolicyListerExpansion
    +}
    +
    +// validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface.
    +type validatingAdmissionPolicyLister struct {
    +	listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicy]
    +}
    +
    +// NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister.
    +func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister {
    +	return &validatingAdmissionPolicyLister{listers.New[*v1beta1.ValidatingAdmissionPolicy](indexer, v1beta1.Resource("validatingadmissionpolicy"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    new file mode 100644
    index 0000000000..668d652bb7
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings.
    +// All objects returned here must be treated as read-only.
    +type ValidatingAdmissionPolicyBindingLister interface {
    +	// List lists all ValidatingAdmissionPolicyBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error)
    +	// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
    +	ValidatingAdmissionPolicyBindingListerExpansion
    +}
    +
    +// validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface.
    +type validatingAdmissionPolicyBindingLister struct {
    +	listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicyBinding]
    +}
    +
    +// NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister.
    +func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister {
    +	return &validatingAdmissionPolicyBindingLister{listers.New[*v1beta1.ValidatingAdmissionPolicyBinding](indexer, v1beta1.Resource("validatingadmissionpolicybinding"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    new file mode 100644
    index 0000000000..16167d5738
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations.
    +// All objects returned here must be treated as read-only.
    +type ValidatingWebhookConfigurationLister interface {
    +	// List lists all ValidatingWebhookConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error)
    +	// Get retrieves the ValidatingWebhookConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error)
    +	ValidatingWebhookConfigurationListerExpansion
    +}
    +
    +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface.
    +type validatingWebhookConfigurationLister struct {
    +	listers.ResourceIndexer[*v1beta1.ValidatingWebhookConfiguration]
    +}
    +
    +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister.
    +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister {
    +	return &validatingWebhookConfigurationLister{listers.New[*v1beta1.ValidatingWebhookConfiguration](indexer, v1beta1.Resource("validatingwebhookconfiguration"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..ad860c7c95
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// StorageVersionListerExpansion allows custom methods to be added to
    +// StorageVersionLister.
    +type StorageVersionListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go
    new file mode 100644
    index 0000000000..ce51b88f28
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageVersionLister helps list StorageVersions.
    +// All objects returned here must be treated as read-only.
    +type StorageVersionLister interface {
    +	// List lists all StorageVersions in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error)
    +	// Get retrieves the StorageVersion from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.StorageVersion, error)
    +	StorageVersionListerExpansion
    +}
    +
    +// storageVersionLister implements the StorageVersionLister interface.
    +type storageVersionLister struct {
    +	listers.ResourceIndexer[*v1alpha1.StorageVersion]
    +}
    +
    +// NewStorageVersionLister returns a new StorageVersionLister.
    +func NewStorageVersionLister(indexer cache.Indexer) StorageVersionLister {
    +	return &storageVersionLister{listers.New[*v1alpha1.StorageVersion](indexer, v1alpha1.Resource("storageversion"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go
    new file mode 100644
    index 0000000000..b9061b159e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/apps/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ControllerRevisionLister helps list ControllerRevisions.
    +// All objects returned here must be treated as read-only.
    +type ControllerRevisionLister interface {
    +	// List lists all ControllerRevisions in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ControllerRevision, err error)
    +	// ControllerRevisions returns an object that can list and get ControllerRevisions.
    +	ControllerRevisions(namespace string) ControllerRevisionNamespaceLister
    +	ControllerRevisionListerExpansion
    +}
    +
    +// controllerRevisionLister implements the ControllerRevisionLister interface.
    +type controllerRevisionLister struct {
    +	listers.ResourceIndexer[*v1.ControllerRevision]
    +}
    +
    +// NewControllerRevisionLister returns a new ControllerRevisionLister.
    +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister {
    +	return &controllerRevisionLister{listers.New[*v1.ControllerRevision](indexer, v1.Resource("controllerrevision"))}
    +}
    +
    +// ControllerRevisions returns an object that can list and get ControllerRevisions.
    +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister {
    +	return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1.ControllerRevision](s.ResourceIndexer, namespace)}
    +}
    +
    +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions.
    +// All objects returned here must be treated as read-only.
    +type ControllerRevisionNamespaceLister interface {
    +	// List lists all ControllerRevisions in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ControllerRevision, err error)
    +	// Get retrieves the ControllerRevision from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ControllerRevision, error)
    +	ControllerRevisionNamespaceListerExpansion
    +}
    +
    +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister
    +// interface.
    +type controllerRevisionNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.ControllerRevision]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go
    new file mode 100644
    index 0000000000..4240cb6248
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/apps/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DaemonSetLister helps list DaemonSets.
    +// All objects returned here must be treated as read-only.
    +type DaemonSetLister interface {
    +	// List lists all DaemonSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.DaemonSet, err error)
    +	// DaemonSets returns an object that can list and get DaemonSets.
    +	DaemonSets(namespace string) DaemonSetNamespaceLister
    +	DaemonSetListerExpansion
    +}
    +
    +// daemonSetLister implements the DaemonSetLister interface.
    +type daemonSetLister struct {
    +	listers.ResourceIndexer[*v1.DaemonSet]
    +}
    +
    +// NewDaemonSetLister returns a new DaemonSetLister.
    +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister {
    +	return &daemonSetLister{listers.New[*v1.DaemonSet](indexer, v1.Resource("daemonset"))}
    +}
    +
    +// DaemonSets returns an object that can list and get DaemonSets.
    +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister {
    +	return daemonSetNamespaceLister{listers.NewNamespaced[*v1.DaemonSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// DaemonSetNamespaceLister helps list and get DaemonSets.
    +// All objects returned here must be treated as read-only.
    +type DaemonSetNamespaceLister interface {
    +	// List lists all DaemonSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.DaemonSet, err error)
    +	// Get retrieves the DaemonSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.DaemonSet, error)
    +	DaemonSetNamespaceListerExpansion
    +}
    +
    +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister
    +// interface.
    +type daemonSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.DaemonSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go
    new file mode 100644
    index 0000000000..667d6fb88e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go
    @@ -0,0 +1,114 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// DaemonSetListerExpansion allows custom methods to be added to
    +// DaemonSetLister.
    +type DaemonSetListerExpansion interface {
    +	GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error)
    +	GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error)
    +}
    +
    +// DaemonSetNamespaceListerExpansion allows custom methods to be added to
    +// DaemonSetNamespaceLister.
    +type DaemonSetNamespaceListerExpansion interface{}
    +
    +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching DaemonSets are found.
    +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) {
    +	var selector labels.Selector
    +	var daemonSet *apps.DaemonSet
    +
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.DaemonSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var daemonSets []*apps.DaemonSet
    +	for i := range list {
    +		daemonSet = list[i]
    +		if daemonSet.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		daemonSets = append(daemonSets, daemonSet)
    +	}
    +
    +	if len(daemonSets) == 0 {
    +		return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return daemonSets, nil
    +}
    +
    +// GetHistoryDaemonSets returns a list of DaemonSets that potentially
    +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef
    +// will actually manage it.
    +// Returns an error only if no matching DaemonSets are found.
    +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) {
    +	if len(history.Labels) == 0 {
    +		return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name)
    +	}
    +
    +	list, err := s.DaemonSets(history.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var daemonSets []*apps.DaemonSet
    +	for _, ds := range list {
    +		selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the history
    +			continue
    +		}
    +		// If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) {
    +			continue
    +		}
    +		daemonSets = append(daemonSets, ds)
    +	}
    +
    +	if len(daemonSets) == 0 {
    +		return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels)
    +	}
    +
    +	return daemonSets, nil
    +}
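
A hedged usage sketch (not part of the vendored diff; all object names and labels are hypothetical) for the expansion helper above: GetPodDaemonSets scans the cached DaemonSets in the Pod's namespace and keeps those whose selector matches the Pod's labels, returning an error only when nothing matches.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Namespace index so the namespaced lister can look up by namespace
	// instead of falling back to a full scan of the cache.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	_ = indexer.Add(&appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Namespace: "kube-system"},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "node-agent"}},
		},
	})

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "node-agent-abc12",
		Namespace: "kube-system",
		Labels:    map[string]string{"app": "node-agent"},
	}}

	lister := appslisters.NewDaemonSetLister(indexer)
	owners, err := lister.GetPodDaemonSets(pod)
	if err != nil {
		panic(err) // returned only when no cached DaemonSet matches the pod
	}
	for _, ds := range owners {
		fmt.Println(ds.Name) // prints "node-agent"
	}
}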
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go
    new file mode 100644
    index 0000000000..3337026b73
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/apps/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentLister helps list Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentLister interface {
    +	// List lists all Deployments in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Deployment, err error)
    +	// Deployments returns an object that can list and get Deployments.
    +	Deployments(namespace string) DeploymentNamespaceLister
    +	DeploymentListerExpansion
    +}
    +
    +// deploymentLister implements the DeploymentLister interface.
    +type deploymentLister struct {
    +	listers.ResourceIndexer[*v1.Deployment]
    +}
    +
    +// NewDeploymentLister returns a new DeploymentLister.
    +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister {
    +	return &deploymentLister{listers.New[*v1.Deployment](indexer, v1.Resource("deployment"))}
    +}
    +
    +// Deployments returns an object that can list and get Deployments.
    +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister {
    +	return deploymentNamespaceLister{listers.NewNamespaced[*v1.Deployment](s.ResourceIndexer, namespace)}
    +}
    +
    +// DeploymentNamespaceLister helps list and get Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentNamespaceLister interface {
    +	// List lists all Deployments in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Deployment, err error)
    +	// Get retrieves the Deployment from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Deployment, error)
    +	DeploymentNamespaceListerExpansion
    +}
    +
    +// deploymentNamespaceLister implements the DeploymentNamespaceLister
    +// interface.
    +type deploymentNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Deployment]
    +}
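
One more hedged sketch (again not part of the vendored diff; namespace and name are hypothetical): the Deployments(namespace) accessor above narrows the same cache to a single namespace before Get or List.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	_ = indexer.Add(&appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}, // hypothetical object
	})

	lister := appslisters.NewDeploymentLister(indexer)

	// Deployments(namespace) returns a namespace-scoped view of the same cache.
	dep, err := lister.Deployments("default").Get("web")
	if err != nil {
		panic(err) // a cache miss surfaces as a NotFound API error
	}
	fmt.Println(dep.Name)
}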
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..0c357589d0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go
    @@ -0,0 +1,35 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// ControllerRevisionListerExpansion allows custom methods to be added to
    +// ControllerRevisionLister.
    +type ControllerRevisionListerExpansion interface{}
    +
    +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to
    +// ControllerRevisionNamespaceLister.
    +type ControllerRevisionNamespaceListerExpansion interface{}
    +
    +// DeploymentListerExpansion allows custom methods to be added to
    +// DeploymentLister.
    +type DeploymentListerExpansion interface{}
    +
    +// DeploymentNamespaceListerExpansion allows custom methods to be added to
    +// DeploymentNamespaceLister.
    +type DeploymentNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go
    new file mode 100644
    index 0000000000..244df1d33f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/apps/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicaSetLister helps list ReplicaSets.
    +// All objects returned here must be treated as read-only.
    +type ReplicaSetLister interface {
    +	// List lists all ReplicaSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ReplicaSet, err error)
    +	// ReplicaSets returns an object that can list and get ReplicaSets.
    +	ReplicaSets(namespace string) ReplicaSetNamespaceLister
    +	ReplicaSetListerExpansion
    +}
    +
    +// replicaSetLister implements the ReplicaSetLister interface.
    +type replicaSetLister struct {
    +	listers.ResourceIndexer[*v1.ReplicaSet]
    +}
    +
    +// NewReplicaSetLister returns a new ReplicaSetLister.
    +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister {
    +	return &replicaSetLister{listers.New[*v1.ReplicaSet](indexer, v1.Resource("replicaset"))}
    +}
    +
    +// ReplicaSets returns an object that can list and get ReplicaSets.
    +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister {
    +	return replicaSetNamespaceLister{listers.NewNamespaced[*v1.ReplicaSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// ReplicaSetNamespaceLister helps list and get ReplicaSets.
    +// All objects returned here must be treated as read-only.
    +type ReplicaSetNamespaceLister interface {
    +	// List lists all ReplicaSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ReplicaSet, err error)
    +	// Get retrieves the ReplicaSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ReplicaSet, error)
    +	ReplicaSetNamespaceListerExpansion
    +}
    +
    +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister
    +// interface.
    +type replicaSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.ReplicaSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go
    new file mode 100644
    index 0000000000..8e093de0a0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go
    @@ -0,0 +1,74 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// ReplicaSetListerExpansion allows custom methods to be added to
    +// ReplicaSetLister.
    +type ReplicaSetListerExpansion interface {
    +	GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error)
    +}
    +
    +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to
    +// ReplicaSetNamespaceLister.
    +type ReplicaSetNamespaceListerExpansion interface{}
    +
    +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching ReplicaSets are found.
    +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) {
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var rss []*apps.ReplicaSet
    +	for _, rs := range list {
    +		if rs.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		rss = append(rss, rs)
    +	}
    +
    +	if len(rss) == 0 {
    +		return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return rss, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go
    new file mode 100644
    index 0000000000..a8dc1b022a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/apps/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// StatefulSetLister helps list StatefulSets.
    +// All objects returned here must be treated as read-only.
    +type StatefulSetLister interface {
    +	// List lists all StatefulSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.StatefulSet, err error)
    +	// StatefulSets returns an object that can list and get StatefulSets.
    +	StatefulSets(namespace string) StatefulSetNamespaceLister
    +	StatefulSetListerExpansion
    +}
    +
    +// statefulSetLister implements the StatefulSetLister interface.
    +type statefulSetLister struct {
    +	listers.ResourceIndexer[*v1.StatefulSet]
    +}
    +
    +// NewStatefulSetLister returns a new StatefulSetLister.
    +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister {
    +	return &statefulSetLister{listers.New[*v1.StatefulSet](indexer, v1.Resource("statefulset"))}
    +}
    +
    +// StatefulSets returns an object that can list and get StatefulSets.
    +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister {
    +	return statefulSetNamespaceLister{listers.NewNamespaced[*v1.StatefulSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// StatefulSetNamespaceLister helps list and get StatefulSets.
    +// All objects returned here must be treated as read-only.
    +type StatefulSetNamespaceLister interface {
    +	// List lists all StatefulSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.StatefulSet, err error)
    +	// Get retrieves the StatefulSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.StatefulSet, error)
    +	StatefulSetNamespaceListerExpansion
    +}
    +
    +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister
    +// interface.
    +type statefulSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.StatefulSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go
    new file mode 100644
    index 0000000000..e79f8a2b46
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go
    @@ -0,0 +1,78 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// StatefulSetListerExpansion allows custom methods to be added to
    +// StatefulSetLister.
    +type StatefulSetListerExpansion interface {
    +	GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error)
    +}
    +
    +// StatefulSetNamespaceListerExpansion allows custom methods to be added to
    +// StatefulSetNamespaceLister.
    +type StatefulSetNamespaceListerExpansion interface{}
    +
    +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching StatefulSets are found.
    +func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) {
    +	var selector labels.Selector
    +	var ps *apps.StatefulSet
    +
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.StatefulSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var psList []*apps.StatefulSet
    +	for i := range list {
    +		ps = list[i]
    +		if ps.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		psList = append(psList, ps)
    +	}
    +
    +	if len(psList) == 0 {
    +		return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return psList, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go
    new file mode 100644
    index 0000000000..c5e8fb3735
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/apps/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ControllerRevisionLister helps list ControllerRevisions.
    +// All objects returned here must be treated as read-only.
    +type ControllerRevisionLister interface {
    +	// List lists all ControllerRevisions in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error)
    +	// ControllerRevisions returns an object that can list and get ControllerRevisions.
    +	ControllerRevisions(namespace string) ControllerRevisionNamespaceLister
    +	ControllerRevisionListerExpansion
    +}
    +
    +// controllerRevisionLister implements the ControllerRevisionLister interface.
    +type controllerRevisionLister struct {
    +	listers.ResourceIndexer[*v1beta1.ControllerRevision]
    +}
    +
    +// NewControllerRevisionLister returns a new ControllerRevisionLister.
    +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister {
    +	return &controllerRevisionLister{listers.New[*v1beta1.ControllerRevision](indexer, v1beta1.Resource("controllerrevision"))}
    +}
    +
    +// ControllerRevisions returns an object that can list and get ControllerRevisions.
    +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister {
    +	return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta1.ControllerRevision](s.ResourceIndexer, namespace)}
    +}
    +
    +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions.
    +// All objects returned here must be treated as read-only.
    +type ControllerRevisionNamespaceLister interface {
    +	// List lists all ControllerRevisions in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error)
    +	// Get retrieves the ControllerRevision from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ControllerRevision, error)
    +	ControllerRevisionNamespaceListerExpansion
    +}
    +
    +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister
    +// interface.
    +type controllerRevisionNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.ControllerRevision]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go
    new file mode 100644
    index 0000000000..1bc6d45ad0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/apps/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentLister helps list Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentLister interface {
    +	// List lists all Deployments in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Deployment, err error)
    +	// Deployments returns an object that can list and get Deployments.
    +	Deployments(namespace string) DeploymentNamespaceLister
    +	DeploymentListerExpansion
    +}
    +
    +// deploymentLister implements the DeploymentLister interface.
    +type deploymentLister struct {
    +	listers.ResourceIndexer[*v1beta1.Deployment]
    +}
    +
    +// NewDeploymentLister returns a new DeploymentLister.
    +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister {
    +	return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))}
    +}
    +
    +// Deployments returns an object that can list and get Deployments.
    +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister {
    +	return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)}
    +}
    +
    +// DeploymentNamespaceLister helps list and get Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentNamespaceLister interface {
    +	// List lists all Deployments in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Deployment, err error)
    +	// Get retrieves the Deployment from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Deployment, error)
    +	DeploymentNamespaceListerExpansion
    +}
    +
    +// deploymentNamespaceLister implements the DeploymentNamespaceLister
    +// interface.
    +type deploymentNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Deployment]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..c73cf98c7a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go
    @@ -0,0 +1,35 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// ControllerRevisionListerExpansion allows custom methods to be added to
    +// ControllerRevisionLister.
    +type ControllerRevisionListerExpansion interface{}
    +
    +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to
    +// ControllerRevisionNamespaceLister.
    +type ControllerRevisionNamespaceListerExpansion interface{}
    +
    +// DeploymentListerExpansion allows custom methods to be added to
    +// DeploymentLister.
    +type DeploymentListerExpansion interface{}
    +
    +// DeploymentNamespaceListerExpansion allows custom methods to be added to
    +// DeploymentNamespaceLister.
    +type DeploymentNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go
    new file mode 100644
    index 0000000000..4bf103aef7
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/apps/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// StatefulSetLister helps list StatefulSets.
    +// All objects returned here must be treated as read-only.
    +type StatefulSetLister interface {
    +	// List lists all StatefulSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error)
    +	// StatefulSets returns an object that can list and get StatefulSets.
    +	StatefulSets(namespace string) StatefulSetNamespaceLister
    +	StatefulSetListerExpansion
    +}
    +
    +// statefulSetLister implements the StatefulSetLister interface.
    +type statefulSetLister struct {
    +	listers.ResourceIndexer[*v1beta1.StatefulSet]
    +}
    +
    +// NewStatefulSetLister returns a new StatefulSetLister.
    +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister {
    +	return &statefulSetLister{listers.New[*v1beta1.StatefulSet](indexer, v1beta1.Resource("statefulset"))}
    +}
    +
    +// StatefulSets returns an object that can list and get StatefulSets.
    +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister {
    +	return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta1.StatefulSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// StatefulSetNamespaceLister helps list and get StatefulSets.
    +// All objects returned here must be treated as read-only.
    +type StatefulSetNamespaceLister interface {
    +	// List lists all StatefulSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error)
    +	// Get retrieves the StatefulSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.StatefulSet, error)
    +	StatefulSetNamespaceListerExpansion
    +}
    +
    +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister
    +// interface.
    +type statefulSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.StatefulSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go
    new file mode 100644
    index 0000000000..7d2c4d9b07
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go
    @@ -0,0 +1,78 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta1
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1beta1"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// StatefulSetListerExpansion allows custom methods to be added to
    +// StatefulSetLister.
    +type StatefulSetListerExpansion interface {
    +	GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error)
    +}
    +
    +// StatefulSetNamespaceListerExpansion allows custom methods to be added to
    +// StatefulSetNamespaceLister.
    +type StatefulSetNamespaceListerExpansion interface{}
    +
    +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching StatefulSets are found.
    +func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) {
    +	var selector labels.Selector
    +	var ps *apps.StatefulSet
    +
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.StatefulSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var psList []*apps.StatefulSet
    +	for i := range list {
    +		ps = list[i]
    +		if ps.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		psList = append(psList, ps)
    +	}
    +
    +	if len(psList) == 0 {
    +		return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return psList, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go
    new file mode 100644
    index 0000000000..de941bc691
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	v1beta2 "k8s.io/api/apps/v1beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ControllerRevisionLister helps list ControllerRevisions.
    +// All objects returned here must be treated as read-only.
    +type ControllerRevisionLister interface {
    +	// List lists all ControllerRevisions in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error)
    +	// ControllerRevisions returns an object that can list and get ControllerRevisions.
    +	ControllerRevisions(namespace string) ControllerRevisionNamespaceLister
    +	ControllerRevisionListerExpansion
    +}
    +
    +// controllerRevisionLister implements the ControllerRevisionLister interface.
    +type controllerRevisionLister struct {
    +	listers.ResourceIndexer[*v1beta2.ControllerRevision]
    +}
    +
    +// NewControllerRevisionLister returns a new ControllerRevisionLister.
    +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister {
    +	return &controllerRevisionLister{listers.New[*v1beta2.ControllerRevision](indexer, v1beta2.Resource("controllerrevision"))}
    +}
    +
    +// ControllerRevisions returns an object that can list and get ControllerRevisions.
    +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister {
    +	return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta2.ControllerRevision](s.ResourceIndexer, namespace)}
    +}
    +
    +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions.
    +// All objects returned here must be treated as read-only.
    +type ControllerRevisionNamespaceLister interface {
    +	// List lists all ControllerRevisions in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error)
    +	// Get retrieves the ControllerRevision from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta2.ControllerRevision, error)
    +	ControllerRevisionNamespaceListerExpansion
    +}
    +
    +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister
    +// interface.
    +type controllerRevisionNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta2.ControllerRevision]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go
    new file mode 100644
    index 0000000000..37784fe888
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	v1beta2 "k8s.io/api/apps/v1beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DaemonSetLister helps list DaemonSets.
    +// All objects returned here must be treated as read-only.
    +type DaemonSetLister interface {
    +	// List lists all DaemonSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error)
    +	// DaemonSets returns an object that can list and get DaemonSets.
    +	DaemonSets(namespace string) DaemonSetNamespaceLister
    +	DaemonSetListerExpansion
    +}
    +
    +// daemonSetLister implements the DaemonSetLister interface.
    +type daemonSetLister struct {
    +	listers.ResourceIndexer[*v1beta2.DaemonSet]
    +}
    +
    +// NewDaemonSetLister returns a new DaemonSetLister.
    +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister {
    +	return &daemonSetLister{listers.New[*v1beta2.DaemonSet](indexer, v1beta2.Resource("daemonset"))}
    +}
    +
    +// DaemonSets returns an object that can list and get DaemonSets.
    +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister {
    +	return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta2.DaemonSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// DaemonSetNamespaceLister helps list and get DaemonSets.
    +// All objects returned here must be treated as read-only.
    +type DaemonSetNamespaceLister interface {
    +	// List lists all DaemonSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error)
    +	// Get retrieves the DaemonSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta2.DaemonSet, error)
    +	DaemonSetNamespaceListerExpansion
    +}
    +
    +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister
    +// interface.
    +type daemonSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta2.DaemonSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go
    new file mode 100644
    index 0000000000..e722b63b68
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go
    @@ -0,0 +1,114 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta2
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1beta2"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// DaemonSetListerExpansion allows custom methods to be added to
    +// DaemonSetLister.
    +type DaemonSetListerExpansion interface {
    +	GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error)
    +	GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error)
    +}
    +
    +// DaemonSetNamespaceListerExpansion allows custom methods to be added to
    +// DaemonSetNamespaceLister.
    +type DaemonSetNamespaceListerExpansion interface{}
    +
    +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching DaemonSets are found.
    +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) {
    +	var selector labels.Selector
    +	var daemonSet *apps.DaemonSet
    +
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.DaemonSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var daemonSets []*apps.DaemonSet
    +	for i := range list {
    +		daemonSet = list[i]
    +		if daemonSet.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		daemonSets = append(daemonSets, daemonSet)
    +	}
    +
    +	if len(daemonSets) == 0 {
    +		return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return daemonSets, nil
    +}
    +
    +// GetHistoryDaemonSets returns a list of DaemonSets that potentially
    +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef
    +// will actually manage it.
    +// Returns an error only if no matching DaemonSets are found.
    +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) {
    +	if len(history.Labels) == 0 {
    +		return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name)
    +	}
    +
    +	list, err := s.DaemonSets(history.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var daemonSets []*apps.DaemonSet
    +	for _, ds := range list {
    +		selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the history object
    +			continue
    +		}
    +		// If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) {
    +			continue
    +		}
    +		daemonSets = append(daemonSets, ds)
    +	}
    +
    +	if len(daemonSets) == 0 {
    +		return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels)
    +	}
    +
    +	return daemonSets, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go
    new file mode 100644
    index 0000000000..75acc1693e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	v1beta2 "k8s.io/api/apps/v1beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentLister helps list Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentLister interface {
    +	// List lists all Deployments in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.Deployment, err error)
    +	// Deployments returns an object that can list and get Deployments.
    +	Deployments(namespace string) DeploymentNamespaceLister
    +	DeploymentListerExpansion
    +}
    +
    +// deploymentLister implements the DeploymentLister interface.
    +type deploymentLister struct {
    +	listers.ResourceIndexer[*v1beta2.Deployment]
    +}
    +
    +// NewDeploymentLister returns a new DeploymentLister.
    +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister {
    +	return &deploymentLister{listers.New[*v1beta2.Deployment](indexer, v1beta2.Resource("deployment"))}
    +}
    +
    +// Deployments returns an object that can list and get Deployments.
    +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister {
    +	return deploymentNamespaceLister{listers.NewNamespaced[*v1beta2.Deployment](s.ResourceIndexer, namespace)}
    +}
    +
    +// DeploymentNamespaceLister helps list and get Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentNamespaceLister interface {
    +	// List lists all Deployments in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.Deployment, err error)
    +	// Get retrieves the Deployment from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta2.Deployment, error)
    +	DeploymentNamespaceListerExpansion
    +}
    +
    +// deploymentNamespaceLister implements the DeploymentNamespaceLister
    +// interface.
    +type deploymentNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta2.Deployment]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go
    new file mode 100644
    index 0000000000..b6d202118e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go
    @@ -0,0 +1,35 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +// ControllerRevisionListerExpansion allows custom methods to be added to
    +// ControllerRevisionLister.
    +type ControllerRevisionListerExpansion interface{}
    +
    +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to
    +// ControllerRevisionNamespaceLister.
    +type ControllerRevisionNamespaceListerExpansion interface{}
    +
    +// DeploymentListerExpansion allows custom methods to be added to
    +// DeploymentLister.
    +type DeploymentListerExpansion interface{}
    +
    +// DeploymentNamespaceListerExpansion allows custom methods to be added to
    +// DeploymentNamespaceLister.
    +type DeploymentNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go
    new file mode 100644
    index 0000000000..37ea97630e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	v1beta2 "k8s.io/api/apps/v1beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicaSetLister helps list ReplicaSets.
    +// All objects returned here must be treated as read-only.
    +type ReplicaSetLister interface {
    +	// List lists all ReplicaSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error)
    +	// ReplicaSets returns an object that can list and get ReplicaSets.
    +	ReplicaSets(namespace string) ReplicaSetNamespaceLister
    +	ReplicaSetListerExpansion
    +}
    +
    +// replicaSetLister implements the ReplicaSetLister interface.
    +type replicaSetLister struct {
    +	listers.ResourceIndexer[*v1beta2.ReplicaSet]
    +}
    +
    +// NewReplicaSetLister returns a new ReplicaSetLister.
    +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister {
    +	return &replicaSetLister{listers.New[*v1beta2.ReplicaSet](indexer, v1beta2.Resource("replicaset"))}
    +}
    +
    +// ReplicaSets returns an object that can list and get ReplicaSets.
    +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister {
    +	return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta2.ReplicaSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// ReplicaSetNamespaceLister helps list and get ReplicaSets.
    +// All objects returned here must be treated as read-only.
    +type ReplicaSetNamespaceLister interface {
    +	// List lists all ReplicaSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error)
    +	// Get retrieves the ReplicaSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta2.ReplicaSet, error)
    +	ReplicaSetNamespaceListerExpansion
    +}
    +
    +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister
    +// interface.
    +type replicaSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta2.ReplicaSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go
    new file mode 100644
    index 0000000000..bc014b5a69
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go
    @@ -0,0 +1,74 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta2
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1beta2"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// ReplicaSetListerExpansion allows custom methods to be added to
    +// ReplicaSetLister.
    +type ReplicaSetListerExpansion interface {
    +	GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error)
    +}
    +
    +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to
    +// ReplicaSetNamespaceLister.
    +type ReplicaSetNamespaceListerExpansion interface{}
    +
    +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching ReplicaSets are found.
    +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) {
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var rss []*apps.ReplicaSet
    +	for _, rs := range list {
    +		if rs.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		rss = append(rss, rs)
    +	}
    +
    +	if len(rss) == 0 {
    +		return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return rss, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go
    new file mode 100644
    index 0000000000..cc48a1473c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	v1beta2 "k8s.io/api/apps/v1beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// StatefulSetLister helps list StatefulSets.
    +// All objects returned here must be treated as read-only.
    +type StatefulSetLister interface {
    +	// List lists all StatefulSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error)
    +	// StatefulSets returns an object that can list and get StatefulSets.
    +	StatefulSets(namespace string) StatefulSetNamespaceLister
    +	StatefulSetListerExpansion
    +}
    +
    +// statefulSetLister implements the StatefulSetLister interface.
    +type statefulSetLister struct {
    +	listers.ResourceIndexer[*v1beta2.StatefulSet]
    +}
    +
    +// NewStatefulSetLister returns a new StatefulSetLister.
    +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister {
    +	return &statefulSetLister{listers.New[*v1beta2.StatefulSet](indexer, v1beta2.Resource("statefulset"))}
    +}
    +
    +// StatefulSets returns an object that can list and get StatefulSets.
    +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister {
    +	return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta2.StatefulSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// StatefulSetNamespaceLister helps list and get StatefulSets.
    +// All objects returned here must be treated as read-only.
    +type StatefulSetNamespaceLister interface {
    +	// List lists all StatefulSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error)
    +	// Get retrieves the StatefulSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta2.StatefulSet, error)
    +	StatefulSetNamespaceListerExpansion
    +}
    +
    +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister
    +// interface.
    +type statefulSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta2.StatefulSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go
    new file mode 100644
    index 0000000000..eae31b82f8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go
    @@ -0,0 +1,78 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta2
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1beta2"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// StatefulSetListerExpansion allows custom methods to be added to
    +// StatefulSetLister.
    +type StatefulSetListerExpansion interface {
    +	GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error)
    +}
    +
    +// StatefulSetNamespaceListerExpansion allows custom methods to be added to
    +// StatefulSetNamespaceLister.
    +type StatefulSetNamespaceListerExpansion interface{}
    +
    +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching StatefulSets are found.
    +func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) {
    +	var selector labels.Selector
    +	var ps *apps.StatefulSet
    +
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.StatefulSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var psList []*apps.StatefulSet
    +	for i := range list {
    +		ps = list[i]
    +		if ps.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		psList = append(psList, ps)
    +	}
    +
    +	if len(psList) == 0 {
    +		return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return psList, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..05253c7703
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerLister.
    +type HorizontalPodAutoscalerListerExpansion interface{}
    +
    +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerNamespaceLister.
    +type HorizontalPodAutoscalerNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..2cd4cc87bf
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/autoscaling/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error)
    +	// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister
    +	HorizontalPodAutoscalerListerExpansion
    +}
    +
    +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface.
    +type horizontalPodAutoscalerLister struct {
    +	listers.ResourceIndexer[*v1.HorizontalPodAutoscaler]
    +}
    +
    +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister.
    +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister {
    +	return &horizontalPodAutoscalerLister{listers.New[*v1.HorizontalPodAutoscaler](indexer, v1.Resource("horizontalpodautoscaler"))}
    +}
    +
    +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister {
    +	return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)}
    +}
    +
    +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerNamespaceLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error)
    +	// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.HorizontalPodAutoscaler, error)
    +	HorizontalPodAutoscalerNamespaceListerExpansion
    +}
    +
    +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister
    +// interface.
    +type horizontalPodAutoscalerNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.HorizontalPodAutoscaler]
    +}
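
The lister bodies above delegate to the generic listers.ResourceIndexer helpers, but consumers keep using the exported constructors such as NewHorizontalPodAutoscalerLister. A small sketch of driving the v1 HorizontalPodAutoscaler lister against a hand-built index, as one might in a unit test; the namespace "default" and object name "web" are made-up example values:

package example

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv1listers "k8s.io/client-go/listers/autoscaling/v1"
	"k8s.io/client-go/tools/cache"
)

func lookupHPA() error {
	// The lister only reads from an index; in tests it can be fed by hand.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	hpa := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}, // example object
	}
	if err := indexer.Add(hpa); err != nil {
		return err
	}

	lister := autoscalingv1listers.NewHorizontalPodAutoscalerLister(indexer)
	got, err := lister.HorizontalPodAutoscalers("default").Get("web")
	if err != nil {
		return err
	}
	fmt.Println(got.Name) // prints "web"
	return nil
}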
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v2/expansion_generated.go
    new file mode 100644
    index 0000000000..97742b77b2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v2
    +
    +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerLister.
    +type HorizontalPodAutoscalerListerExpansion interface{}
    +
    +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerNamespaceLister.
    +type HorizontalPodAutoscalerNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..7c2806af21
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v2
    +
    +import (
    +	v2 "k8s.io/api/autoscaling/v2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error)
    +	// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister
    +	HorizontalPodAutoscalerListerExpansion
    +}
    +
    +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface.
    +type horizontalPodAutoscalerLister struct {
    +	listers.ResourceIndexer[*v2.HorizontalPodAutoscaler]
    +}
    +
    +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister.
    +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister {
    +	return &horizontalPodAutoscalerLister{listers.New[*v2.HorizontalPodAutoscaler](indexer, v2.Resource("horizontalpodautoscaler"))}
    +}
    +
    +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister {
    +	return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)}
    +}
    +
    +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerNamespaceLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error)
    +	// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v2.HorizontalPodAutoscaler, error)
    +	HorizontalPodAutoscalerNamespaceListerExpansion
    +}
    +
    +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister
    +// interface.
    +type horizontalPodAutoscalerNamespaceLister struct {
    +	listers.ResourceIndexer[*v2.HorizontalPodAutoscaler]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..8d46a4b6e3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v2beta1
    +
    +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerLister.
    +type HorizontalPodAutoscalerListerExpansion interface{}
    +
    +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerNamespaceLister.
    +type HorizontalPodAutoscalerNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..a2befd6062
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v2beta1
    +
    +import (
    +	v2beta1 "k8s.io/api/autoscaling/v2beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error)
    +	// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister
    +	HorizontalPodAutoscalerListerExpansion
    +}
    +
    +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface.
    +type horizontalPodAutoscalerLister struct {
    +	listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler]
    +}
    +
    +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister.
    +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister {
    +	return &horizontalPodAutoscalerLister{listers.New[*v2beta1.HorizontalPodAutoscaler](indexer, v2beta1.Resource("horizontalpodautoscaler"))}
    +}
    +
    +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister {
    +	return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)}
    +}
    +
    +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerNamespaceLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error)
    +	// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v2beta1.HorizontalPodAutoscaler, error)
    +	HorizontalPodAutoscalerNamespaceListerExpansion
    +}
    +
    +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister
    +// interface.
    +type horizontalPodAutoscalerNamespaceLister struct {
    +	listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/expansion_generated.go
    new file mode 100644
    index 0000000000..5127945a9c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v2beta2
    +
    +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerLister.
    +type HorizontalPodAutoscalerListerExpansion interface{}
    +
    +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
    +// HorizontalPodAutoscalerNamespaceLister.
    +type HorizontalPodAutoscalerNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go
    new file mode 100644
    index 0000000000..52bae849ba
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v2beta2
    +
    +import (
    +	v2beta2 "k8s.io/api/autoscaling/v2beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error)
    +	// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister
    +	HorizontalPodAutoscalerListerExpansion
    +}
    +
    +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface.
    +type horizontalPodAutoscalerLister struct {
    +	listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler]
    +}
    +
    +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister.
    +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister {
    +	return &horizontalPodAutoscalerLister{listers.New[*v2beta2.HorizontalPodAutoscaler](indexer, v2beta2.Resource("horizontalpodautoscaler"))}
    +}
    +
    +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers.
    +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister {
    +	return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)}
    +}
    +
    +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers.
    +// All objects returned here must be treated as read-only.
    +type HorizontalPodAutoscalerNamespaceLister interface {
    +	// List lists all HorizontalPodAutoscalers in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error)
    +	// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v2beta2.HorizontalPodAutoscaler, error)
    +	HorizontalPodAutoscalerNamespaceListerExpansion
    +}
    +
    +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister
    +// interface.
    +type horizontalPodAutoscalerNamespaceLister struct {
    +	listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go
    new file mode 100644
    index 0000000000..a7a3abbfa3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/batch/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CronJobLister helps list CronJobs.
    +// All objects returned here must be treated as read-only.
    +type CronJobLister interface {
    +	// List lists all CronJobs in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.CronJob, err error)
    +	// CronJobs returns an object that can list and get CronJobs.
    +	CronJobs(namespace string) CronJobNamespaceLister
    +	CronJobListerExpansion
    +}
    +
    +// cronJobLister implements the CronJobLister interface.
    +type cronJobLister struct {
    +	listers.ResourceIndexer[*v1.CronJob]
    +}
    +
    +// NewCronJobLister returns a new CronJobLister.
    +func NewCronJobLister(indexer cache.Indexer) CronJobLister {
    +	return &cronJobLister{listers.New[*v1.CronJob](indexer, v1.Resource("cronjob"))}
    +}
    +
    +// CronJobs returns an object that can list and get CronJobs.
    +func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister {
    +	return cronJobNamespaceLister{listers.NewNamespaced[*v1.CronJob](s.ResourceIndexer, namespace)}
    +}
    +
    +// CronJobNamespaceLister helps list and get CronJobs.
    +// All objects returned here must be treated as read-only.
    +type CronJobNamespaceLister interface {
    +	// List lists all CronJobs in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.CronJob, err error)
    +	// Get retrieves the CronJob from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.CronJob, error)
    +	CronJobNamespaceListerExpansion
    +}
    +
    +// cronJobNamespaceLister implements the CronJobNamespaceLister
    +// interface.
    +type cronJobNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.CronJob]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..2209762790
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// CronJobListerExpansion allows custom methods to be added to
    +// CronJobLister.
    +type CronJobListerExpansion interface{}
    +
    +// CronJobNamespaceListerExpansion allows custom methods to be added to
    +// CronJobNamespaceLister.
    +type CronJobNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job.go b/vendor/k8s.io/client-go/listers/batch/v1/job.go
    new file mode 100644
    index 0000000000..4078a9f7d8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/batch/v1/job.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/batch/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// JobLister helps list Jobs.
    +// All objects returned here must be treated as read-only.
    +type JobLister interface {
    +	// List lists all Jobs in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Job, err error)
    +	// Jobs returns an object that can list and get Jobs.
    +	Jobs(namespace string) JobNamespaceLister
    +	JobListerExpansion
    +}
    +
    +// jobLister implements the JobLister interface.
    +type jobLister struct {
    +	listers.ResourceIndexer[*v1.Job]
    +}
    +
    +// NewJobLister returns a new JobLister.
    +func NewJobLister(indexer cache.Indexer) JobLister {
    +	return &jobLister{listers.New[*v1.Job](indexer, v1.Resource("job"))}
    +}
    +
    +// Jobs returns an object that can list and get Jobs.
    +func (s *jobLister) Jobs(namespace string) JobNamespaceLister {
    +	return jobNamespaceLister{listers.NewNamespaced[*v1.Job](s.ResourceIndexer, namespace)}
    +}
    +
    +// JobNamespaceLister helps list and get Jobs.
    +// All objects returned here must be treated as read-only.
    +type JobNamespaceLister interface {
    +	// List lists all Jobs in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Job, err error)
    +	// Get retrieves the Job from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Job, error)
    +	JobNamespaceListerExpansion
    +}
    +
    +// jobNamespaceLister implements the JobNamespaceLister
    +// interface.
    +type jobNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Job]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go b/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go
    new file mode 100644
    index 0000000000..8dc5db7885
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go
    @@ -0,0 +1,72 @@
    +/*
    +Copyright 2016 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1
    +
    +import (
    +	"fmt"
    +
    +	batch "k8s.io/api/batch/v1"
    +	"k8s.io/api/core/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// JobListerExpansion allows custom methods to be added to
    +// JobLister.
    +type JobListerExpansion interface {
    +	// GetPodJobs returns a list of Jobs that potentially
    +	// match a Pod. Only the one specified in the Pod's ControllerRef
    +	// will actually manage it.
    +	// Returns an error only if no matching Jobs are found.
    +	GetPodJobs(pod *v1.Pod) (jobs []batch.Job, err error)
    +}
    +
    +// GetPodJobs returns a list of Jobs that potentially
    +// match a Pod. Only the one specified in the Pod's ControllerRef
    +// will actually manage it.
    +// Returns an error only if no matching Jobs are found.
    +func (l *jobLister) GetPodJobs(pod *v1.Pod) (jobs []batch.Job, err error) {
    +	if len(pod.Labels) == 0 {
    +		err = fmt.Errorf("no jobs found for pod %v because it has no labels", pod.Name)
    +		return
    +	}
    +
    +	var list []*batch.Job
    +	list, err = l.Jobs(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return
    +	}
    +	for _, job := range list {
    +		selector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +		if !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		jobs = append(jobs, *job)
    +	}
    +	if len(jobs) == 0 {
    +		err = fmt.Errorf("could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +	return
    +}
    +
    +// JobNamespaceListerExpansion allows custom methods to be added to
    +// JobNamespaceLister.
    +type JobNamespaceListerExpansion interface{}
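
Per the comment above, GetPodJobs returns every Job whose selector matches the pod, while only the one named by the pod's ControllerRef actually manages it. A sketch of narrowing the result down to that owner; the helper name owningJob is illustrative:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1listers "k8s.io/client-go/listers/batch/v1"
)

// owningJob narrows GetPodJobs down to the single Job referenced by the pod's
// ControllerRef, mirroring the caveat in the vendored comment above.
func owningJob(lister batchv1listers.JobLister, pod *corev1.Pod) (*batchv1.Job, error) {
	jobs, err := lister.GetPodJobs(pod)
	if err != nil {
		return nil, err
	}
	ref := metav1.GetControllerOf(pod)
	if ref == nil {
		return nil, nil // pod has no controller, so no Job owns it
	}
	for i := range jobs {
		if jobs[i].UID == ref.UID && jobs[i].Name == ref.Name {
			return &jobs[i], nil
		}
	}
	return nil, nil
}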
    diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go
    new file mode 100644
    index 0000000000..33ed8219e3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/batch/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CronJobLister helps list CronJobs.
    +// All objects returned here must be treated as read-only.
    +type CronJobLister interface {
    +	// List lists all CronJobs in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.CronJob, err error)
    +	// CronJobs returns an object that can list and get CronJobs.
    +	CronJobs(namespace string) CronJobNamespaceLister
    +	CronJobListerExpansion
    +}
    +
    +// cronJobLister implements the CronJobLister interface.
    +type cronJobLister struct {
    +	listers.ResourceIndexer[*v1beta1.CronJob]
    +}
    +
    +// NewCronJobLister returns a new CronJobLister.
    +func NewCronJobLister(indexer cache.Indexer) CronJobLister {
    +	return &cronJobLister{listers.New[*v1beta1.CronJob](indexer, v1beta1.Resource("cronjob"))}
    +}
    +
    +// CronJobs returns an object that can list and get CronJobs.
    +func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister {
    +	return cronJobNamespaceLister{listers.NewNamespaced[*v1beta1.CronJob](s.ResourceIndexer, namespace)}
    +}
    +
    +// CronJobNamespaceLister helps list and get CronJobs.
    +// All objects returned here must be treated as read-only.
    +type CronJobNamespaceLister interface {
    +	// List lists all CronJobs in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.CronJob, err error)
    +	// Get retrieves the CronJob from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.CronJob, error)
    +	CronJobNamespaceListerExpansion
    +}
    +
    +// cronJobNamespaceLister implements the CronJobNamespaceLister
    +// interface.
    +type cronJobNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.CronJob]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..be2742ef61
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// CronJobListerExpansion allows custom methods to be added to
    +// CronJobLister.
    +type CronJobListerExpansion interface{}
    +
    +// CronJobNamespaceListerExpansion allows custom methods to be added to
    +// CronJobNamespaceLister.
    +type CronJobNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go
    new file mode 100644
    index 0000000000..38e4a3a658
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/certificates/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CertificateSigningRequestLister helps list CertificateSigningRequests.
    +// All objects returned here must be treated as read-only.
    +type CertificateSigningRequestLister interface {
    +	// List lists all CertificateSigningRequests in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.CertificateSigningRequest, err error)
    +	// Get retrieves the CertificateSigningRequest from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.CertificateSigningRequest, error)
    +	CertificateSigningRequestListerExpansion
    +}
    +
    +// certificateSigningRequestLister implements the CertificateSigningRequestLister interface.
    +type certificateSigningRequestLister struct {
    +	listers.ResourceIndexer[*v1.CertificateSigningRequest]
    +}
    +
    +// NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister.
    +func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister {
    +	return &certificateSigningRequestLister{listers.New[*v1.CertificateSigningRequest](indexer, v1.Resource("certificatesigningrequest"))}
    +}
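
CertificateSigningRequests are cluster-scoped, so this lister exposes Get and List directly with no per-namespace sub-lister. A brief sketch, assuming the cache.Indexer is already populated by an informer or by hand:

package example

import (
	certificatesv1 "k8s.io/api/certificates/v1"
	"k8s.io/apimachinery/pkg/labels"
	certificatesv1listers "k8s.io/client-go/listers/certificates/v1"
	"k8s.io/client-go/tools/cache"
)

// csrByName shows the cluster-scoped shape of this lister: Get takes only a
// name, and List works across the whole cluster.
func csrByName(indexer cache.Indexer, name string) (*certificatesv1.CertificateSigningRequest, []*certificatesv1.CertificateSigningRequest, error) {
	lister := certificatesv1listers.NewCertificateSigningRequestLister(indexer)
	csr, err := lister.Get(name)
	if err != nil {
		return nil, nil, err
	}
	all, err := lister.List(labels.Everything())
	return csr, all, err
}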
    diff --git a/vendor/k8s.io/client-go/listers/certificates/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/certificates/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..616a1f1a09
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/certificates/v1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// CertificateSigningRequestListerExpansion allows custom methods to be added to
    +// CertificateSigningRequestLister.
    +type CertificateSigningRequestListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go
    new file mode 100644
    index 0000000000..88e5365f40
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/certificates/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterTrustBundleLister helps list ClusterTrustBundles.
    +// All objects returned here must be treated as read-only.
    +type ClusterTrustBundleLister interface {
    +	// List lists all ClusterTrustBundles in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error)
    +	// Get retrieves the ClusterTrustBundle from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.ClusterTrustBundle, error)
    +	ClusterTrustBundleListerExpansion
    +}
    +
    +// clusterTrustBundleLister implements the ClusterTrustBundleLister interface.
    +type clusterTrustBundleLister struct {
    +	listers.ResourceIndexer[*v1alpha1.ClusterTrustBundle]
    +}
    +
    +// NewClusterTrustBundleLister returns a new ClusterTrustBundleLister.
    +func NewClusterTrustBundleLister(indexer cache.Indexer) ClusterTrustBundleLister {
    +	return &clusterTrustBundleLister{listers.New[*v1alpha1.ClusterTrustBundle](indexer, v1alpha1.Resource("clustertrustbundle"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..d77258cb2d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// ClusterTrustBundleListerExpansion allows custom methods to be added to
    +// ClusterTrustBundleLister.
    +type ClusterTrustBundleListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go
    new file mode 100644
    index 0000000000..84b5ac4a90
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/certificates/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CertificateSigningRequestLister helps list CertificateSigningRequests.
    +// All objects returned here must be treated as read-only.
    +type CertificateSigningRequestLister interface {
    +	// List lists all CertificateSigningRequests in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error)
    +	// Get retrieves the CertificateSigningRequest from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.CertificateSigningRequest, error)
    +	CertificateSigningRequestListerExpansion
    +}
    +
    +// certificateSigningRequestLister implements the CertificateSigningRequestLister interface.
    +type certificateSigningRequestLister struct {
    +	listers.ResourceIndexer[*v1beta1.CertificateSigningRequest]
    +}
    +
    +// NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister.
    +func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister {
    +	return &certificateSigningRequestLister{listers.New[*v1beta1.CertificateSigningRequest](indexer, v1beta1.Resource("certificatesigningrequest"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..68f993cd6e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// CertificateSigningRequestListerExpansion allows custom methods to be added to
    +// CertificateSigningRequestLister.
    +type CertificateSigningRequestListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..ddc494f1c3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// LeaseListerExpansion allows custom methods to be added to
    +// LeaseLister.
    +type LeaseListerExpansion interface{}
    +
    +// LeaseNamespaceListerExpansion allows custom methods to be added to
    +// LeaseNamespaceLister.
    +type LeaseNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go
    new file mode 100644
    index 0000000000..b36d8800e3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/coordination/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// LeaseLister helps list Leases.
    +// All objects returned here must be treated as read-only.
    +type LeaseLister interface {
    +	// List lists all Leases in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Lease, err error)
    +	// Leases returns an object that can list and get Leases.
    +	Leases(namespace string) LeaseNamespaceLister
    +	LeaseListerExpansion
    +}
    +
    +// leaseLister implements the LeaseLister interface.
    +type leaseLister struct {
    +	listers.ResourceIndexer[*v1.Lease]
    +}
    +
    +// NewLeaseLister returns a new LeaseLister.
    +func NewLeaseLister(indexer cache.Indexer) LeaseLister {
    +	return &leaseLister{listers.New[*v1.Lease](indexer, v1.Resource("lease"))}
    +}
    +
    +// Leases returns an object that can list and get Leases.
    +func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister {
    +	return leaseNamespaceLister{listers.NewNamespaced[*v1.Lease](s.ResourceIndexer, namespace)}
    +}
    +
    +// LeaseNamespaceLister helps list and get Leases.
    +// All objects returned here must be treated as read-only.
    +type LeaseNamespaceLister interface {
    +	// List lists all Leases in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Lease, err error)
    +	// Get retrieves the Lease from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Lease, error)
    +	LeaseNamespaceListerExpansion
    +}
    +
    +// leaseNamespaceLister implements the LeaseNamespaceLister
    +// interface.
    +type leaseNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Lease]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..233bda975b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// LeaseCandidateListerExpansion allows custom methods to be added to
    +// LeaseCandidateLister.
    +type LeaseCandidateListerExpansion interface{}
    +
    +// LeaseCandidateNamespaceListerExpansion allows custom methods to be added to
    +// LeaseCandidateNamespaceLister.
    +type LeaseCandidateNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go
    new file mode 100644
    index 0000000000..b5e5fac9e4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// LeaseCandidateLister helps list LeaseCandidates.
    +// All objects returned here must be treated as read-only.
    +type LeaseCandidateLister interface {
    +	// List lists all LeaseCandidates in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error)
    +	// LeaseCandidates returns an object that can list and get LeaseCandidates.
    +	LeaseCandidates(namespace string) LeaseCandidateNamespaceLister
    +	LeaseCandidateListerExpansion
    +}
    +
    +// leaseCandidateLister implements the LeaseCandidateLister interface.
    +type leaseCandidateLister struct {
    +	listers.ResourceIndexer[*v1alpha1.LeaseCandidate]
    +}
    +
    +// NewLeaseCandidateLister returns a new LeaseCandidateLister.
    +func NewLeaseCandidateLister(indexer cache.Indexer) LeaseCandidateLister {
    +	return &leaseCandidateLister{listers.New[*v1alpha1.LeaseCandidate](indexer, v1alpha1.Resource("leasecandidate"))}
    +}
    +
    +// LeaseCandidates returns an object that can list and get LeaseCandidates.
    +func (s *leaseCandidateLister) LeaseCandidates(namespace string) LeaseCandidateNamespaceLister {
    +	return leaseCandidateNamespaceLister{listers.NewNamespaced[*v1alpha1.LeaseCandidate](s.ResourceIndexer, namespace)}
    +}
    +
    +// LeaseCandidateNamespaceLister helps list and get LeaseCandidates.
    +// All objects returned here must be treated as read-only.
    +type LeaseCandidateNamespaceLister interface {
    +	// List lists all LeaseCandidates in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error)
    +	// Get retrieves the LeaseCandidate from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.LeaseCandidate, error)
    +	LeaseCandidateNamespaceListerExpansion
    +}
    +
    +// leaseCandidateNamespaceLister implements the LeaseCandidateNamespaceLister
    +// interface.
    +type leaseCandidateNamespaceLister struct {
    +	listers.ResourceIndexer[*v1alpha1.LeaseCandidate]
    +}
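
LeaseCandidate is the coordination.k8s.io v1alpha1 resource introduced for coordinated leader election, and this vendored lister is how clients read candidates out of an informer cache. A minimal sketch, with the selectorLabels parameter and the helper name candidatesFor as illustrative stand-ins:

package example

import (
	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
	"k8s.io/apimachinery/pkg/labels"
	coordinationv1alpha1listers "k8s.io/client-go/listers/coordination/v1alpha1"
	"k8s.io/client-go/tools/cache"
)

// candidatesFor lists LeaseCandidates in one namespace that carry the given
// labels, e.g. when inspecting leader-election state in a controller.
func candidatesFor(indexer cache.Indexer, namespace string, selectorLabels map[string]string) ([]*coordinationv1alpha1.LeaseCandidate, error) {
	lister := coordinationv1alpha1listers.NewLeaseCandidateLister(indexer)
	return lister.LeaseCandidates(namespace).List(labels.SelectorFromSet(labels.Set(selectorLabels)))
}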
    diff --git a/vendor/k8s.io/client-go/listers/coordination/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/coordination/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..dddc53107b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/coordination/v1beta1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// LeaseListerExpansion allows custom methods to be added to
    +// LeaseLister.
    +type LeaseListerExpansion interface{}
    +
    +// LeaseNamespaceListerExpansion allows custom methods to be added to
    +// LeaseNamespaceLister.
    +type LeaseNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go
    new file mode 100644
    index 0000000000..dbe132696a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/coordination/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// LeaseLister helps list Leases.
    +// All objects returned here must be treated as read-only.
    +type LeaseLister interface {
    +	// List lists all Leases in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Lease, err error)
    +	// Leases returns an object that can list and get Leases.
    +	Leases(namespace string) LeaseNamespaceLister
    +	LeaseListerExpansion
    +}
    +
    +// leaseLister implements the LeaseLister interface.
    +type leaseLister struct {
    +	listers.ResourceIndexer[*v1beta1.Lease]
    +}
    +
    +// NewLeaseLister returns a new LeaseLister.
    +func NewLeaseLister(indexer cache.Indexer) LeaseLister {
    +	return &leaseLister{listers.New[*v1beta1.Lease](indexer, v1beta1.Resource("lease"))}
    +}
    +
    +// Leases returns an object that can list and get Leases.
    +func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister {
    +	return leaseNamespaceLister{listers.NewNamespaced[*v1beta1.Lease](s.ResourceIndexer, namespace)}
    +}
    +
    +// LeaseNamespaceLister helps list and get Leases.
    +// All objects returned here must be treated as read-only.
    +type LeaseNamespaceLister interface {
    +	// List lists all Leases in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Lease, err error)
    +	// Get retrieves the Lease from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Lease, error)
    +	LeaseNamespaceListerExpansion
    +}
    +
    +// leaseNamespaceLister implements the LeaseNamespaceLister
    +// interface.
    +type leaseNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Lease]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go
    new file mode 100644
    index 0000000000..9e3274b5a3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ComponentStatusLister helps list ComponentStatuses.
    +// All objects returned here must be treated as read-only.
    +type ComponentStatusLister interface {
    +	// List lists all ComponentStatuses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ComponentStatus, err error)
    +	// Get retrieves the ComponentStatus from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ComponentStatus, error)
    +	ComponentStatusListerExpansion
    +}
    +
    +// componentStatusLister implements the ComponentStatusLister interface.
    +type componentStatusLister struct {
    +	listers.ResourceIndexer[*v1.ComponentStatus]
    +}
    +
    +// NewComponentStatusLister returns a new ComponentStatusLister.
    +func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister {
    +	return &componentStatusLister{listers.New[*v1.ComponentStatus](indexer, v1.Resource("componentstatus"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go
    new file mode 100644
    index 0000000000..0dde404f2f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ConfigMapLister helps list ConfigMaps.
    +// All objects returned here must be treated as read-only.
    +type ConfigMapLister interface {
    +	// List lists all ConfigMaps in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ConfigMap, err error)
    +	// ConfigMaps returns an object that can list and get ConfigMaps.
    +	ConfigMaps(namespace string) ConfigMapNamespaceLister
    +	ConfigMapListerExpansion
    +}
    +
    +// configMapLister implements the ConfigMapLister interface.
    +type configMapLister struct {
    +	listers.ResourceIndexer[*v1.ConfigMap]
    +}
    +
    +// NewConfigMapLister returns a new ConfigMapLister.
    +func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister {
    +	return &configMapLister{listers.New[*v1.ConfigMap](indexer, v1.Resource("configmap"))}
    +}
    +
    +// ConfigMaps returns an object that can list and get ConfigMaps.
    +func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister {
    +	return configMapNamespaceLister{listers.NewNamespaced[*v1.ConfigMap](s.ResourceIndexer, namespace)}
    +}
    +
    +// ConfigMapNamespaceLister helps list and get ConfigMaps.
    +// All objects returned here must be treated as read-only.
    +type ConfigMapNamespaceLister interface {
    +	// List lists all ConfigMaps in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ConfigMap, err error)
    +	// Get retrieves the ConfigMap from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ConfigMap, error)
    +	ConfigMapNamespaceListerExpansion
    +}
    +
    +// configMapNamespaceLister implements the ConfigMapNamespaceLister
    +// interface.
    +type configMapNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.ConfigMap]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go
    new file mode 100644
    index 0000000000..726b432559
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EndpointsLister helps list Endpoints.
    +// All objects returned here must be treated as read-only.
    +type EndpointsLister interface {
    +	// List lists all Endpoints in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Endpoints, err error)
    +	// Endpoints returns an object that can list and get Endpoints.
    +	Endpoints(namespace string) EndpointsNamespaceLister
    +	EndpointsListerExpansion
    +}
    +
    +// endpointsLister implements the EndpointsLister interface.
    +type endpointsLister struct {
    +	listers.ResourceIndexer[*v1.Endpoints]
    +}
    +
    +// NewEndpointsLister returns a new EndpointsLister.
    +func NewEndpointsLister(indexer cache.Indexer) EndpointsLister {
    +	return &endpointsLister{listers.New[*v1.Endpoints](indexer, v1.Resource("endpoints"))}
    +}
    +
    +// Endpoints returns an object that can list and get Endpoints.
    +func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister {
    +	return endpointsNamespaceLister{listers.NewNamespaced[*v1.Endpoints](s.ResourceIndexer, namespace)}
    +}
    +
    +// EndpointsNamespaceLister helps list and get Endpoints.
    +// All objects returned here must be treated as read-only.
    +type EndpointsNamespaceLister interface {
    +	// List lists all Endpoints in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Endpoints, err error)
    +	// Get retrieves the Endpoints from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Endpoints, error)
    +	EndpointsNamespaceListerExpansion
    +}
    +
    +// endpointsNamespaceLister implements the EndpointsNamespaceLister
    +// interface.
    +type endpointsNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Endpoints]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go
    new file mode 100644
    index 0000000000..5ab3a19321
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EventLister helps list Events.
    +// All objects returned here must be treated as read-only.
    +type EventLister interface {
    +	// List lists all Events in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Event, err error)
    +	// Events returns an object that can list and get Events.
    +	Events(namespace string) EventNamespaceLister
    +	EventListerExpansion
    +}
    +
    +// eventLister implements the EventLister interface.
    +type eventLister struct {
    +	listers.ResourceIndexer[*v1.Event]
    +}
    +
    +// NewEventLister returns a new EventLister.
    +func NewEventLister(indexer cache.Indexer) EventLister {
    +	return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))}
    +}
    +
    +// Events returns an object that can list and get Events.
    +func (s *eventLister) Events(namespace string) EventNamespaceLister {
    +	return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)}
    +}
    +
    +// EventNamespaceLister helps list and get Events.
    +// All objects returned here must be treated as read-only.
    +type EventNamespaceLister interface {
    +	// List lists all Events in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Event, err error)
    +	// Get retrieves the Event from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Event, error)
    +	EventNamespaceListerExpansion
    +}
    +
    +// eventNamespaceLister implements the EventNamespaceLister
    +// interface.
    +type eventNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Event]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..2168a7f483
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go
    @@ -0,0 +1,123 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// ComponentStatusListerExpansion allows custom methods to be added to
    +// ComponentStatusLister.
    +type ComponentStatusListerExpansion interface{}
    +
    +// ConfigMapListerExpansion allows custom methods to be added to
    +// ConfigMapLister.
    +type ConfigMapListerExpansion interface{}
    +
    +// ConfigMapNamespaceListerExpansion allows custom methods to be added to
    +// ConfigMapNamespaceLister.
    +type ConfigMapNamespaceListerExpansion interface{}
    +
    +// EndpointsListerExpansion allows custom methods to be added to
    +// EndpointsLister.
    +type EndpointsListerExpansion interface{}
    +
    +// EndpointsNamespaceListerExpansion allows custom methods to be added to
    +// EndpointsNamespaceLister.
    +type EndpointsNamespaceListerExpansion interface{}
    +
    +// EventListerExpansion allows custom methods to be added to
    +// EventLister.
    +type EventListerExpansion interface{}
    +
    +// EventNamespaceListerExpansion allows custom methods to be added to
    +// EventNamespaceLister.
    +type EventNamespaceListerExpansion interface{}
    +
    +// LimitRangeListerExpansion allows custom methods to be added to
    +// LimitRangeLister.
    +type LimitRangeListerExpansion interface{}
    +
    +// LimitRangeNamespaceListerExpansion allows custom methods to be added to
    +// LimitRangeNamespaceLister.
    +type LimitRangeNamespaceListerExpansion interface{}
    +
    +// NamespaceListerExpansion allows custom methods to be added to
    +// NamespaceLister.
    +type NamespaceListerExpansion interface{}
    +
    +// NodeListerExpansion allows custom methods to be added to
    +// NodeLister.
    +type NodeListerExpansion interface{}
    +
    +// PersistentVolumeListerExpansion allows custom methods to be added to
    +// PersistentVolumeLister.
    +type PersistentVolumeListerExpansion interface{}
    +
    +// PersistentVolumeClaimListerExpansion allows custom methods to be added to
    +// PersistentVolumeClaimLister.
    +type PersistentVolumeClaimListerExpansion interface{}
    +
    +// PersistentVolumeClaimNamespaceListerExpansion allows custom methods to be added to
    +// PersistentVolumeClaimNamespaceLister.
    +type PersistentVolumeClaimNamespaceListerExpansion interface{}
    +
    +// PodListerExpansion allows custom methods to be added to
    +// PodLister.
    +type PodListerExpansion interface{}
    +
    +// PodNamespaceListerExpansion allows custom methods to be added to
    +// PodNamespaceLister.
    +type PodNamespaceListerExpansion interface{}
    +
    +// PodTemplateListerExpansion allows custom methods to be added to
    +// PodTemplateLister.
    +type PodTemplateListerExpansion interface{}
    +
    +// PodTemplateNamespaceListerExpansion allows custom methods to be added to
    +// PodTemplateNamespaceLister.
    +type PodTemplateNamespaceListerExpansion interface{}
    +
    +// ResourceQuotaListerExpansion allows custom methods to be added to
    +// ResourceQuotaLister.
    +type ResourceQuotaListerExpansion interface{}
    +
    +// ResourceQuotaNamespaceListerExpansion allows custom methods to be added to
    +// ResourceQuotaNamespaceLister.
    +type ResourceQuotaNamespaceListerExpansion interface{}
    +
    +// SecretListerExpansion allows custom methods to be added to
    +// SecretLister.
    +type SecretListerExpansion interface{}
    +
    +// SecretNamespaceListerExpansion allows custom methods to be added to
    +// SecretNamespaceLister.
    +type SecretNamespaceListerExpansion interface{}
    +
    +// ServiceListerExpansion allows custom methods to be added to
    +// ServiceLister.
    +type ServiceListerExpansion interface{}
    +
    +// ServiceNamespaceListerExpansion allows custom methods to be added to
    +// ServiceNamespaceLister.
    +type ServiceNamespaceListerExpansion interface{}
    +
    +// ServiceAccountListerExpansion allows custom methods to be added to
    +// ServiceAccountLister.
    +type ServiceAccountListerExpansion interface{}
    +
    +// ServiceAccountNamespaceListerExpansion allows custom methods to be added to
    +// ServiceAccountNamespaceLister.
    +type ServiceAccountNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go
    new file mode 100644
    index 0000000000..5c7593cfa9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// LimitRangeLister helps list LimitRanges.
    +// All objects returned here must be treated as read-only.
    +type LimitRangeLister interface {
    +	// List lists all LimitRanges in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.LimitRange, err error)
    +	// LimitRanges returns an object that can list and get LimitRanges.
    +	LimitRanges(namespace string) LimitRangeNamespaceLister
    +	LimitRangeListerExpansion
    +}
    +
    +// limitRangeLister implements the LimitRangeLister interface.
    +type limitRangeLister struct {
    +	listers.ResourceIndexer[*v1.LimitRange]
    +}
    +
    +// NewLimitRangeLister returns a new LimitRangeLister.
    +func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister {
    +	return &limitRangeLister{listers.New[*v1.LimitRange](indexer, v1.Resource("limitrange"))}
    +}
    +
    +// LimitRanges returns an object that can list and get LimitRanges.
    +func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister {
    +	return limitRangeNamespaceLister{listers.NewNamespaced[*v1.LimitRange](s.ResourceIndexer, namespace)}
    +}
    +
    +// LimitRangeNamespaceLister helps list and get LimitRanges.
    +// All objects returned here must be treated as read-only.
    +type LimitRangeNamespaceLister interface {
    +	// List lists all LimitRanges in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.LimitRange, err error)
    +	// Get retrieves the LimitRange from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.LimitRange, error)
    +	LimitRangeNamespaceListerExpansion
    +}
    +
    +// limitRangeNamespaceLister implements the LimitRangeNamespaceLister
    +// interface.
    +type limitRangeNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.LimitRange]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go
    new file mode 100644
    index 0000000000..a016447cfe
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// NamespaceLister helps list Namespaces.
    +// All objects returned here must be treated as read-only.
    +type NamespaceLister interface {
    +	// List lists all Namespaces in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Namespace, err error)
    +	// Get retrieves the Namespace from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Namespace, error)
    +	NamespaceListerExpansion
    +}
    +
    +// namespaceLister implements the NamespaceLister interface.
    +type namespaceLister struct {
    +	listers.ResourceIndexer[*v1.Namespace]
    +}
    +
    +// NewNamespaceLister returns a new NamespaceLister.
    +func NewNamespaceLister(indexer cache.Indexer) NamespaceLister {
    +	return &namespaceLister{listers.New[*v1.Namespace](indexer, v1.Resource("namespace"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go
    new file mode 100644
    index 0000000000..495c6d79d1
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// NodeLister helps list Nodes.
    +// All objects returned here must be treated as read-only.
    +type NodeLister interface {
    +	// List lists all Nodes in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Node, err error)
    +	// Get retrieves the Node from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Node, error)
    +	NodeListerExpansion
    +}
    +
    +// nodeLister implements the NodeLister interface.
    +type nodeLister struct {
    +	listers.ResourceIndexer[*v1.Node]
    +}
    +
    +// NewNodeLister returns a new NodeLister.
    +func NewNodeLister(indexer cache.Indexer) NodeLister {
    +	return &nodeLister{listers.New[*v1.Node](indexer, v1.Resource("node"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go
    new file mode 100644
    index 0000000000..17f19bb7a4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PersistentVolumeLister helps list PersistentVolumes.
    +// All objects returned here must be treated as read-only.
    +type PersistentVolumeLister interface {
    +	// List lists all PersistentVolumes in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PersistentVolume, err error)
    +	// Get retrieves the PersistentVolume from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.PersistentVolume, error)
    +	PersistentVolumeListerExpansion
    +}
    +
    +// persistentVolumeLister implements the PersistentVolumeLister interface.
    +type persistentVolumeLister struct {
    +	listers.ResourceIndexer[*v1.PersistentVolume]
    +}
    +
    +// NewPersistentVolumeLister returns a new PersistentVolumeLister.
    +func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister {
    +	return &persistentVolumeLister{listers.New[*v1.PersistentVolume](indexer, v1.Resource("persistentvolume"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go
    new file mode 100644
    index 0000000000..ce9df90314
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PersistentVolumeClaimLister helps list PersistentVolumeClaims.
    +// All objects returned here must be treated as read-only.
    +type PersistentVolumeClaimLister interface {
    +	// List lists all PersistentVolumeClaims in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error)
    +	// PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims.
    +	PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister
    +	PersistentVolumeClaimListerExpansion
    +}
    +
    +// persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface.
    +type persistentVolumeClaimLister struct {
    +	listers.ResourceIndexer[*v1.PersistentVolumeClaim]
    +}
    +
    +// NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister.
    +func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister {
    +	return &persistentVolumeClaimLister{listers.New[*v1.PersistentVolumeClaim](indexer, v1.Resource("persistentvolumeclaim"))}
    +}
    +
    +// PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims.
    +func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister {
    +	return persistentVolumeClaimNamespaceLister{listers.NewNamespaced[*v1.PersistentVolumeClaim](s.ResourceIndexer, namespace)}
    +}
    +
    +// PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims.
    +// All objects returned here must be treated as read-only.
    +type PersistentVolumeClaimNamespaceLister interface {
    +	// List lists all PersistentVolumeClaims in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error)
    +	// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.PersistentVolumeClaim, error)
    +	PersistentVolumeClaimNamespaceListerExpansion
    +}
    +
    +// persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister
    +// interface.
    +type persistentVolumeClaimNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.PersistentVolumeClaim]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go
    new file mode 100644
    index 0000000000..b17a8382a0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PodLister helps list Pods.
    +// All objects returned here must be treated as read-only.
    +type PodLister interface {
    +	// List lists all Pods in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Pod, err error)
    +	// Pods returns an object that can list and get Pods.
    +	Pods(namespace string) PodNamespaceLister
    +	PodListerExpansion
    +}
    +
    +// podLister implements the PodLister interface.
    +type podLister struct {
    +	listers.ResourceIndexer[*v1.Pod]
    +}
    +
    +// NewPodLister returns a new PodLister.
    +func NewPodLister(indexer cache.Indexer) PodLister {
    +	return &podLister{listers.New[*v1.Pod](indexer, v1.Resource("pod"))}
    +}
    +
    +// Pods returns an object that can list and get Pods.
    +func (s *podLister) Pods(namespace string) PodNamespaceLister {
    +	return podNamespaceLister{listers.NewNamespaced[*v1.Pod](s.ResourceIndexer, namespace)}
    +}
    +
    +// PodNamespaceLister helps list and get Pods.
    +// All objects returned here must be treated as read-only.
    +type PodNamespaceLister interface {
    +	// List lists all Pods in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Pod, err error)
    +	// Get retrieves the Pod from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Pod, error)
    +	PodNamespaceListerExpansion
    +}
    +
    +// podNamespaceLister implements the PodNamespaceLister
    +// interface.
    +type podNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Pod]
    +}
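
The generated listers above all share one shape: a thin wrapper over a cache.Indexer built with the generic listers.New / listers.NewNamespaced helpers. A minimal sketch of how consuming code typically uses one of them, taking the PodLister as the example; the namespace, object name, and hand-populated indexer are assumptions for illustration only, since in practice an informer keeps the indexer filled.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	corev1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Indexer keyed by namespace/name, with the namespace index the
	// namespaced listers rely on.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	// Hand-populate a single Pod; normally an informer does this.
	_ = indexer.Add(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "demo"},
	})

	podLister := corev1listers.NewPodLister(indexer)

	// Cluster-wide list and namespaced get; returned objects must be
	// treated as read-only.
	pods, _ := podLister.List(labels.Everything())
	pod, err := podLister.Pods("demo").Get("web-0")
	fmt.Println(len(pods), pod.GetName(), err)
}

The cluster-scoped listers in this package (for example the Node and Namespace listers above) differ only in exposing Get directly on the lister instead of through a namespace-scoped sub-lister.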
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go
    new file mode 100644
    index 0000000000..8ac93148f5
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PodTemplateLister helps list PodTemplates.
    +// All objects returned here must be treated as read-only.
    +type PodTemplateLister interface {
    +	// List lists all PodTemplates in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PodTemplate, err error)
    +	// PodTemplates returns an object that can list and get PodTemplates.
    +	PodTemplates(namespace string) PodTemplateNamespaceLister
    +	PodTemplateListerExpansion
    +}
    +
    +// podTemplateLister implements the PodTemplateLister interface.
    +type podTemplateLister struct {
    +	listers.ResourceIndexer[*v1.PodTemplate]
    +}
    +
    +// NewPodTemplateLister returns a new PodTemplateLister.
    +func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister {
    +	return &podTemplateLister{listers.New[*v1.PodTemplate](indexer, v1.Resource("podtemplate"))}
    +}
    +
    +// PodTemplates returns an object that can list and get PodTemplates.
    +func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister {
    +	return podTemplateNamespaceLister{listers.NewNamespaced[*v1.PodTemplate](s.ResourceIndexer, namespace)}
    +}
    +
    +// PodTemplateNamespaceLister helps list and get PodTemplates.
    +// All objects returned here must be treated as read-only.
    +type PodTemplateNamespaceLister interface {
    +	// List lists all PodTemplates in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PodTemplate, err error)
    +	// Get retrieves the PodTemplate from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.PodTemplate, error)
    +	PodTemplateNamespaceListerExpansion
    +}
    +
    +// podTemplateNamespaceLister implements the PodTemplateNamespaceLister
    +// interface.
    +type podTemplateNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.PodTemplate]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go
    new file mode 100644
    index 0000000000..8ce23fc09a
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicationControllerLister helps list ReplicationControllers.
    +// All objects returned here must be treated as read-only.
    +type ReplicationControllerLister interface {
    +	// List lists all ReplicationControllers in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ReplicationController, err error)
    +	// ReplicationControllers returns an object that can list and get ReplicationControllers.
    +	ReplicationControllers(namespace string) ReplicationControllerNamespaceLister
    +	ReplicationControllerListerExpansion
    +}
    +
    +// replicationControllerLister implements the ReplicationControllerLister interface.
    +type replicationControllerLister struct {
    +	listers.ResourceIndexer[*v1.ReplicationController]
    +}
    +
    +// NewReplicationControllerLister returns a new ReplicationControllerLister.
    +func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister {
    +	return &replicationControllerLister{listers.New[*v1.ReplicationController](indexer, v1.Resource("replicationcontroller"))}
    +}
    +
    +// ReplicationControllers returns an object that can list and get ReplicationControllers.
    +func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister {
    +	return replicationControllerNamespaceLister{listers.NewNamespaced[*v1.ReplicationController](s.ResourceIndexer, namespace)}
    +}
    +
    +// ReplicationControllerNamespaceLister helps list and get ReplicationControllers.
    +// All objects returned here must be treated as read-only.
    +type ReplicationControllerNamespaceLister interface {
    +	// List lists all ReplicationControllers in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ReplicationController, err error)
    +	// Get retrieves the ReplicationController from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ReplicationController, error)
    +	ReplicationControllerNamespaceListerExpansion
    +}
    +
    +// replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister
    +// interface.
    +type replicationControllerNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.ReplicationController]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go
    new file mode 100644
    index 0000000000..b031d52173
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go
    @@ -0,0 +1,66 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1
    +
    +import (
    +	"fmt"
    +
    +	"k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// ReplicationControllerListerExpansion allows custom methods to be added to
    +// ReplicationControllerLister.
    +type ReplicationControllerListerExpansion interface {
    +	GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error)
    +}
    +
    +// ReplicationControllerNamespaceListerExpansion allows custom methods to be added to
    +// ReplicationControllerNamespaceLister.
    +type ReplicationControllerNamespaceListerExpansion interface{}
    +
    +// GetPodControllers returns a list of ReplicationControllers that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching ReplicationControllers are found.
    +func (s *replicationControllerLister) GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) {
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	items, err := s.ReplicationControllers(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var controllers []*v1.ReplicationController
    +	for i := range items {
    +		rc := items[i]
    +		selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated()
    +
    +		// If an rc with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		controllers = append(controllers, rc)
    +	}
    +
    +	if len(controllers) == 0 {
    +		return nil, fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return controllers, nil
    +}
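
A small sketch of the GetPodControllers expansion above in use: it lists the ReplicationControllers in the pod's namespace and keeps those whose selector matches the pod's labels, returning an error when the pod has no labels or nothing matches. The controller, pod, and namespace names below are made-up fixtures for illustration.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	// One controller selecting app=web and one pod carrying that label.
	_ = indexer.Add(&v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: "web-rc", Namespace: "demo"},
		Spec:       v1.ReplicationControllerSpec{Selector: map[string]string{"app": "web"}},
	})
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "web-0",
		Namespace: "demo",
		Labels:    map[string]string{"app": "web"},
	}}

	rcLister := corev1listers.NewReplicationControllerLister(indexer)
	rcs, err := rcLister.GetPodControllers(pod)
	fmt.Println(len(rcs), err) // expect one matching controller and a nil error
}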
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go
    new file mode 100644
    index 0000000000..4b46194a25
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceQuotaLister helps list ResourceQuotas.
    +// All objects returned here must be treated as read-only.
    +type ResourceQuotaLister interface {
    +	// List lists all ResourceQuotas in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ResourceQuota, err error)
    +	// ResourceQuotas returns an object that can list and get ResourceQuotas.
    +	ResourceQuotas(namespace string) ResourceQuotaNamespaceLister
    +	ResourceQuotaListerExpansion
    +}
    +
    +// resourceQuotaLister implements the ResourceQuotaLister interface.
    +type resourceQuotaLister struct {
    +	listers.ResourceIndexer[*v1.ResourceQuota]
    +}
    +
    +// NewResourceQuotaLister returns a new ResourceQuotaLister.
    +func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister {
    +	return &resourceQuotaLister{listers.New[*v1.ResourceQuota](indexer, v1.Resource("resourcequota"))}
    +}
    +
    +// ResourceQuotas returns an object that can list and get ResourceQuotas.
    +func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister {
    +	return resourceQuotaNamespaceLister{listers.NewNamespaced[*v1.ResourceQuota](s.ResourceIndexer, namespace)}
    +}
    +
    +// ResourceQuotaNamespaceLister helps list and get ResourceQuotas.
    +// All objects returned here must be treated as read-only.
    +type ResourceQuotaNamespaceLister interface {
    +	// List lists all ResourceQuotas in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ResourceQuota, err error)
    +	// Get retrieves the ResourceQuota from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ResourceQuota, error)
    +	ResourceQuotaNamespaceListerExpansion
    +}
    +
    +// resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister
    +// interface.
    +type resourceQuotaNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.ResourceQuota]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go
    new file mode 100644
    index 0000000000..47a0c9a084
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// SecretLister helps list Secrets.
    +// All objects returned here must be treated as read-only.
    +type SecretLister interface {
    +	// List lists all Secrets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Secret, err error)
    +	// Secrets returns an object that can list and get Secrets.
    +	Secrets(namespace string) SecretNamespaceLister
    +	SecretListerExpansion
    +}
    +
    +// secretLister implements the SecretLister interface.
    +type secretLister struct {
    +	listers.ResourceIndexer[*v1.Secret]
    +}
    +
    +// NewSecretLister returns a new SecretLister.
    +func NewSecretLister(indexer cache.Indexer) SecretLister {
    +	return &secretLister{listers.New[*v1.Secret](indexer, v1.Resource("secret"))}
    +}
    +
    +// Secrets returns an object that can list and get Secrets.
    +func (s *secretLister) Secrets(namespace string) SecretNamespaceLister {
    +	return secretNamespaceLister{listers.NewNamespaced[*v1.Secret](s.ResourceIndexer, namespace)}
    +}
    +
    +// SecretNamespaceLister helps list and get Secrets.
    +// All objects returned here must be treated as read-only.
    +type SecretNamespaceLister interface {
    +	// List lists all Secrets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Secret, err error)
    +	// Get retrieves the Secret from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Secret, error)
    +	SecretNamespaceListerExpansion
    +}
    +
    +// secretNamespaceLister implements the SecretNamespaceLister
    +// interface.
    +type secretNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Secret]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go
    new file mode 100644
    index 0000000000..536fb337f9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceLister helps list Services.
    +// All objects returned here must be treated as read-only.
    +type ServiceLister interface {
    +	// List lists all Services in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Service, err error)
    +	// Services returns an object that can list and get Services.
    +	Services(namespace string) ServiceNamespaceLister
    +	ServiceListerExpansion
    +}
    +
    +// serviceLister implements the ServiceLister interface.
    +type serviceLister struct {
    +	listers.ResourceIndexer[*v1.Service]
    +}
    +
    +// NewServiceLister returns a new ServiceLister.
    +func NewServiceLister(indexer cache.Indexer) ServiceLister {
    +	return &serviceLister{listers.New[*v1.Service](indexer, v1.Resource("service"))}
    +}
    +
    +// Services returns an object that can list and get Services.
    +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister {
    +	return serviceNamespaceLister{listers.NewNamespaced[*v1.Service](s.ResourceIndexer, namespace)}
    +}
    +
    +// ServiceNamespaceLister helps list and get Services.
    +// All objects returned here must be treated as read-only.
    +type ServiceNamespaceLister interface {
    +	// List lists all Services in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Service, err error)
    +	// Get retrieves the Service from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Service, error)
    +	ServiceNamespaceListerExpansion
    +}
    +
    +// serviceNamespaceLister implements the ServiceNamespaceLister
    +// interface.
    +type serviceNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Service]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go
    new file mode 100644
    index 0000000000..8a4af4f4cb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/core/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceAccountLister helps list ServiceAccounts.
    +// All objects returned here must be treated as read-only.
    +type ServiceAccountLister interface {
    +	// List lists all ServiceAccounts in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ServiceAccount, err error)
    +	// ServiceAccounts returns an object that can list and get ServiceAccounts.
    +	ServiceAccounts(namespace string) ServiceAccountNamespaceLister
    +	ServiceAccountListerExpansion
    +}
    +
    +// serviceAccountLister implements the ServiceAccountLister interface.
    +type serviceAccountLister struct {
    +	listers.ResourceIndexer[*v1.ServiceAccount]
    +}
    +
    +// NewServiceAccountLister returns a new ServiceAccountLister.
    +func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister {
    +	return &serviceAccountLister{listers.New[*v1.ServiceAccount](indexer, v1.Resource("serviceaccount"))}
    +}
    +
    +// ServiceAccounts returns an object that can list and get ServiceAccounts.
    +func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister {
    +	return serviceAccountNamespaceLister{listers.NewNamespaced[*v1.ServiceAccount](s.ResourceIndexer, namespace)}
    +}
    +
    +// ServiceAccountNamespaceLister helps list and get ServiceAccounts.
    +// All objects returned here must be treated as read-only.
    +type ServiceAccountNamespaceLister interface {
    +	// List lists all ServiceAccounts in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ServiceAccount, err error)
    +	// Get retrieves the ServiceAccount from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ServiceAccount, error)
    +	ServiceAccountNamespaceListerExpansion
    +}
    +
    +// serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister
    +// interface.
    +type serviceAccountNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.ServiceAccount]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go
    new file mode 100644
    index 0000000000..dcb18f19a8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/discovery/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EndpointSliceLister helps list EndpointSlices.
    +// All objects returned here must be treated as read-only.
    +type EndpointSliceLister interface {
    +	// List lists all EndpointSlices in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.EndpointSlice, err error)
    +	// EndpointSlices returns an object that can list and get EndpointSlices.
    +	EndpointSlices(namespace string) EndpointSliceNamespaceLister
    +	EndpointSliceListerExpansion
    +}
    +
    +// endpointSliceLister implements the EndpointSliceLister interface.
    +type endpointSliceLister struct {
    +	listers.ResourceIndexer[*v1.EndpointSlice]
    +}
    +
    +// NewEndpointSliceLister returns a new EndpointSliceLister.
    +func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister {
    +	return &endpointSliceLister{listers.New[*v1.EndpointSlice](indexer, v1.Resource("endpointslice"))}
    +}
    +
    +// EndpointSlices returns an object that can list and get EndpointSlices.
    +func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister {
    +	return endpointSliceNamespaceLister{listers.NewNamespaced[*v1.EndpointSlice](s.ResourceIndexer, namespace)}
    +}
    +
    +// EndpointSliceNamespaceLister helps list and get EndpointSlices.
    +// All objects returned here must be treated as read-only.
    +type EndpointSliceNamespaceLister interface {
    +	// List lists all EndpointSlices in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.EndpointSlice, err error)
    +	// Get retrieves the EndpointSlice from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.EndpointSlice, error)
    +	EndpointSliceNamespaceListerExpansion
    +}
    +
    +// endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister
    +// interface.
    +type endpointSliceNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.EndpointSlice]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/discovery/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/discovery/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..660163eeef
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/discovery/v1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// EndpointSliceListerExpansion allows custom methods to be added to
    +// EndpointSliceLister.
    +type EndpointSliceListerExpansion interface{}
    +
    +// EndpointSliceNamespaceListerExpansion allows custom methods to be added to
    +// EndpointSliceNamespaceLister.
    +type EndpointSliceNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go
    new file mode 100644
    index 0000000000..d3762f5c28
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/discovery/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EndpointSliceLister helps list EndpointSlices.
    +// All objects returned here must be treated as read-only.
    +type EndpointSliceLister interface {
    +	// List lists all EndpointSlices in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error)
    +	// EndpointSlices returns an object that can list and get EndpointSlices.
    +	EndpointSlices(namespace string) EndpointSliceNamespaceLister
    +	EndpointSliceListerExpansion
    +}
    +
    +// endpointSliceLister implements the EndpointSliceLister interface.
    +type endpointSliceLister struct {
    +	listers.ResourceIndexer[*v1beta1.EndpointSlice]
    +}
    +
    +// NewEndpointSliceLister returns a new EndpointSliceLister.
    +func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister {
    +	return &endpointSliceLister{listers.New[*v1beta1.EndpointSlice](indexer, v1beta1.Resource("endpointslice"))}
    +}
    +
    +// EndpointSlices returns an object that can list and get EndpointSlices.
    +func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister {
    +	return endpointSliceNamespaceLister{listers.NewNamespaced[*v1beta1.EndpointSlice](s.ResourceIndexer, namespace)}
    +}
    +
    +// EndpointSliceNamespaceLister helps list and get EndpointSlices.
    +// All objects returned here must be treated as read-only.
    +type EndpointSliceNamespaceLister interface {
    +	// List lists all EndpointSlices in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error)
    +	// Get retrieves the EndpointSlice from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.EndpointSlice, error)
    +	EndpointSliceNamespaceListerExpansion
    +}
    +
    +// endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister
    +// interface.
    +type endpointSliceNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.EndpointSlice]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..9619bbd8dd
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// EndpointSliceListerExpansion allows custom methods to be added to
    +// EndpointSliceLister.
    +type EndpointSliceListerExpansion interface{}
    +
    +// EndpointSliceNamespaceListerExpansion allows custom methods to be added to
    +// EndpointSliceNamespaceLister.
    +type EndpointSliceNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/doc.go b/vendor/k8s.io/client-go/listers/doc.go
    new file mode 100644
    index 0000000000..96c330c931
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/doc.go
    @@ -0,0 +1,18 @@
    +/*
    +Copyright 2023 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Package listers provides generated listers for Kubernetes APIs.
    +package listers // import "k8s.io/client-go/listers"
    diff --git a/vendor/k8s.io/client-go/listers/events/v1/event.go b/vendor/k8s.io/client-go/listers/events/v1/event.go
    new file mode 100644
    index 0000000000..66e3c64669
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/events/v1/event.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/events/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EventLister helps list Events.
    +// All objects returned here must be treated as read-only.
    +type EventLister interface {
    +	// List lists all Events in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Event, err error)
    +	// Events returns an object that can list and get Events.
    +	Events(namespace string) EventNamespaceLister
    +	EventListerExpansion
    +}
    +
    +// eventLister implements the EventLister interface.
    +type eventLister struct {
    +	listers.ResourceIndexer[*v1.Event]
    +}
    +
    +// NewEventLister returns a new EventLister.
    +func NewEventLister(indexer cache.Indexer) EventLister {
    +	return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))}
    +}
    +
    +// Events returns an object that can list and get Events.
    +func (s *eventLister) Events(namespace string) EventNamespaceLister {
    +	return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)}
    +}
    +
    +// EventNamespaceLister helps list and get Events.
    +// All objects returned here must be treated as read-only.
    +type EventNamespaceLister interface {
    +	// List lists all Events in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Event, err error)
    +	// Get retrieves the Event from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Event, error)
    +	EventNamespaceListerExpansion
    +}
    +
    +// eventNamespaceLister implements the EventNamespaceLister
    +// interface.
    +type eventNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Event]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/events/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/events/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..348e784d74
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/events/v1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// EventListerExpansion allows custom methods to be added to
    +// EventLister.
    +type EventListerExpansion interface{}
    +
    +// EventNamespaceListerExpansion allows custom methods to be added to
    +// EventNamespaceLister.
    +type EventNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go
    new file mode 100644
    index 0000000000..3d51bb2656
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/events/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EventLister helps list Events.
    +// All objects returned here must be treated as read-only.
    +type EventLister interface {
    +	// List lists all Events in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Event, err error)
    +	// Events returns an object that can list and get Events.
    +	Events(namespace string) EventNamespaceLister
    +	EventListerExpansion
    +}
    +
    +// eventLister implements the EventLister interface.
    +type eventLister struct {
    +	listers.ResourceIndexer[*v1beta1.Event]
    +}
    +
    +// NewEventLister returns a new EventLister.
    +func NewEventLister(indexer cache.Indexer) EventLister {
    +	return &eventLister{listers.New[*v1beta1.Event](indexer, v1beta1.Resource("event"))}
    +}
    +
    +// Events returns an object that can list and get Events.
    +func (s *eventLister) Events(namespace string) EventNamespaceLister {
    +	return eventNamespaceLister{listers.NewNamespaced[*v1beta1.Event](s.ResourceIndexer, namespace)}
    +}
    +
    +// EventNamespaceLister helps list and get Events.
    +// All objects returned here must be treated as read-only.
    +type EventNamespaceLister interface {
    +	// List lists all Events in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Event, err error)
    +	// Get retrieves the Event from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Event, error)
    +	EventNamespaceListerExpansion
    +}
    +
    +// eventNamespaceLister implements the EventNamespaceLister
    +// interface.
    +type eventNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Event]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..d311691d9d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// EventListerExpansion allows custom methods to be added to
    +// EventLister.
    +type EventListerExpansion interface{}
    +
    +// EventNamespaceListerExpansion allows custom methods to be added to
    +// EventNamespaceLister.
    +type EventNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go
    new file mode 100644
    index 0000000000..4510b42360
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/extensions/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DaemonSetLister helps list DaemonSets.
    +// All objects returned here must be treated as read-only.
    +type DaemonSetLister interface {
    +	// List lists all DaemonSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error)
    +	// DaemonSets returns an object that can list and get DaemonSets.
    +	DaemonSets(namespace string) DaemonSetNamespaceLister
    +	DaemonSetListerExpansion
    +}
    +
    +// daemonSetLister implements the DaemonSetLister interface.
    +type daemonSetLister struct {
    +	listers.ResourceIndexer[*v1beta1.DaemonSet]
    +}
    +
    +// NewDaemonSetLister returns a new DaemonSetLister.
    +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister {
    +	return &daemonSetLister{listers.New[*v1beta1.DaemonSet](indexer, v1beta1.Resource("daemonset"))}
    +}
    +
    +// DaemonSets returns an object that can list and get DaemonSets.
    +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister {
    +	return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta1.DaemonSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// DaemonSetNamespaceLister helps list and get DaemonSets.
    +// All objects returned here must be treated as read-only.
    +type DaemonSetNamespaceLister interface {
    +	// List lists all DaemonSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error)
    +	// Get retrieves the DaemonSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.DaemonSet, error)
    +	DaemonSetNamespaceListerExpansion
    +}
    +
    +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister
    +// interface.
    +type daemonSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.DaemonSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go
    new file mode 100644
    index 0000000000..f6dd7a963e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go
    @@ -0,0 +1,115 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta1
    +
    +import (
    +	"fmt"
    +
    +	apps "k8s.io/api/apps/v1beta1"
    +	"k8s.io/api/core/v1"
    +	"k8s.io/api/extensions/v1beta1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// DaemonSetListerExpansion allows custom methods to be added to
    +// DaemonSetLister.
    +type DaemonSetListerExpansion interface {
    +	GetPodDaemonSets(pod *v1.Pod) ([]*v1beta1.DaemonSet, error)
    +	GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*v1beta1.DaemonSet, error)
    +}
    +
    +// DaemonSetNamespaceListerExpansion allows custom methods to be added to
    +// DaemonSetNamespaceLister.
    +type DaemonSetNamespaceListerExpansion interface{}
    +
    +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching DaemonSets are found.
    +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*v1beta1.DaemonSet, error) {
    +	var selector labels.Selector
    +	var daemonSet *v1beta1.DaemonSet
    +
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.DaemonSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var daemonSets []*v1beta1.DaemonSet
    +	for i := range list {
    +		daemonSet = list[i]
    +		if daemonSet.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		daemonSets = append(daemonSets, daemonSet)
    +	}
    +
    +	if len(daemonSets) == 0 {
    +		return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return daemonSets, nil
    +}
    +
    +// GetHistoryDaemonSets returns a list of DaemonSets that potentially
    +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef
    +// will actually manage it.
    +// Returns an error only if no matching DaemonSets are found.
    +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*v1beta1.DaemonSet, error) {
    +	if len(history.Labels) == 0 {
    +		return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name)
    +	}
    +
    +	list, err := s.DaemonSets(history.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var daemonSets []*v1beta1.DaemonSet
    +	for _, ds := range list {
    +		selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the history object
    +			continue
    +		}
    +		// If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) {
    +			continue
    +		}
    +		daemonSets = append(daemonSets, ds)
    +	}
    +
    +	if len(daemonSets) == 0 {
    +		return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels)
    +	}
    +
    +	return daemonSets, nil
    +}
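For reference, a minimal sketch of the GetPodDaemonSets expansion method above, assuming a hand-built indexer and an illustrative "node-agent" DaemonSet whose selector matches the pod's labels; the method only returns selector-based candidates, and the pod's ControllerRef decides the real owner:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	extv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extlisters "k8s.io/client-go/listers/extensions/v1beta1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	// Illustrative DaemonSet whose selector matches the pod below.
	_ = indexer.Add(&extv1beta1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Namespace: "kube-system"},
		Spec: extv1beta1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "node-agent"}},
		},
	})

	lister := extlisters.NewDaemonSetLister(indexer)

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "node-agent-abc12",
			Namespace: "kube-system",
			Labels:    map[string]string{"app": "node-agent"},
		},
	}

	// Candidates by label selector only; an empty result is returned as an error.
	candidates, err := lister.GetPodDaemonSets(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(candidates)) // 1
}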
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go
    new file mode 100644
    index 0000000000..149047c973
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/extensions/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DeploymentLister helps list Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentLister interface {
    +	// List lists all Deployments in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Deployment, err error)
    +	// Deployments returns an object that can list and get Deployments.
    +	Deployments(namespace string) DeploymentNamespaceLister
    +	DeploymentListerExpansion
    +}
    +
    +// deploymentLister implements the DeploymentLister interface.
    +type deploymentLister struct {
    +	listers.ResourceIndexer[*v1beta1.Deployment]
    +}
    +
    +// NewDeploymentLister returns a new DeploymentLister.
    +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister {
    +	return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))}
    +}
    +
    +// Deployments returns an object that can list and get Deployments.
    +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister {
    +	return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)}
    +}
    +
    +// DeploymentNamespaceLister helps list and get Deployments.
    +// All objects returned here must be treated as read-only.
    +type DeploymentNamespaceLister interface {
    +	// List lists all Deployments in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Deployment, err error)
    +	// Get retrieves the Deployment from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Deployment, error)
    +	DeploymentNamespaceListerExpansion
    +}
    +
    +// deploymentNamespaceLister implements the DeploymentNamespaceLister
    +// interface.
    +type deploymentNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Deployment]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..4c65dbf764
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go
    @@ -0,0 +1,43 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// DeploymentListerExpansion allows custom methods to be added to
    +// DeploymentLister.
    +type DeploymentListerExpansion interface{}
    +
    +// DeploymentNamespaceListerExpansion allows custom methods to be added to
    +// DeploymentNamespaceLister.
    +type DeploymentNamespaceListerExpansion interface{}
    +
    +// IngressListerExpansion allows custom methods to be added to
    +// IngressLister.
    +type IngressListerExpansion interface{}
    +
    +// IngressNamespaceListerExpansion allows custom methods to be added to
    +// IngressNamespaceLister.
    +type IngressNamespaceListerExpansion interface{}
    +
    +// NetworkPolicyListerExpansion allows custom methods to be added to
    +// NetworkPolicyLister.
    +type NetworkPolicyListerExpansion interface{}
    +
    +// NetworkPolicyNamespaceListerExpansion allows custom methods to be added to
    +// NetworkPolicyNamespaceLister.
    +type NetworkPolicyNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go
    new file mode 100644
    index 0000000000..b714eebb35
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/extensions/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressLister helps list Ingresses.
    +// All objects returned here must be treated as read-only.
    +type IngressLister interface {
    +	// List lists all Ingresses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Ingress, err error)
    +	// Ingresses returns an object that can list and get Ingresses.
    +	Ingresses(namespace string) IngressNamespaceLister
    +	IngressListerExpansion
    +}
    +
    +// ingressLister implements the IngressLister interface.
    +type ingressLister struct {
    +	listers.ResourceIndexer[*v1beta1.Ingress]
    +}
    +
    +// NewIngressLister returns a new IngressLister.
    +func NewIngressLister(indexer cache.Indexer) IngressLister {
    +	return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))}
    +}
    +
    +// Ingresses returns an object that can list and get Ingresses.
    +func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister {
    +	return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)}
    +}
    +
    +// IngressNamespaceLister helps list and get Ingresses.
    +// All objects returned here must be treated as read-only.
    +type IngressNamespaceLister interface {
    +	// List lists all Ingresses in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Ingress, err error)
    +	// Get retrieves the Ingress from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Ingress, error)
    +	IngressNamespaceListerExpansion
    +}
    +
    +// ingressNamespaceLister implements the IngressNamespaceLister
    +// interface.
    +type ingressNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Ingress]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go
    new file mode 100644
    index 0000000000..b31099c266
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/extensions/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// NetworkPolicyLister helps list NetworkPolicies.
    +// All objects returned here must be treated as read-only.
    +type NetworkPolicyLister interface {
    +	// List lists all NetworkPolicies in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error)
    +	// NetworkPolicies returns an object that can list and get NetworkPolicies.
    +	NetworkPolicies(namespace string) NetworkPolicyNamespaceLister
    +	NetworkPolicyListerExpansion
    +}
    +
    +// networkPolicyLister implements the NetworkPolicyLister interface.
    +type networkPolicyLister struct {
    +	listers.ResourceIndexer[*v1beta1.NetworkPolicy]
    +}
    +
    +// NewNetworkPolicyLister returns a new NetworkPolicyLister.
    +func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister {
    +	return &networkPolicyLister{listers.New[*v1beta1.NetworkPolicy](indexer, v1beta1.Resource("networkpolicy"))}
    +}
    +
    +// NetworkPolicies returns an object that can list and get NetworkPolicies.
    +func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister {
    +	return networkPolicyNamespaceLister{listers.NewNamespaced[*v1beta1.NetworkPolicy](s.ResourceIndexer, namespace)}
    +}
    +
    +// NetworkPolicyNamespaceLister helps list and get NetworkPolicies.
    +// All objects returned here must be treated as read-only.
    +type NetworkPolicyNamespaceLister interface {
    +	// List lists all NetworkPolicies in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error)
    +	// Get retrieves the NetworkPolicy from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.NetworkPolicy, error)
    +	NetworkPolicyNamespaceListerExpansion
    +}
    +
    +// networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister
    +// interface.
    +type networkPolicyNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.NetworkPolicy]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go
    new file mode 100644
    index 0000000000..604bee80ba
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/extensions/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ReplicaSetLister helps list ReplicaSets.
    +// All objects returned here must be treated as read-only.
    +type ReplicaSetLister interface {
    +	// List lists all ReplicaSets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error)
    +	// ReplicaSets returns an object that can list and get ReplicaSets.
    +	ReplicaSets(namespace string) ReplicaSetNamespaceLister
    +	ReplicaSetListerExpansion
    +}
    +
    +// replicaSetLister implements the ReplicaSetLister interface.
    +type replicaSetLister struct {
    +	listers.ResourceIndexer[*v1beta1.ReplicaSet]
    +}
    +
    +// NewReplicaSetLister returns a new ReplicaSetLister.
    +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister {
    +	return &replicaSetLister{listers.New[*v1beta1.ReplicaSet](indexer, v1beta1.Resource("replicaset"))}
    +}
    +
    +// ReplicaSets returns an object that can list and get ReplicaSets.
    +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister {
    +	return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta1.ReplicaSet](s.ResourceIndexer, namespace)}
    +}
    +
    +// ReplicaSetNamespaceLister helps list and get ReplicaSets.
    +// All objects returned here must be treated as read-only.
    +type ReplicaSetNamespaceLister interface {
    +	// List lists all ReplicaSets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error)
    +	// Get retrieves the ReplicaSet from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ReplicaSet, error)
    +	ReplicaSetNamespaceListerExpansion
    +}
    +
    +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister
    +// interface.
    +type replicaSetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.ReplicaSet]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go
    new file mode 100644
    index 0000000000..74114c2bd7
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go
    @@ -0,0 +1,74 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta1
    +
    +import (
    +	"fmt"
    +
    +	"k8s.io/api/core/v1"
    +	extensions "k8s.io/api/extensions/v1beta1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// ReplicaSetListerExpansion allows custom methods to be added to
    +// ReplicaSetLister.
    +type ReplicaSetListerExpansion interface {
    +	GetPodReplicaSets(pod *v1.Pod) ([]*extensions.ReplicaSet, error)
    +}
    +
    +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to
    +// ReplicaSetNamespaceLister.
    +type ReplicaSetNamespaceListerExpansion interface{}
    +
    +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod.
    +// Only the one specified in the Pod's ControllerRef will actually manage it.
    +// Returns an error only if no matching ReplicaSets are found.
    +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*extensions.ReplicaSet, error) {
    +	if len(pod.Labels) == 0 {
    +		return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name)
    +	}
    +
    +	list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var rss []*extensions.ReplicaSet
    +	for _, rs := range list {
    +		if rs.Namespace != pod.Namespace {
    +			continue
    +		}
    +		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		rss = append(rss, rs)
    +	}
    +
    +	if len(rss) == 0 {
    +		return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return rss, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..70b5eb5b17
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// FlowSchemaListerExpansion allows custom methods to be added to
    +// FlowSchemaLister.
    +type FlowSchemaListerExpansion interface{}
    +
    +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to
    +// PriorityLevelConfigurationLister.
    +type PriorityLevelConfigurationListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go
    new file mode 100644
    index 0000000000..ba7e514874
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/flowcontrol/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaLister helps list FlowSchemas.
    +// All objects returned here must be treated as read-only.
    +type FlowSchemaLister interface {
    +	// List lists all FlowSchemas in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.FlowSchema, err error)
    +	// Get retrieves the FlowSchema from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.FlowSchema, error)
    +	FlowSchemaListerExpansion
    +}
    +
    +// flowSchemaLister implements the FlowSchemaLister interface.
    +type flowSchemaLister struct {
    +	listers.ResourceIndexer[*v1.FlowSchema]
    +}
    +
    +// NewFlowSchemaLister returns a new FlowSchemaLister.
    +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister {
    +	return &flowSchemaLister{listers.New[*v1.FlowSchema](indexer, v1.Resource("flowschema"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..61f5b9fe6d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/flowcontrol/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations.
    +// All objects returned here must be treated as read-only.
    +type PriorityLevelConfigurationLister interface {
    +	// List lists all PriorityLevelConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error)
    +	// Get retrieves the PriorityLevelConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.PriorityLevelConfiguration, error)
    +	PriorityLevelConfigurationListerExpansion
    +}
    +
    +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface.
    +type priorityLevelConfigurationLister struct {
    +	listers.ResourceIndexer[*v1.PriorityLevelConfiguration]
    +}
    +
    +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister.
    +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister {
    +	return &priorityLevelConfigurationLister{listers.New[*v1.PriorityLevelConfiguration](indexer, v1.Resource("prioritylevelconfiguration"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..c674e951e0
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// FlowSchemaListerExpansion allows custom methods to be added to
    +// FlowSchemaLister.
    +type FlowSchemaListerExpansion interface{}
    +
    +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to
    +// PriorityLevelConfigurationLister.
    +type PriorityLevelConfigurationListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go
    new file mode 100644
    index 0000000000..59bca6ce4e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaLister helps list FlowSchemas.
    +// All objects returned here must be treated as read-only.
    +type FlowSchemaLister interface {
    +	// List lists all FlowSchemas in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error)
    +	// Get retrieves the FlowSchema from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.FlowSchema, error)
    +	FlowSchemaListerExpansion
    +}
    +
    +// flowSchemaLister implements the FlowSchemaLister interface.
    +type flowSchemaLister struct {
    +	listers.ResourceIndexer[*v1beta1.FlowSchema]
    +}
    +
    +// NewFlowSchemaLister returns a new FlowSchemaLister.
    +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister {
    +	return &flowSchemaLister{listers.New[*v1beta1.FlowSchema](indexer, v1beta1.Resource("flowschema"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..902f7cc4bb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/flowcontrol/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations.
    +// All objects returned here must be treated as read-only.
    +type PriorityLevelConfigurationLister interface {
    +	// List lists all PriorityLevelConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error)
    +	// Get retrieves the PriorityLevelConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.PriorityLevelConfiguration, error)
    +	PriorityLevelConfigurationListerExpansion
    +}
    +
    +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface.
    +type priorityLevelConfigurationLister struct {
    +	listers.ResourceIndexer[*v1beta1.PriorityLevelConfiguration]
    +}
    +
    +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister.
    +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister {
    +	return &priorityLevelConfigurationLister{listers.New[*v1beta1.PriorityLevelConfiguration](indexer, v1beta1.Resource("prioritylevelconfiguration"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/expansion_generated.go
    new file mode 100644
    index 0000000000..b658de6549
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +// FlowSchemaListerExpansion allows custom methods to be added to
    +// FlowSchemaLister.
    +type FlowSchemaListerExpansion interface{}
    +
    +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to
    +// PriorityLevelConfigurationLister.
    +type PriorityLevelConfigurationListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go
    new file mode 100644
    index 0000000000..721c5f6bdd
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaLister helps list FlowSchemas.
    +// All objects returned here must be treated as read-only.
    +type FlowSchemaLister interface {
    +	// List lists all FlowSchemas in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error)
    +	// Get retrieves the FlowSchema from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta2.FlowSchema, error)
    +	FlowSchemaListerExpansion
    +}
    +
    +// flowSchemaLister implements the FlowSchemaLister interface.
    +type flowSchemaLister struct {
    +	listers.ResourceIndexer[*v1beta2.FlowSchema]
    +}
    +
    +// NewFlowSchemaLister returns a new FlowSchemaLister.
    +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister {
    +	return &flowSchemaLister{listers.New[*v1beta2.FlowSchema](indexer, v1beta2.Resource("flowschema"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..3e8a2134fc
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta2
    +
    +import (
    +	v1beta2 "k8s.io/api/flowcontrol/v1beta2"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations.
    +// All objects returned here must be treated as read-only.
    +type PriorityLevelConfigurationLister interface {
    +	// List lists all PriorityLevelConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error)
    +	// Get retrieves the PriorityLevelConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta2.PriorityLevelConfiguration, error)
    +	PriorityLevelConfigurationListerExpansion
    +}
    +
    +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface.
    +type priorityLevelConfigurationLister struct {
    +	listers.ResourceIndexer[*v1beta2.PriorityLevelConfiguration]
    +}
    +
    +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister.
    +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister {
    +	return &priorityLevelConfigurationLister{listers.New[*v1beta2.PriorityLevelConfiguration](indexer, v1beta2.Resource("prioritylevelconfiguration"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go
    new file mode 100644
    index 0000000000..5c14f337b7
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta3
    +
    +// FlowSchemaListerExpansion allows custom methods to be added to
    +// FlowSchemaLister.
    +type FlowSchemaListerExpansion interface{}
    +
    +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to
    +// PriorityLevelConfigurationLister.
    +type PriorityLevelConfigurationListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go
    new file mode 100644
    index 0000000000..c5555fd64d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta3
    +
    +import (
    +	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// FlowSchemaLister helps list FlowSchemas.
    +// All objects returned here must be treated as read-only.
    +type FlowSchemaLister interface {
    +	// List lists all FlowSchemas in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error)
    +	// Get retrieves the FlowSchema from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta3.FlowSchema, error)
    +	FlowSchemaListerExpansion
    +}
    +
    +// flowSchemaLister implements the FlowSchemaLister interface.
    +type flowSchemaLister struct {
    +	listers.ResourceIndexer[*v1beta3.FlowSchema]
    +}
    +
    +// NewFlowSchemaLister returns a new FlowSchemaLister.
    +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister {
    +	return &flowSchemaLister{listers.New[*v1beta3.FlowSchema](indexer, v1beta3.Resource("flowschema"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go
    new file mode 100644
    index 0000000000..9f7d89c548
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta3
    +
    +import (
    +	v1beta3 "k8s.io/api/flowcontrol/v1beta3"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations.
    +// All objects returned here must be treated as read-only.
    +type PriorityLevelConfigurationLister interface {
    +	// List lists all PriorityLevelConfigurations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error)
    +	// Get retrieves the PriorityLevelConfiguration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta3.PriorityLevelConfiguration, error)
    +	PriorityLevelConfigurationListerExpansion
    +}
    +
    +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface.
    +type priorityLevelConfigurationLister struct {
    +	listers.ResourceIndexer[*v1beta3.PriorityLevelConfiguration]
    +}
    +
    +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister.
    +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister {
    +	return &priorityLevelConfigurationLister{listers.New[*v1beta3.PriorityLevelConfiguration](indexer, v1beta3.Resource("prioritylevelconfiguration"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/generic_helpers.go b/vendor/k8s.io/client-go/listers/generic_helpers.go
    new file mode 100644
    index 0000000000..c69bb22b11
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/generic_helpers.go
    @@ -0,0 +1,72 @@
    +/*
    +Copyright 2023 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package listers
    +
    +import (
    +	"k8s.io/apimachinery/pkg/api/errors"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	"k8s.io/apimachinery/pkg/runtime/schema"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceIndexer wraps an indexer, resource, and optional namespace for a given type.
    +// This is intended for use by listers (generated by lister-gen) only.
    +type ResourceIndexer[T runtime.Object] struct {
    +	indexer   cache.Indexer
    +	resource  schema.GroupResource
    +	namespace string // empty for non-namespaced types
    +}
    +
    +// New returns a new instance of a lister (resource indexer) wrapping the given indexer and resource for the specified type.
    +// This is intended for use by listers (generated by lister-gen) only.
    +func New[T runtime.Object](indexer cache.Indexer, resource schema.GroupResource) ResourceIndexer[T] {
    +	return ResourceIndexer[T]{indexer: indexer, resource: resource}
    +}
    +
    +// NewNamespaced returns a new instance of a namespaced lister (resource indexer) wrapping the given parent and namespace for the specified type.
    +// This is intended for use by listers (generated by lister-gen) only.
    +func NewNamespaced[T runtime.Object](parent ResourceIndexer[T], namespace string) ResourceIndexer[T] {
    +	return ResourceIndexer[T]{indexer: parent.indexer, resource: parent.resource, namespace: namespace}
    +}
    +
    +// List lists all resources in the indexer matching the given selector.
    +func (l ResourceIndexer[T]) List(selector labels.Selector) (ret []T, err error) {
    +	// ListAllByNamespace reverts to ListAll on empty namespaces
    +	err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) {
    +		ret = append(ret, m.(T))
    +	})
    +	return ret, err
    +}
    +
    +// Get retrieves the resource from the index for a given name.
    +func (l ResourceIndexer[T]) Get(name string) (T, error) {
    +	var key string
    +	if l.namespace == "" {
    +		key = name
    +	} else {
    +		key = l.namespace + "/" + name
    +	}
    +	obj, exists, err := l.indexer.GetByKey(key)
    +	if err != nil {
    +		return *new(T), err
    +	}
    +	if !exists {
    +		return *new(T), errors.NewNotFound(l.resource, name)
    +	}
    +	return obj.(T), nil
    +}
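
The generic_helpers.go hunk above is the piece the rest of this vendor drop builds on: every regenerated lister now embeds listers.ResourceIndexer[T] instead of carrying its own List/Get bodies. A minimal usage sketch (not part of the diff; the object name is hypothetical) exercising the helper directly against an in-memory cache.Indexer, under the assumption that the vendored client-go above is the module in use:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/listers"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Indexer keyed by namespace/name, the same shape informers maintain.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	// Hypothetical cluster-scoped object, used only for this sketch.
	_ = indexer.Add(&networkingv1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-class"},
	})

	// listers.New wires the indexer and GroupResource into a typed ResourceIndexer.
	idx := listers.New[*networkingv1.IngressClass](indexer, networkingv1.Resource("ingressclass"))

	all, _ := idx.List(labels.Everything()) // typed []*networkingv1.IngressClass
	fmt.Println("listed:", len(all))

	if obj, err := idx.Get("example-class"); err == nil {
		fmt.Println("got:", obj.Name)
	}
}

A missing name comes back as errors.NewNotFound for the GroupResource passed to listers.New, which is how the generated listers in the hunks below report not-found without any per-type code.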
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..a380c2418f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// IngressListerExpansion allows custom methods to be added to
    +// IngressLister.
    +type IngressListerExpansion interface{}
    +
    +// IngressNamespaceListerExpansion allows custom methods to be added to
    +// IngressNamespaceLister.
    +type IngressNamespaceListerExpansion interface{}
    +
    +// IngressClassListerExpansion allows custom methods to be added to
    +// IngressClassLister.
    +type IngressClassListerExpansion interface{}
    +
    +// NetworkPolicyListerExpansion allows custom methods to be added to
    +// NetworkPolicyLister.
    +type NetworkPolicyListerExpansion interface{}
    +
    +// NetworkPolicyNamespaceListerExpansion allows custom methods to be added to
    +// NetworkPolicyNamespaceLister.
    +type NetworkPolicyNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go
    new file mode 100644
    index 0000000000..3007cd3492
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/networking/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressLister helps list Ingresses.
    +// All objects returned here must be treated as read-only.
    +type IngressLister interface {
    +	// List lists all Ingresses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Ingress, err error)
    +	// Ingresses returns an object that can list and get Ingresses.
    +	Ingresses(namespace string) IngressNamespaceLister
    +	IngressListerExpansion
    +}
    +
    +// ingressLister implements the IngressLister interface.
    +type ingressLister struct {
    +	listers.ResourceIndexer[*v1.Ingress]
    +}
    +
    +// NewIngressLister returns a new IngressLister.
    +func NewIngressLister(indexer cache.Indexer) IngressLister {
    +	return &ingressLister{listers.New[*v1.Ingress](indexer, v1.Resource("ingress"))}
    +}
    +
    +// Ingresses returns an object that can list and get Ingresses.
    +func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister {
    +	return ingressNamespaceLister{listers.NewNamespaced[*v1.Ingress](s.ResourceIndexer, namespace)}
    +}
    +
    +// IngressNamespaceLister helps list and get Ingresses.
    +// All objects returned here must be treated as read-only.
    +type IngressNamespaceLister interface {
    +	// List lists all Ingresses in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Ingress, err error)
    +	// Get retrieves the Ingress from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Ingress, error)
    +	IngressNamespaceListerExpansion
    +}
    +
    +// ingressNamespaceLister implements the IngressNamespaceLister
    +// interface.
    +type ingressNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Ingress]
    +}
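
For namespaced types such as Ingress, the generated lister above layers listers.NewNamespaced on the same helper through its Ingresses(namespace) method. A hedged sketch of how that generated API is typically consumed (object names hypothetical, not taken from this change):

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	networkingv1listers "k8s.io/client-go/listers/networking/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	// Hypothetical Ingress, present only for this sketch.
	_ = indexer.Add(&networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})

	lister := networkingv1listers.NewIngressLister(indexer)

	// Namespace-scoped view: Get resolves the "default/web" key in the indexer.
	if ing, err := lister.Ingresses("default").Get("web"); err == nil {
		fmt.Println(ing.Namespace + "/" + ing.Name)
	}

	// Cluster-wide List still works through the embedded ResourceIndexer.
	all, _ := lister.List(labels.Everything())
	fmt.Println("total:", len(all))
}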
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go
    new file mode 100644
    index 0000000000..a8efe5c5e3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/networking/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressClassLister helps list IngressClasses.
    +// All objects returned here must be treated as read-only.
    +type IngressClassLister interface {
    +	// List lists all IngressClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.IngressClass, err error)
    +	// Get retrieves the IngressClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.IngressClass, error)
    +	IngressClassListerExpansion
    +}
    +
    +// ingressClassLister implements the IngressClassLister interface.
    +type ingressClassLister struct {
    +	listers.ResourceIndexer[*v1.IngressClass]
    +}
    +
    +// NewIngressClassLister returns a new IngressClassLister.
    +func NewIngressClassLister(indexer cache.Indexer) IngressClassLister {
    +	return &ingressClassLister{listers.New[*v1.IngressClass](indexer, v1.Resource("ingressclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go
    new file mode 100644
    index 0000000000..9a3e3172ef
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/networking/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// NetworkPolicyLister helps list NetworkPolicies.
    +// All objects returned here must be treated as read-only.
    +type NetworkPolicyLister interface {
    +	// List lists all NetworkPolicies in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error)
    +	// NetworkPolicies returns an object that can list and get NetworkPolicies.
    +	NetworkPolicies(namespace string) NetworkPolicyNamespaceLister
    +	NetworkPolicyListerExpansion
    +}
    +
    +// networkPolicyLister implements the NetworkPolicyLister interface.
    +type networkPolicyLister struct {
    +	listers.ResourceIndexer[*v1.NetworkPolicy]
    +}
    +
    +// NewNetworkPolicyLister returns a new NetworkPolicyLister.
    +func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister {
    +	return &networkPolicyLister{listers.New[*v1.NetworkPolicy](indexer, v1.Resource("networkpolicy"))}
    +}
    +
    +// NetworkPolicies returns an object that can list and get NetworkPolicies.
    +func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister {
    +	return networkPolicyNamespaceLister{listers.NewNamespaced[*v1.NetworkPolicy](s.ResourceIndexer, namespace)}
    +}
    +
    +// NetworkPolicyNamespaceLister helps list and get NetworkPolicies.
    +// All objects returned here must be treated as read-only.
    +type NetworkPolicyNamespaceLister interface {
    +	// List lists all NetworkPolicies in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error)
    +	// Get retrieves the NetworkPolicy from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.NetworkPolicy, error)
    +	NetworkPolicyNamespaceListerExpansion
    +}
    +
    +// networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister
    +// interface.
    +type networkPolicyNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.NetworkPolicy]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..fc7316521b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// IPAddressListerExpansion allows custom methods to be added to
    +// IPAddressLister.
    +type IPAddressListerExpansion interface{}
    +
    +// ServiceCIDRListerExpansion allows custom methods to be added to
    +// ServiceCIDRLister.
    +type ServiceCIDRListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go
    new file mode 100644
    index 0000000000..749affd7b9
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/networking/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// IPAddressLister helps list IPAddresses.
    +// All objects returned here must be treated as read-only.
    +type IPAddressLister interface {
    +	// List lists all IPAddresses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error)
    +	// Get retrieves the IPAddress from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.IPAddress, error)
    +	IPAddressListerExpansion
    +}
    +
    +// iPAddressLister implements the IPAddressLister interface.
    +type iPAddressLister struct {
    +	listers.ResourceIndexer[*v1alpha1.IPAddress]
    +}
    +
    +// NewIPAddressLister returns a new IPAddressLister.
    +func NewIPAddressLister(indexer cache.Indexer) IPAddressLister {
    +	return &iPAddressLister{listers.New[*v1alpha1.IPAddress](indexer, v1alpha1.Resource("ipaddress"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go
    new file mode 100644
    index 0000000000..8be2d11af4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/networking/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceCIDRLister helps list ServiceCIDRs.
    +// All objects returned here must be treated as read-only.
    +type ServiceCIDRLister interface {
    +	// List lists all ServiceCIDRs in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error)
    +	// Get retrieves the ServiceCIDR from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.ServiceCIDR, error)
    +	ServiceCIDRListerExpansion
    +}
    +
    +// serviceCIDRLister implements the ServiceCIDRLister interface.
    +type serviceCIDRLister struct {
    +	listers.ResourceIndexer[*v1alpha1.ServiceCIDR]
    +}
    +
    +// NewServiceCIDRLister returns a new ServiceCIDRLister.
    +func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister {
    +	return &serviceCIDRLister{listers.New[*v1alpha1.ServiceCIDR](indexer, v1alpha1.Resource("servicecidr"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..320af736e6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go
    @@ -0,0 +1,39 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// IPAddressListerExpansion allows custom methods to be added to
    +// IPAddressLister.
    +type IPAddressListerExpansion interface{}
    +
    +// IngressListerExpansion allows custom methods to be added to
    +// IngressLister.
    +type IngressListerExpansion interface{}
    +
    +// IngressNamespaceListerExpansion allows custom methods to be added to
    +// IngressNamespaceLister.
    +type IngressNamespaceListerExpansion interface{}
    +
    +// IngressClassListerExpansion allows custom methods to be added to
    +// IngressClassLister.
    +type IngressClassListerExpansion interface{}
    +
    +// ServiceCIDRListerExpansion allows custom methods to be added to
    +// ServiceCIDRLister.
    +type ServiceCIDRListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go
    new file mode 100644
    index 0000000000..b8fe99e240
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressLister helps list Ingresses.
    +// All objects returned here must be treated as read-only.
    +type IngressLister interface {
    +	// List lists all Ingresses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Ingress, err error)
    +	// Ingresses returns an object that can list and get Ingresses.
    +	Ingresses(namespace string) IngressNamespaceLister
    +	IngressListerExpansion
    +}
    +
    +// ingressLister implements the IngressLister interface.
    +type ingressLister struct {
    +	listers.ResourceIndexer[*v1beta1.Ingress]
    +}
    +
    +// NewIngressLister returns a new IngressLister.
    +func NewIngressLister(indexer cache.Indexer) IngressLister {
    +	return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))}
    +}
    +
    +// Ingresses returns an object that can list and get Ingresses.
    +func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister {
    +	return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)}
    +}
    +
    +// IngressNamespaceLister helps list and get Ingresses.
    +// All objects returned here must be treated as read-only.
    +type IngressNamespaceLister interface {
    +	// List lists all Ingresses in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Ingress, err error)
    +	// Get retrieves the Ingress from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Ingress, error)
    +	IngressNamespaceListerExpansion
    +}
    +
    +// ingressNamespaceLister implements the IngressNamespaceLister
    +// interface.
    +type ingressNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Ingress]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go
    new file mode 100644
    index 0000000000..a5e33525f6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// IngressClassLister helps list IngressClasses.
    +// All objects returned here must be treated as read-only.
    +type IngressClassLister interface {
    +	// List lists all IngressClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.IngressClass, err error)
    +	// Get retrieves the IngressClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.IngressClass, error)
    +	IngressClassListerExpansion
    +}
    +
    +// ingressClassLister implements the IngressClassLister interface.
    +type ingressClassLister struct {
    +	listers.ResourceIndexer[*v1beta1.IngressClass]
    +}
    +
    +// NewIngressClassLister returns a new IngressClassLister.
    +func NewIngressClassLister(indexer cache.Indexer) IngressClassLister {
    +	return &ingressClassLister{listers.New[*v1beta1.IngressClass](indexer, v1beta1.Resource("ingressclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go
    new file mode 100644
    index 0000000000..3614066701
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// IPAddressLister helps list IPAddresses.
    +// All objects returned here must be treated as read-only.
    +type IPAddressLister interface {
    +	// List lists all IPAddresses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.IPAddress, err error)
    +	// Get retrieves the IPAddress from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.IPAddress, error)
    +	IPAddressListerExpansion
    +}
    +
    +// iPAddressLister implements the IPAddressLister interface.
    +type iPAddressLister struct {
    +	listers.ResourceIndexer[*v1beta1.IPAddress]
    +}
    +
    +// NewIPAddressLister returns a new IPAddressLister.
    +func NewIPAddressLister(indexer cache.Indexer) IPAddressLister {
    +	return &iPAddressLister{listers.New[*v1beta1.IPAddress](indexer, v1beta1.Resource("ipaddress"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go
    new file mode 100644
    index 0000000000..2902fa7f15
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/networking/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ServiceCIDRLister helps list ServiceCIDRs.
    +// All objects returned here must be treated as read-only.
    +type ServiceCIDRLister interface {
    +	// List lists all ServiceCIDRs in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ServiceCIDR, err error)
    +	// Get retrieves the ServiceCIDR from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ServiceCIDR, error)
    +	ServiceCIDRListerExpansion
    +}
    +
    +// serviceCIDRLister implements the ServiceCIDRLister interface.
    +type serviceCIDRLister struct {
    +	listers.ResourceIndexer[*v1beta1.ServiceCIDR]
    +}
    +
    +// NewServiceCIDRLister returns a new ServiceCIDRLister.
    +func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister {
    +	return &serviceCIDRLister{listers.New[*v1beta1.ServiceCIDR](indexer, v1beta1.Resource("servicecidr"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..4f010b87c3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// RuntimeClassListerExpansion allows custom methods to be added to
    +// RuntimeClassLister.
    +type RuntimeClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go
    new file mode 100644
    index 0000000000..17b88687e2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/node/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RuntimeClassLister helps list RuntimeClasses.
    +// All objects returned here must be treated as read-only.
    +type RuntimeClassLister interface {
    +	// List lists all RuntimeClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.RuntimeClass, err error)
    +	// Get retrieves the RuntimeClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.RuntimeClass, error)
    +	RuntimeClassListerExpansion
    +}
    +
    +// runtimeClassLister implements the RuntimeClassLister interface.
    +type runtimeClassLister struct {
    +	listers.ResourceIndexer[*v1.RuntimeClass]
    +}
    +
    +// NewRuntimeClassLister returns a new RuntimeClassLister.
    +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister {
    +	return &runtimeClassLister{listers.New[*v1.RuntimeClass](indexer, v1.Resource("runtimeclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..a65c208fac
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// RuntimeClassListerExpansion allows custom methods to be added to
    +// RuntimeClassLister.
    +type RuntimeClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go
    new file mode 100644
    index 0000000000..1f6e06f489
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/node/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RuntimeClassLister helps list RuntimeClasses.
    +// All objects returned here must be treated as read-only.
    +type RuntimeClassLister interface {
    +	// List lists all RuntimeClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error)
    +	// Get retrieves the RuntimeClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.RuntimeClass, error)
    +	RuntimeClassListerExpansion
    +}
    +
    +// runtimeClassLister implements the RuntimeClassLister interface.
    +type runtimeClassLister struct {
    +	listers.ResourceIndexer[*v1alpha1.RuntimeClass]
    +}
    +
    +// NewRuntimeClassLister returns a new RuntimeClassLister.
    +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister {
    +	return &runtimeClassLister{listers.New[*v1alpha1.RuntimeClass](indexer, v1alpha1.Resource("runtimeclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..a6744055ce
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// RuntimeClassListerExpansion allows custom methods to be added to
    +// RuntimeClassLister.
    +type RuntimeClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go
    new file mode 100644
    index 0000000000..cd0cdf3c52
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/node/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RuntimeClassLister helps list RuntimeClasses.
    +// All objects returned here must be treated as read-only.
    +type RuntimeClassLister interface {
    +	// List lists all RuntimeClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error)
    +	// Get retrieves the RuntimeClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.RuntimeClass, error)
    +	RuntimeClassListerExpansion
    +}
    +
    +// runtimeClassLister implements the RuntimeClassLister interface.
    +type runtimeClassLister struct {
    +	listers.ResourceIndexer[*v1beta1.RuntimeClass]
    +}
    +
    +// NewRuntimeClassLister returns a new RuntimeClassLister.
    +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister {
    +	return &runtimeClassLister{listers.New[*v1beta1.RuntimeClass](indexer, v1beta1.Resource("runtimeclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go
    new file mode 100644
    index 0000000000..83695668fa
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/policy/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EvictionLister helps list Evictions.
    +// All objects returned here must be treated as read-only.
    +type EvictionLister interface {
    +	// List lists all Evictions in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Eviction, err error)
    +	// Evictions returns an object that can list and get Evictions.
    +	Evictions(namespace string) EvictionNamespaceLister
    +	EvictionListerExpansion
    +}
    +
    +// evictionLister implements the EvictionLister interface.
    +type evictionLister struct {
    +	listers.ResourceIndexer[*v1.Eviction]
    +}
    +
    +// NewEvictionLister returns a new EvictionLister.
    +func NewEvictionLister(indexer cache.Indexer) EvictionLister {
    +	return &evictionLister{listers.New[*v1.Eviction](indexer, v1.Resource("eviction"))}
    +}
    +
    +// Evictions returns an object that can list and get Evictions.
    +func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister {
    +	return evictionNamespaceLister{listers.NewNamespaced[*v1.Eviction](s.ResourceIndexer, namespace)}
    +}
    +
    +// EvictionNamespaceLister helps list and get Evictions.
    +// All objects returned here must be treated as read-only.
    +type EvictionNamespaceLister interface {
    +	// List lists all Evictions in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Eviction, err error)
    +	// Get retrieves the Eviction from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Eviction, error)
    +	EvictionNamespaceListerExpansion
    +}
    +
    +// evictionNamespaceLister implements the EvictionNamespaceLister
    +// interface.
    +type evictionNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Eviction]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/policy/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..8e2d55a911
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// EvictionListerExpansion allows custom methods to be added to
    +// EvictionLister.
    +type EvictionListerExpansion interface{}
    +
    +// EvictionNamespaceListerExpansion allows custom methods to be added to
    +// EvictionNamespaceLister.
    +type EvictionNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go
    new file mode 100644
    index 0000000000..38ed8144eb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/policy/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PodDisruptionBudgetLister helps list PodDisruptionBudgets.
    +// All objects returned here must be treated as read-only.
    +type PodDisruptionBudgetLister interface {
    +	// List lists all PodDisruptionBudgets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error)
    +	// PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets.
    +	PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister
    +	PodDisruptionBudgetListerExpansion
    +}
    +
    +// podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface.
    +type podDisruptionBudgetLister struct {
    +	listers.ResourceIndexer[*v1.PodDisruptionBudget]
    +}
    +
    +// NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister.
    +func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister {
    +	return &podDisruptionBudgetLister{listers.New[*v1.PodDisruptionBudget](indexer, v1.Resource("poddisruptionbudget"))}
    +}
    +
    +// PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets.
    +func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister {
    +	return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1.PodDisruptionBudget](s.ResourceIndexer, namespace)}
    +}
    +
    +// PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets.
    +// All objects returned here must be treated as read-only.
    +type PodDisruptionBudgetNamespaceLister interface {
    +	// List lists all PodDisruptionBudgets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error)
    +	// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.PodDisruptionBudget, error)
    +	PodDisruptionBudgetNamespaceListerExpansion
    +}
    +
    +// podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister
    +// interface.
    +type podDisruptionBudgetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.PodDisruptionBudget]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget_expansion.go b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget_expansion.go
    new file mode 100644
    index 0000000000..115ee3f004
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget_expansion.go
    @@ -0,0 +1,68 @@
    +/*
    +Copyright 2021 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1
    +
    +import (
    +	"fmt"
    +
    +	"k8s.io/api/core/v1"
    +	policy "k8s.io/api/policy/v1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// PodDisruptionBudgetListerExpansion allows custom methods to be added to
    +// PodDisruptionBudgetLister.
    +type PodDisruptionBudgetListerExpansion interface {
    +	GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error)
    +}
    +
    +// PodDisruptionBudgetNamespaceListerExpansion allows custom methods to be added to
    +// PodDisruptionBudgetNamespaceLister.
    +type PodDisruptionBudgetNamespaceListerExpansion interface{}
    +
    +// GetPodPodDisruptionBudgets returns a list of PodDisruptionBudgets matching a pod.
    +func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error) {
    +	var selector labels.Selector
    +
    +	list, err := s.PodDisruptionBudgets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var pdbList []*policy.PodDisruptionBudget
    +	for i := range list {
    +		pdb := list[i]
    +		selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// Unlike the v1beta version, here we let an empty selector match everything.
    +		if !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		pdbList = append(pdbList, pdb)
    +	}
    +
    +	if len(pdbList) == 0 {
    +		return nil, fmt.Errorf("could not find PodDisruptionBudget for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return pdbList, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go
    new file mode 100644
    index 0000000000..0aff833524
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/policy/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// EvictionLister helps list Evictions.
    +// All objects returned here must be treated as read-only.
    +type EvictionLister interface {
    +	// List lists all Evictions in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Eviction, err error)
    +	// Evictions returns an object that can list and get Evictions.
    +	Evictions(namespace string) EvictionNamespaceLister
    +	EvictionListerExpansion
    +}
    +
    +// evictionLister implements the EvictionLister interface.
    +type evictionLister struct {
    +	listers.ResourceIndexer[*v1beta1.Eviction]
    +}
    +
    +// NewEvictionLister returns a new EvictionLister.
    +func NewEvictionLister(indexer cache.Indexer) EvictionLister {
    +	return &evictionLister{listers.New[*v1beta1.Eviction](indexer, v1beta1.Resource("eviction"))}
    +}
    +
    +// Evictions returns an object that can list and get Evictions.
    +func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister {
    +	return evictionNamespaceLister{listers.NewNamespaced[*v1beta1.Eviction](s.ResourceIndexer, namespace)}
    +}
    +
    +// EvictionNamespaceLister helps list and get Evictions.
    +// All objects returned here must be treated as read-only.
    +type EvictionNamespaceLister interface {
    +	// List lists all Evictions in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Eviction, err error)
    +	// Get retrieves the Eviction from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Eviction, error)
    +	EvictionNamespaceListerExpansion
    +}
    +
    +// evictionNamespaceLister implements the EvictionNamespaceLister
    +// interface.
    +type evictionNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Eviction]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..eba5e2f72d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go
    @@ -0,0 +1,27 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// EvictionListerExpansion allows custom methods to be added to
    +// EvictionLister.
    +type EvictionListerExpansion interface{}
    +
    +// EvictionNamespaceListerExpansion allows custom methods to be added to
    +// EvictionNamespaceLister.
    +type EvictionNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go
    new file mode 100644
    index 0000000000..55ae892e2b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/policy/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PodDisruptionBudgetLister helps list PodDisruptionBudgets.
    +// All objects returned here must be treated as read-only.
    +type PodDisruptionBudgetLister interface {
    +	// List lists all PodDisruptionBudgets in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error)
    +	// PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets.
    +	PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister
    +	PodDisruptionBudgetListerExpansion
    +}
    +
    +// podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface.
    +type podDisruptionBudgetLister struct {
    +	listers.ResourceIndexer[*v1beta1.PodDisruptionBudget]
    +}
    +
    +// NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister.
    +func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister {
    +	return &podDisruptionBudgetLister{listers.New[*v1beta1.PodDisruptionBudget](indexer, v1beta1.Resource("poddisruptionbudget"))}
    +}
    +
    +// PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets.
    +func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister {
    +	return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1beta1.PodDisruptionBudget](s.ResourceIndexer, namespace)}
    +}
    +
    +// PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets.
    +// All objects returned here must be treated as read-only.
    +type PodDisruptionBudgetNamespaceLister interface {
    +	// List lists all PodDisruptionBudgets in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error)
    +	// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.PodDisruptionBudget, error)
    +	PodDisruptionBudgetNamespaceListerExpansion
    +}
    +
    +// podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister
    +// interface.
    +type podDisruptionBudgetNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.PodDisruptionBudget]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go
    new file mode 100644
    index 0000000000..994947c4f3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go
    @@ -0,0 +1,68 @@
    +/*
    +Copyright 2017 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package v1beta1
    +
    +import (
    +	"fmt"
    +
    +	"k8s.io/api/core/v1"
    +	policy "k8s.io/api/policy/v1beta1"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +)
    +
    +// PodDisruptionBudgetListerExpansion allows custom methods to be added to
    +// PodDisruptionBudgetLister.
    +type PodDisruptionBudgetListerExpansion interface {
    +	GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error)
    +}
    +
    +// PodDisruptionBudgetNamespaceListerExpansion allows custom methods to be added to
    +// PodDisruptionBudgetNamespaceLister.
    +type PodDisruptionBudgetNamespaceListerExpansion interface{}
    +
    +// GetPodPodDisruptionBudgets returns a list of PodDisruptionBudgets matching a pod.  Returns an error only if no matching PodDisruptionBudgets are found.
    +func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error) {
    +	var selector labels.Selector
    +
    +	list, err := s.PodDisruptionBudgets(pod.Namespace).List(labels.Everything())
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var pdbList []*policy.PodDisruptionBudget
    +	for i := range list {
    +		pdb := list[i]
    +		selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
    +		if err != nil {
    +			// This object has an invalid selector, it does not match the pod
    +			continue
    +		}
    +
    +		// If a PDB with a nil or empty selector creeps in, it should match nothing, not everything.
    +		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
    +			continue
    +		}
    +		pdbList = append(pdbList, pdb)
    +	}
    +
    +	if len(pdbList) == 0 {
    +		return nil, fmt.Errorf("could not find PodDisruptionBudget for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
    +	}
    +
    +	return pdbList, nil
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go
    new file mode 100644
    index 0000000000..11a4cb4db4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/rbac/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleLister helps list ClusterRoles.
    +// All objects returned here must be treated as read-only.
    +type ClusterRoleLister interface {
    +	// List lists all ClusterRoles in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ClusterRole, err error)
    +	// Get retrieves the ClusterRole from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ClusterRole, error)
    +	ClusterRoleListerExpansion
    +}
    +
    +// clusterRoleLister implements the ClusterRoleLister interface.
    +type clusterRoleLister struct {
    +	listers.ResourceIndexer[*v1.ClusterRole]
    +}
    +
    +// NewClusterRoleLister returns a new ClusterRoleLister.
    +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister {
    +	return &clusterRoleLister{listers.New[*v1.ClusterRole](indexer, v1.Resource("clusterrole"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go
    new file mode 100644
    index 0000000000..4c3583bb94
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/rbac/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleBindingLister helps list ClusterRoleBindings.
    +// All objects returned here must be treated as read-only.
    +type ClusterRoleBindingLister interface {
    +	// List lists all ClusterRoleBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error)
    +	// Get retrieves the ClusterRoleBinding from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.ClusterRoleBinding, error)
    +	ClusterRoleBindingListerExpansion
    +}
    +
    +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface.
    +type clusterRoleBindingLister struct {
    +	listers.ResourceIndexer[*v1.ClusterRoleBinding]
    +}
    +
    +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister.
    +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister {
    +	return &clusterRoleBindingLister{listers.New[*v1.ClusterRoleBinding](indexer, v1.Resource("clusterrolebinding"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..0eb2a6d114
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go
    @@ -0,0 +1,43 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// ClusterRoleListerExpansion allows custom methods to be added to
    +// ClusterRoleLister.
    +type ClusterRoleListerExpansion interface{}
    +
    +// ClusterRoleBindingListerExpansion allows custom methods to be added to
    +// ClusterRoleBindingLister.
    +type ClusterRoleBindingListerExpansion interface{}
    +
    +// RoleListerExpansion allows custom methods to be added to
    +// RoleLister.
    +type RoleListerExpansion interface{}
    +
    +// RoleNamespaceListerExpansion allows custom methods to be added to
    +// RoleNamespaceLister.
    +type RoleNamespaceListerExpansion interface{}
    +
    +// RoleBindingListerExpansion allows custom methods to be added to
    +// RoleBindingLister.
    +type RoleBindingListerExpansion interface{}
    +
    +// RoleBindingNamespaceListerExpansion allows custom methods to be added to
    +// RoleBindingNamespaceLister.
    +type RoleBindingNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1/role.go
    new file mode 100644
    index 0000000000..3e94253215
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1/role.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/rbac/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleLister helps list Roles.
    +// All objects returned here must be treated as read-only.
    +type RoleLister interface {
    +	// List lists all Roles in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Role, err error)
    +	// Roles returns an object that can list and get Roles.
    +	Roles(namespace string) RoleNamespaceLister
    +	RoleListerExpansion
    +}
    +
    +// roleLister implements the RoleLister interface.
    +type roleLister struct {
    +	listers.ResourceIndexer[*v1.Role]
    +}
    +
    +// NewRoleLister returns a new RoleLister.
    +func NewRoleLister(indexer cache.Indexer) RoleLister {
    +	return &roleLister{listers.New[*v1.Role](indexer, v1.Resource("role"))}
    +}
    +
    +// Roles returns an object that can list and get Roles.
    +func (s *roleLister) Roles(namespace string) RoleNamespaceLister {
    +	return roleNamespaceLister{listers.NewNamespaced[*v1.Role](s.ResourceIndexer, namespace)}
    +}
    +
    +// RoleNamespaceLister helps list and get Roles.
    +// All objects returned here must be treated as read-only.
    +type RoleNamespaceLister interface {
    +	// List lists all Roles in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.Role, err error)
    +	// Get retrieves the Role from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.Role, error)
    +	RoleNamespaceListerExpansion
    +}
    +
    +// roleNamespaceLister implements the RoleNamespaceLister
    +// interface.
    +type roleNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.Role]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go
    new file mode 100644
    index 0000000000..1b3162a113
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/rbac/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleBindingLister helps list RoleBindings.
    +// All objects returned here must be treated as read-only.
    +type RoleBindingLister interface {
    +	// List lists all RoleBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.RoleBinding, err error)
    +	// RoleBindings returns an object that can list and get RoleBindings.
    +	RoleBindings(namespace string) RoleBindingNamespaceLister
    +	RoleBindingListerExpansion
    +}
    +
    +// roleBindingLister implements the RoleBindingLister interface.
    +type roleBindingLister struct {
    +	listers.ResourceIndexer[*v1.RoleBinding]
    +}
    +
    +// NewRoleBindingLister returns a new RoleBindingLister.
    +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister {
    +	return &roleBindingLister{listers.New[*v1.RoleBinding](indexer, v1.Resource("rolebinding"))}
    +}
    +
    +// RoleBindings returns an object that can list and get RoleBindings.
    +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister {
    +	return roleBindingNamespaceLister{listers.NewNamespaced[*v1.RoleBinding](s.ResourceIndexer, namespace)}
    +}
    +
    +// RoleBindingNamespaceLister helps list and get RoleBindings.
    +// All objects returned here must be treated as read-only.
    +type RoleBindingNamespaceLister interface {
    +	// List lists all RoleBindings in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.RoleBinding, err error)
    +	// Get retrieves the RoleBinding from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.RoleBinding, error)
    +	RoleBindingNamespaceListerExpansion
    +}
    +
    +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister
    +// interface.
    +type roleBindingNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.RoleBinding]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go
    new file mode 100644
    index 0000000000..5e5bbaa5a2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleLister helps list ClusterRoles.
    +// All objects returned here must be treated as read-only.
    +type ClusterRoleLister interface {
    +	// List lists all ClusterRoles in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error)
    +	// Get retrieves the ClusterRole from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.ClusterRole, error)
    +	ClusterRoleListerExpansion
    +}
    +
    +// clusterRoleLister implements the ClusterRoleLister interface.
    +type clusterRoleLister struct {
    +	listers.ResourceIndexer[*v1alpha1.ClusterRole]
    +}
    +
    +// NewClusterRoleLister returns a new ClusterRoleLister.
    +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister {
    +	return &clusterRoleLister{listers.New[*v1alpha1.ClusterRole](indexer, v1alpha1.Resource("clusterrole"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go
    new file mode 100644
    index 0000000000..d825d0a2f4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleBindingLister helps list ClusterRoleBindings.
    +// All objects returned here must be treated as read-only.
    +type ClusterRoleBindingLister interface {
    +	// List lists all ClusterRoleBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error)
    +	// Get retrieves the ClusterRoleBinding from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.ClusterRoleBinding, error)
    +	ClusterRoleBindingListerExpansion
    +}
    +
    +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface.
    +type clusterRoleBindingLister struct {
    +	listers.ResourceIndexer[*v1alpha1.ClusterRoleBinding]
    +}
    +
    +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister.
    +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister {
    +	return &clusterRoleBindingLister{listers.New[*v1alpha1.ClusterRoleBinding](indexer, v1alpha1.Resource("clusterrolebinding"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..2d4ad1756e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go
    @@ -0,0 +1,43 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// ClusterRoleListerExpansion allows custom methods to be added to
    +// ClusterRoleLister.
    +type ClusterRoleListerExpansion interface{}
    +
    +// ClusterRoleBindingListerExpansion allows custom methods to be added to
    +// ClusterRoleBindingLister.
    +type ClusterRoleBindingListerExpansion interface{}
    +
    +// RoleListerExpansion allows custom methods to be added to
    +// RoleLister.
    +type RoleListerExpansion interface{}
    +
    +// RoleNamespaceListerExpansion allows custom methods to be added to
    +// RoleNamespaceLister.
    +type RoleNamespaceListerExpansion interface{}
    +
    +// RoleBindingListerExpansion allows custom methods to be added to
    +// RoleBindingLister.
    +type RoleBindingListerExpansion interface{}
    +
    +// RoleBindingNamespaceListerExpansion allows custom methods to be added to
    +// RoleBindingNamespaceLister.
    +type RoleBindingNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go
    new file mode 100644
    index 0000000000..f3d2b2838d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleLister helps list Roles.
    +// All objects returned here must be treated as read-only.
    +type RoleLister interface {
    +	// List lists all Roles in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.Role, err error)
    +	// Roles returns an object that can list and get Roles.
    +	Roles(namespace string) RoleNamespaceLister
    +	RoleListerExpansion
    +}
    +
    +// roleLister implements the RoleLister interface.
    +type roleLister struct {
    +	listers.ResourceIndexer[*v1alpha1.Role]
    +}
    +
    +// NewRoleLister returns a new RoleLister.
    +func NewRoleLister(indexer cache.Indexer) RoleLister {
    +	return &roleLister{listers.New[*v1alpha1.Role](indexer, v1alpha1.Resource("role"))}
    +}
    +
    +// Roles returns an object that can list and get Roles.
    +func (s *roleLister) Roles(namespace string) RoleNamespaceLister {
    +	return roleNamespaceLister{listers.NewNamespaced[*v1alpha1.Role](s.ResourceIndexer, namespace)}
    +}
    +
    +// RoleNamespaceLister helps list and get Roles.
    +// All objects returned here must be treated as read-only.
    +type RoleNamespaceLister interface {
    +	// List lists all Roles in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.Role, err error)
    +	// Get retrieves the Role from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.Role, error)
    +	RoleNamespaceListerExpansion
    +}
    +
    +// roleNamespaceLister implements the RoleNamespaceLister
    +// interface.
    +type roleNamespaceLister struct {
    +	listers.ResourceIndexer[*v1alpha1.Role]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go
    new file mode 100644
    index 0000000000..6d6f7b7005
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/rbac/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleBindingLister helps list RoleBindings.
    +// All objects returned here must be treated as read-only.
    +type RoleBindingLister interface {
    +	// List lists all RoleBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error)
    +	// RoleBindings returns an object that can list and get RoleBindings.
    +	RoleBindings(namespace string) RoleBindingNamespaceLister
    +	RoleBindingListerExpansion
    +}
    +
    +// roleBindingLister implements the RoleBindingLister interface.
    +type roleBindingLister struct {
    +	listers.ResourceIndexer[*v1alpha1.RoleBinding]
    +}
    +
    +// NewRoleBindingLister returns a new RoleBindingLister.
    +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister {
    +	return &roleBindingLister{listers.New[*v1alpha1.RoleBinding](indexer, v1alpha1.Resource("rolebinding"))}
    +}
    +
    +// RoleBindings returns an object that can list and get RoleBindings.
    +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister {
    +	return roleBindingNamespaceLister{listers.NewNamespaced[*v1alpha1.RoleBinding](s.ResourceIndexer, namespace)}
    +}
    +
    +// RoleBindingNamespaceLister helps list and get RoleBindings.
    +// All objects returned here must be treated as read-only.
    +type RoleBindingNamespaceLister interface {
    +	// List lists all RoleBindings in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error)
    +	// Get retrieves the RoleBinding from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.RoleBinding, error)
    +	RoleBindingNamespaceListerExpansion
    +}
    +
    +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister
    +// interface.
    +type roleBindingNamespaceLister struct {
    +	listers.ResourceIndexer[*v1alpha1.RoleBinding]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go
    new file mode 100644
    index 0000000000..bade032628
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/rbac/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleLister helps list ClusterRoles.
    +// All objects returned here must be treated as read-only.
    +type ClusterRoleLister interface {
    +	// List lists all ClusterRoles in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error)
    +	// Get retrieves the ClusterRole from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ClusterRole, error)
    +	ClusterRoleListerExpansion
    +}
    +
    +// clusterRoleLister implements the ClusterRoleLister interface.
    +type clusterRoleLister struct {
    +	listers.ResourceIndexer[*v1beta1.ClusterRole]
    +}
    +
    +// NewClusterRoleLister returns a new ClusterRoleLister.
    +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister {
    +	return &clusterRoleLister{listers.New[*v1beta1.ClusterRole](indexer, v1beta1.Resource("clusterrole"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go
    new file mode 100644
    index 0000000000..1f4d391bef
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/rbac/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ClusterRoleBindingLister helps list ClusterRoleBindings.
    +// All objects returned here must be treated as read-only.
    +type ClusterRoleBindingLister interface {
    +	// List lists all ClusterRoleBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error)
    +	// Get retrieves the ClusterRoleBinding from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.ClusterRoleBinding, error)
    +	ClusterRoleBindingListerExpansion
    +}
    +
    +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface.
    +type clusterRoleBindingLister struct {
    +	listers.ResourceIndexer[*v1beta1.ClusterRoleBinding]
    +}
    +
    +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister.
    +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister {
    +	return &clusterRoleBindingLister{listers.New[*v1beta1.ClusterRoleBinding](indexer, v1beta1.Resource("clusterrolebinding"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..51f674bd0f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go
    @@ -0,0 +1,43 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// ClusterRoleListerExpansion allows custom methods to be added to
    +// ClusterRoleLister.
    +type ClusterRoleListerExpansion interface{}
    +
    +// ClusterRoleBindingListerExpansion allows custom methods to be added to
    +// ClusterRoleBindingLister.
    +type ClusterRoleBindingListerExpansion interface{}
    +
    +// RoleListerExpansion allows custom methods to be added to
    +// RoleLister.
    +type RoleListerExpansion interface{}
    +
    +// RoleNamespaceListerExpansion allows custom methods to be added to
    +// RoleNamespaceLister.
    +type RoleNamespaceListerExpansion interface{}
    +
    +// RoleBindingListerExpansion allows custom methods to be added to
    +// RoleBindingLister.
    +type RoleBindingListerExpansion interface{}
    +
    +// RoleBindingNamespaceListerExpansion allows custom methods to be added to
    +// RoleBindingNamespaceLister.
    +type RoleBindingNamespaceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go
    new file mode 100644
    index 0000000000..71666a9a03
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/rbac/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleLister helps list Roles.
    +// All objects returned here must be treated as read-only.
    +type RoleLister interface {
    +	// List lists all Roles in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Role, err error)
    +	// Roles returns an object that can list and get Roles.
    +	Roles(namespace string) RoleNamespaceLister
    +	RoleListerExpansion
    +}
    +
    +// roleLister implements the RoleLister interface.
    +type roleLister struct {
    +	listers.ResourceIndexer[*v1beta1.Role]
    +}
    +
    +// NewRoleLister returns a new RoleLister.
    +func NewRoleLister(indexer cache.Indexer) RoleLister {
    +	return &roleLister{listers.New[*v1beta1.Role](indexer, v1beta1.Resource("role"))}
    +}
    +
    +// Roles returns an object that can list and get Roles.
    +func (s *roleLister) Roles(namespace string) RoleNamespaceLister {
    +	return roleNamespaceLister{listers.NewNamespaced[*v1beta1.Role](s.ResourceIndexer, namespace)}
    +}
    +
    +// RoleNamespaceLister helps list and get Roles.
    +// All objects returned here must be treated as read-only.
    +type RoleNamespaceLister interface {
    +	// List lists all Roles in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.Role, err error)
    +	// Get retrieves the Role from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.Role, error)
    +	RoleNamespaceListerExpansion
    +}
    +
    +// roleNamespaceLister implements the RoleNamespaceLister
    +// interface.
    +type roleNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.Role]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go
    new file mode 100644
    index 0000000000..00f8542cbd
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/rbac/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// RoleBindingLister helps list RoleBindings.
    +// All objects returned here must be treated as read-only.
    +type RoleBindingLister interface {
    +	// List lists all RoleBindings in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error)
    +	// RoleBindings returns an object that can list and get RoleBindings.
    +	RoleBindings(namespace string) RoleBindingNamespaceLister
    +	RoleBindingListerExpansion
    +}
    +
    +// roleBindingLister implements the RoleBindingLister interface.
    +type roleBindingLister struct {
    +	listers.ResourceIndexer[*v1beta1.RoleBinding]
    +}
    +
    +// NewRoleBindingLister returns a new RoleBindingLister.
    +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister {
    +	return &roleBindingLister{listers.New[*v1beta1.RoleBinding](indexer, v1beta1.Resource("rolebinding"))}
    +}
    +
    +// RoleBindings returns an object that can list and get RoleBindings.
    +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister {
    +	return roleBindingNamespaceLister{listers.NewNamespaced[*v1beta1.RoleBinding](s.ResourceIndexer, namespace)}
    +}
    +
    +// RoleBindingNamespaceLister helps list and get RoleBindings.
    +// All objects returned here must be treated as read-only.
    +type RoleBindingNamespaceLister interface {
    +	// List lists all RoleBindings in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error)
    +	// Get retrieves the RoleBinding from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.RoleBinding, error)
    +	RoleBindingNamespaceListerExpansion
    +}
    +
    +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister
    +// interface.
    +type roleBindingNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.RoleBinding]
    +}
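Note (illustrative, not part of the vendored patch): the rbac/v1beta1 listers above follow the two-level pattern used for namespaced resources — a cluster-wide lister plus a per-namespace view returned by RoleBindings(namespace). A minimal sketch of how a consumer might use the generated RoleBindingLister through a shared informer factory; the kubeconfig wiring, the "default" namespace, and the "example" object name are assumptions for the example.

// Illustrative sketch only; assumes a reachable cluster via ~/.kube/config.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The shared informer factory maintains one watch/cache per resource; the
	// generated lister reads from that cache instead of calling the API server.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Rbac().V1beta1().RoleBindings().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Cluster-wide list, then the namespaced view from RoleBindings(namespace).
	all, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("role bindings in cache:", len(all))

	rb, err := lister.RoleBindings("default").Get("example")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(rb.Namespace, rb.Name, rb.RoleRef.Name)
}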
    diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go
    new file mode 100644
    index 0000000000..0950691e2b
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// DeviceClassLister helps list DeviceClasses.
    +// All objects returned here must be treated as read-only.
    +type DeviceClassLister interface {
    +	// List lists all DeviceClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.DeviceClass, err error)
    +	// Get retrieves the DeviceClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha3.DeviceClass, error)
    +	DeviceClassListerExpansion
    +}
    +
    +// deviceClassLister implements the DeviceClassLister interface.
    +type deviceClassLister struct {
    +	listers.ResourceIndexer[*v1alpha3.DeviceClass]
    +}
    +
    +// NewDeviceClassLister returns a new DeviceClassLister.
    +func NewDeviceClassLister(indexer cache.Indexer) DeviceClassLister {
    +	return &deviceClassLister{listers.New[*v1alpha3.DeviceClass](indexer, v1alpha3.Resource("deviceclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go
    new file mode 100644
    index 0000000000..b6642f635f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go
    @@ -0,0 +1,51 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +// DeviceClassListerExpansion allows custom methods to be added to
    +// DeviceClassLister.
    +type DeviceClassListerExpansion interface{}
    +
    +// PodSchedulingContextListerExpansion allows custom methods to be added to
    +// PodSchedulingContextLister.
    +type PodSchedulingContextListerExpansion interface{}
    +
    +// PodSchedulingContextNamespaceListerExpansion allows custom methods to be added to
    +// PodSchedulingContextNamespaceLister.
    +type PodSchedulingContextNamespaceListerExpansion interface{}
    +
    +// ResourceClaimListerExpansion allows custom methods to be added to
    +// ResourceClaimLister.
    +type ResourceClaimListerExpansion interface{}
    +
    +// ResourceClaimNamespaceListerExpansion allows custom methods to be added to
    +// ResourceClaimNamespaceLister.
    +type ResourceClaimNamespaceListerExpansion interface{}
    +
    +// ResourceClaimTemplateListerExpansion allows custom methods to be added to
    +// ResourceClaimTemplateLister.
    +type ResourceClaimTemplateListerExpansion interface{}
    +
    +// ResourceClaimTemplateNamespaceListerExpansion allows custom methods to be added to
    +// ResourceClaimTemplateNamespaceLister.
    +type ResourceClaimTemplateNamespaceListerExpansion interface{}
    +
    +// ResourceSliceListerExpansion allows custom methods to be added to
    +// ResourceSliceLister.
    +type ResourceSliceListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go
    new file mode 100644
    index 0000000000..ed9b049432
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PodSchedulingContextLister helps list PodSchedulingContexts.
    +// All objects returned here must be treated as read-only.
    +type PodSchedulingContextLister interface {
    +	// List lists all PodSchedulingContexts in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error)
    +	// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts.
    +	PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister
    +	PodSchedulingContextListerExpansion
    +}
    +
    +// podSchedulingContextLister implements the PodSchedulingContextLister interface.
    +type podSchedulingContextLister struct {
    +	listers.ResourceIndexer[*v1alpha3.PodSchedulingContext]
    +}
    +
    +// NewPodSchedulingContextLister returns a new PodSchedulingContextLister.
    +func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister {
    +	return &podSchedulingContextLister{listers.New[*v1alpha3.PodSchedulingContext](indexer, v1alpha3.Resource("podschedulingcontext"))}
    +}
    +
    +// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts.
    +func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister {
    +	return podSchedulingContextNamespaceLister{listers.NewNamespaced[*v1alpha3.PodSchedulingContext](s.ResourceIndexer, namespace)}
    +}
    +
    +// PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts.
    +// All objects returned here must be treated as read-only.
    +type PodSchedulingContextNamespaceLister interface {
    +	// List lists all PodSchedulingContexts in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error)
    +	// Get retrieves the PodSchedulingContext from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha3.PodSchedulingContext, error)
    +	PodSchedulingContextNamespaceListerExpansion
    +}
    +
    +// podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister
    +// interface.
    +type podSchedulingContextNamespaceLister struct {
    +	listers.ResourceIndexer[*v1alpha3.PodSchedulingContext]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go
    new file mode 100644
    index 0000000000..ac6a3e1564
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceClaimLister helps list ResourceClaims.
    +// All objects returned here must be treated as read-only.
    +type ResourceClaimLister interface {
    +	// List lists all ResourceClaims in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error)
    +	// ResourceClaims returns an object that can list and get ResourceClaims.
    +	ResourceClaims(namespace string) ResourceClaimNamespaceLister
    +	ResourceClaimListerExpansion
    +}
    +
    +// resourceClaimLister implements the ResourceClaimLister interface.
    +type resourceClaimLister struct {
    +	listers.ResourceIndexer[*v1alpha3.ResourceClaim]
    +}
    +
    +// NewResourceClaimLister returns a new ResourceClaimLister.
    +func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister {
    +	return &resourceClaimLister{listers.New[*v1alpha3.ResourceClaim](indexer, v1alpha3.Resource("resourceclaim"))}
    +}
    +
    +// ResourceClaims returns an object that can list and get ResourceClaims.
    +func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister {
    +	return resourceClaimNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaim](s.ResourceIndexer, namespace)}
    +}
    +
    +// ResourceClaimNamespaceLister helps list and get ResourceClaims.
    +// All objects returned here must be treated as read-only.
    +type ResourceClaimNamespaceLister interface {
    +	// List lists all ResourceClaims in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error)
    +	// Get retrieves the ResourceClaim from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha3.ResourceClaim, error)
    +	ResourceClaimNamespaceListerExpansion
    +}
    +
    +// resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister
    +// interface.
    +type resourceClaimNamespaceLister struct {
    +	listers.ResourceIndexer[*v1alpha3.ResourceClaim]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go
    new file mode 100644
    index 0000000000..6c15f82bba
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceClaimTemplateLister helps list ResourceClaimTemplates.
    +// All objects returned here must be treated as read-only.
    +type ResourceClaimTemplateLister interface {
    +	// List lists all ResourceClaimTemplates in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error)
    +	// ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates.
    +	ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister
    +	ResourceClaimTemplateListerExpansion
    +}
    +
    +// resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface.
    +type resourceClaimTemplateLister struct {
    +	listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate]
    +}
    +
    +// NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister.
    +func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister {
    +	return &resourceClaimTemplateLister{listers.New[*v1alpha3.ResourceClaimTemplate](indexer, v1alpha3.Resource("resourceclaimtemplate"))}
    +}
    +
    +// ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates.
    +func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister {
    +	return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaimTemplate](s.ResourceIndexer, namespace)}
    +}
    +
    +// ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates.
    +// All objects returned here must be treated as read-only.
    +type ResourceClaimTemplateNamespaceLister interface {
    +	// List lists all ResourceClaimTemplates in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error)
    +	// Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha3.ResourceClaimTemplate, error)
    +	ResourceClaimTemplateNamespaceListerExpansion
    +}
    +
    +// resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister
    +// interface.
    +type resourceClaimTemplateNamespaceLister struct {
    +	listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go
    new file mode 100644
    index 0000000000..ae87b8b66d
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha3
    +
    +import (
    +	v1alpha3 "k8s.io/api/resource/v1alpha3"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// ResourceSliceLister helps list ResourceSlices.
    +// All objects returned here must be treated as read-only.
    +type ResourceSliceLister interface {
    +	// List lists all ResourceSlices in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha3.ResourceSlice, err error)
    +	// Get retrieves the ResourceSlice from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha3.ResourceSlice, error)
    +	ResourceSliceListerExpansion
    +}
    +
    +// resourceSliceLister implements the ResourceSliceLister interface.
    +type resourceSliceLister struct {
    +	listers.ResourceIndexer[*v1alpha3.ResourceSlice]
    +}
    +
    +// NewResourceSliceLister returns a new ResourceSliceLister.
    +func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister {
    +	return &resourceSliceLister{listers.New[*v1alpha3.ResourceSlice](indexer, v1alpha3.Resource("resourceslice"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..d0c45d0125
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// PriorityClassListerExpansion allows custom methods to be added to
    +// PriorityClassLister.
    +type PriorityClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go
    new file mode 100644
    index 0000000000..b9179b5685
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/scheduling/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityClassLister helps list PriorityClasses.
    +// All objects returned here must be treated as read-only.
    +type PriorityClassLister interface {
    +	// List lists all PriorityClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.PriorityClass, err error)
    +	// Get retrieves the PriorityClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.PriorityClass, error)
    +	PriorityClassListerExpansion
    +}
    +
    +// priorityClassLister implements the PriorityClassLister interface.
    +type priorityClassLister struct {
    +	listers.ResourceIndexer[*v1.PriorityClass]
    +}
    +
    +// NewPriorityClassLister returns a new PriorityClassLister.
    +func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister {
    +	return &priorityClassLister{listers.New[*v1.PriorityClass](indexer, v1.Resource("priorityclass"))}
    +}
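Note (illustrative, not part of the vendored patch): cluster-scoped resources such as PriorityClass get the simpler lister shape — List and Get directly, with no namespaced sub-lister. A sketch of constructing the generated lister straight from an informer's indexer, which is exactly what the generated New* constructors expect; the kubeconfig wiring is an assumption for the example.

// Illustrative sketch only; assumes a reachable cluster via ~/.kube/config.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	schedulingv1listers "k8s.io/client-go/listers/scheduling/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	informer := factory.Scheduling().V1().PriorityClasses().Informer()

	// Same constructor as in the patch above: wraps the informer's indexer in the
	// generic listers.ResourceIndexer[*v1.PriorityClass].
	lister := schedulingv1listers.NewPriorityClassLister(informer.GetIndexer())

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	classes, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, pc := range classes {
		// Cluster-scoped: Get takes only a name, no namespace argument.
		fmt.Println(pc.Name, pc.Value)
	}
}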
    diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..bde8b6206c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// PriorityClassListerExpansion allows custom methods to be added to
    +// PriorityClassLister.
    +type PriorityClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go
    new file mode 100644
    index 0000000000..776ad5ae25
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/scheduling/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityClassLister helps list PriorityClasses.
    +// All objects returned here must be treated as read-only.
    +type PriorityClassLister interface {
    +	// List lists all PriorityClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error)
    +	// Get retrieves the PriorityClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.PriorityClass, error)
    +	PriorityClassListerExpansion
    +}
    +
    +// priorityClassLister implements the PriorityClassLister interface.
    +type priorityClassLister struct {
    +	listers.ResourceIndexer[*v1alpha1.PriorityClass]
    +}
    +
    +// NewPriorityClassLister returns a new PriorityClassLister.
    +func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister {
    +	return &priorityClassLister{listers.New[*v1alpha1.PriorityClass](indexer, v1alpha1.Resource("priorityclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..b806e8cf80
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// PriorityClassListerExpansion allows custom methods to be added to
    +// PriorityClassLister.
    +type PriorityClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go
    new file mode 100644
    index 0000000000..966064e5d6
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/scheduling/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// PriorityClassLister helps list PriorityClasses.
    +// All objects returned here must be treated as read-only.
    +type PriorityClassLister interface {
    +	// List lists all PriorityClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.PriorityClass, err error)
    +	// Get retrieves the PriorityClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.PriorityClass, error)
    +	PriorityClassListerExpansion
    +}
    +
    +// priorityClassLister implements the PriorityClassLister interface.
    +type priorityClassLister struct {
    +	listers.ResourceIndexer[*v1beta1.PriorityClass]
    +}
    +
    +// NewPriorityClassLister returns a new PriorityClassLister.
    +func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister {
    +	return &priorityClassLister{listers.New[*v1beta1.PriorityClass](indexer, v1beta1.Resource("priorityclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go
    new file mode 100644
    index 0000000000..db64f45887
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/storage/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIDriverLister helps list CSIDrivers.
    +// All objects returned here must be treated as read-only.
    +type CSIDriverLister interface {
    +	// List lists all CSIDrivers in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.CSIDriver, err error)
    +	// Get retrieves the CSIDriver from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.CSIDriver, error)
    +	CSIDriverListerExpansion
    +}
    +
    +// cSIDriverLister implements the CSIDriverLister interface.
    +type cSIDriverLister struct {
    +	listers.ResourceIndexer[*v1.CSIDriver]
    +}
    +
    +// NewCSIDriverLister returns a new CSIDriverLister.
    +func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister {
    +	return &cSIDriverLister{listers.New[*v1.CSIDriver](indexer, v1.Resource("csidriver"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go
    new file mode 100644
    index 0000000000..5bfd2a43ae
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/storage/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CSINodeLister helps list CSINodes.
    +// All objects returned here must be treated as read-only.
    +type CSINodeLister interface {
    +	// List lists all CSINodes in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.CSINode, err error)
    +	// Get retrieves the CSINode from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.CSINode, error)
    +	CSINodeListerExpansion
    +}
    +
    +// cSINodeLister implements the CSINodeLister interface.
    +type cSINodeLister struct {
    +	listers.ResourceIndexer[*v1.CSINode]
    +}
    +
    +// NewCSINodeLister returns a new CSINodeLister.
    +func NewCSINodeLister(indexer cache.Indexer) CSINodeLister {
    +	return &cSINodeLister{listers.New[*v1.CSINode](indexer, v1.Resource("csinode"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go
    new file mode 100644
    index 0000000000..c2acfa1153
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/storage/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIStorageCapacityLister helps list CSIStorageCapacities.
    +// All objects returned here must be treated as read-only.
    +type CSIStorageCapacityLister interface {
    +	// List lists all CSIStorageCapacities in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error)
    +	// CSIStorageCapacities returns an object that can list and get CSIStorageCapacities.
    +	CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister
    +	CSIStorageCapacityListerExpansion
    +}
    +
    +// cSIStorageCapacityLister implements the CSIStorageCapacityLister interface.
    +type cSIStorageCapacityLister struct {
    +	listers.ResourceIndexer[*v1.CSIStorageCapacity]
    +}
    +
    +// NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister.
    +func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister {
    +	return &cSIStorageCapacityLister{listers.New[*v1.CSIStorageCapacity](indexer, v1.Resource("csistoragecapacity"))}
    +}
    +
    +// CSIStorageCapacities returns an object that can list and get CSIStorageCapacities.
    +func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister {
    +	return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1.CSIStorageCapacity](s.ResourceIndexer, namespace)}
    +}
    +
    +// CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities.
    +// All objects returned here must be treated as read-only.
    +type CSIStorageCapacityNamespaceLister interface {
    +	// List lists all CSIStorageCapacities in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error)
    +	// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.CSIStorageCapacity, error)
    +	CSIStorageCapacityNamespaceListerExpansion
    +}
    +
    +// cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister
    +// interface.
    +type cSIStorageCapacityNamespaceLister struct {
    +	listers.ResourceIndexer[*v1.CSIStorageCapacity]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go
    new file mode 100644
    index 0000000000..196b787e7e
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go
    @@ -0,0 +1,43 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +// CSIDriverListerExpansion allows custom methods to be added to
    +// CSIDriverLister.
    +type CSIDriverListerExpansion interface{}
    +
    +// CSINodeListerExpansion allows custom methods to be added to
    +// CSINodeLister.
    +type CSINodeListerExpansion interface{}
    +
    +// CSIStorageCapacityListerExpansion allows custom methods to be added to
    +// CSIStorageCapacityLister.
    +type CSIStorageCapacityListerExpansion interface{}
    +
    +// CSIStorageCapacityNamespaceListerExpansion allows custom methods to be added to
    +// CSIStorageCapacityNamespaceLister.
    +type CSIStorageCapacityNamespaceListerExpansion interface{}
    +
    +// StorageClassListerExpansion allows custom methods to be added to
    +// StorageClassLister.
    +type StorageClassListerExpansion interface{}
    +
    +// VolumeAttachmentListerExpansion allows custom methods to be added to
    +// VolumeAttachmentLister.
    +type VolumeAttachmentListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go
    new file mode 100644
    index 0000000000..fc37594446
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/storage/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageClassLister helps list StorageClasses.
    +// All objects returned here must be treated as read-only.
    +type StorageClassLister interface {
    +	// List lists all StorageClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.StorageClass, err error)
    +	// Get retrieves the StorageClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.StorageClass, error)
    +	StorageClassListerExpansion
    +}
    +
    +// storageClassLister implements the StorageClassLister interface.
    +type storageClassLister struct {
    +	listers.ResourceIndexer[*v1.StorageClass]
    +}
    +
    +// NewStorageClassLister returns a new StorageClassLister.
    +func NewStorageClassLister(indexer cache.Indexer) StorageClassLister {
    +	return &storageClassLister{listers.New[*v1.StorageClass](indexer, v1.Resource("storageclass"))}
    +}
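Note (illustrative, not part of the vendored patch): all of these generated files delegate to the generic listers.New / listers.NewNamespaced helpers over a cache.Indexer, so they can be exercised against an in-memory indexer with no API server at all — a minimal sketch using the StorageClassLister from the file above; the "standard" class and its provisioner are made-up example values.

// Illustrative sketch only; runs entirely in-process against a hand-filled indexer.
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	storagev1listers "k8s.io/client-go/listers/storage/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// The same kind of indexer an informer would maintain, populated by hand here.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	if err := indexer.Add(&storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "standard"},
		Provisioner: "kubernetes.io/no-provisioner",
	}); err != nil {
		panic(err)
	}

	// NewStorageClassLister is the generated constructor from the patch above.
	lister := storagev1listers.NewStorageClassLister(indexer)

	classes, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, sc := range classes {
		fmt.Println(sc.Name, sc.Provisioner)
	}

	// Cluster-scoped Get is keyed by name only.
	if sc, err := lister.Get("standard"); err == nil {
		fmt.Println("found:", sc.Name)
	}
}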
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go
    new file mode 100644
    index 0000000000..44754d6f25
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1
    +
    +import (
    +	v1 "k8s.io/api/storage/v1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttachmentLister helps list VolumeAttachments.
    +// All objects returned here must be treated as read-only.
    +type VolumeAttachmentLister interface {
    +	// List lists all VolumeAttachments in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error)
    +	// Get retrieves the VolumeAttachment from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1.VolumeAttachment, error)
    +	VolumeAttachmentListerExpansion
    +}
    +
    +// volumeAttachmentLister implements the VolumeAttachmentLister interface.
    +type volumeAttachmentLister struct {
    +	listers.ResourceIndexer[*v1.VolumeAttachment]
    +}
    +
    +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister.
    +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister {
    +	return &volumeAttachmentLister{listers.New[*v1.VolumeAttachment](indexer, v1.Resource("volumeattachment"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go
    new file mode 100644
    index 0000000000..7f75aae2cd
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/storage/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIStorageCapacityLister helps list CSIStorageCapacities.
    +// All objects returned here must be treated as read-only.
    +type CSIStorageCapacityLister interface {
    +	// List lists all CSIStorageCapacities in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error)
    +	// CSIStorageCapacities returns an object that can list and get CSIStorageCapacities.
    +	CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister
    +	CSIStorageCapacityListerExpansion
    +}
    +
    +// cSIStorageCapacityLister implements the CSIStorageCapacityLister interface.
    +type cSIStorageCapacityLister struct {
    +	listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity]
    +}
    +
    +// NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister.
    +func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister {
    +	return &cSIStorageCapacityLister{listers.New[*v1alpha1.CSIStorageCapacity](indexer, v1alpha1.Resource("csistoragecapacity"))}
    +}
    +
    +// CSIStorageCapacities returns an object that can list and get CSIStorageCapacities.
    +func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister {
    +	return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1alpha1.CSIStorageCapacity](s.ResourceIndexer, namespace)}
    +}
    +
    +// CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities.
    +// All objects returned here must be treated as read-only.
    +type CSIStorageCapacityNamespaceLister interface {
    +	// List lists all CSIStorageCapacities in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error)
    +	// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.CSIStorageCapacity, error)
    +	CSIStorageCapacityNamespaceListerExpansion
    +}
    +
    +// cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister
    +// interface.
    +type cSIStorageCapacityNamespaceLister struct {
    +	listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..327fb6e31c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go
    @@ -0,0 +1,35 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// CSIStorageCapacityListerExpansion allows custom methods to be added to
    +// CSIStorageCapacityLister.
    +type CSIStorageCapacityListerExpansion interface{}
    +
    +// CSIStorageCapacityNamespaceListerExpansion allows custom methods to be added to
    +// CSIStorageCapacityNamespaceLister.
    +type CSIStorageCapacityNamespaceListerExpansion interface{}
    +
    +// VolumeAttachmentListerExpansion allows custom methods to be added to
    +// VolumeAttachmentLister.
    +type VolumeAttachmentListerExpansion interface{}
    +
    +// VolumeAttributesClassListerExpansion allows custom methods to be added to
    +// VolumeAttributesClassLister.
    +type VolumeAttributesClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go
    new file mode 100644
    index 0000000000..122864ffef
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/storage/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttachmentLister helps list VolumeAttachments.
    +// All objects returned here must be treated as read-only.
    +type VolumeAttachmentLister interface {
    +	// List lists all VolumeAttachments in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error)
    +	// Get retrieves the VolumeAttachment from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.VolumeAttachment, error)
    +	VolumeAttachmentListerExpansion
    +}
    +
    +// volumeAttachmentLister implements the VolumeAttachmentLister interface.
    +type volumeAttachmentLister struct {
    +	listers.ResourceIndexer[*v1alpha1.VolumeAttachment]
    +}
    +
    +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister.
    +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister {
    +	return &volumeAttachmentLister{listers.New[*v1alpha1.VolumeAttachment](indexer, v1alpha1.Resource("volumeattachment"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go
    new file mode 100644
    index 0000000000..5d8ae09d7c
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/storage/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttributesClassLister helps list VolumeAttributesClasses.
    +// All objects returned here must be treated as read-only.
    +type VolumeAttributesClassLister interface {
    +	// List lists all VolumeAttributesClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error)
    +	// Get retrieves the VolumeAttributesClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.VolumeAttributesClass, error)
    +	VolumeAttributesClassListerExpansion
    +}
    +
    +// volumeAttributesClassLister implements the VolumeAttributesClassLister interface.
    +type volumeAttributesClassLister struct {
    +	listers.ResourceIndexer[*v1alpha1.VolumeAttributesClass]
    +}
    +
    +// NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister.
    +func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister {
    +	return &volumeAttributesClassLister{listers.New[*v1alpha1.VolumeAttributesClass](indexer, v1alpha1.Resource("volumeattributesclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go
    new file mode 100644
    index 0000000000..6600386749
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIDriverLister helps list CSIDrivers.
    +// All objects returned here must be treated as read-only.
    +type CSIDriverLister interface {
    +	// List lists all CSIDrivers in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error)
    +	// Get retrieves the CSIDriver from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.CSIDriver, error)
    +	CSIDriverListerExpansion
    +}
    +
    +// cSIDriverLister implements the CSIDriverLister interface.
    +type cSIDriverLister struct {
    +	listers.ResourceIndexer[*v1beta1.CSIDriver]
    +}
    +
    +// NewCSIDriverLister returns a new CSIDriverLister.
    +func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister {
    +	return &cSIDriverLister{listers.New[*v1beta1.CSIDriver](indexer, v1beta1.Resource("csidriver"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go
    new file mode 100644
    index 0000000000..2c29ccabf3
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CSINodeLister helps list CSINodes.
    +// All objects returned here must be treated as read-only.
    +type CSINodeLister interface {
    +	// List lists all CSINodes in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.CSINode, err error)
    +	// Get retrieves the CSINode from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.CSINode, error)
    +	CSINodeListerExpansion
    +}
    +
    +// cSINodeLister implements the CSINodeLister interface.
    +type cSINodeLister struct {
    +	listers.ResourceIndexer[*v1beta1.CSINode]
    +}
    +
    +// NewCSINodeLister returns a new CSINodeLister.
    +func NewCSINodeLister(indexer cache.Indexer) CSINodeLister {
    +	return &cSINodeLister{listers.New[*v1beta1.CSINode](indexer, v1beta1.Resource("csinode"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go
    new file mode 100644
    index 0000000000..365304df12
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// CSIStorageCapacityLister helps list CSIStorageCapacities.
    +// All objects returned here must be treated as read-only.
    +type CSIStorageCapacityLister interface {
    +	// List lists all CSIStorageCapacities in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error)
    +	// CSIStorageCapacities returns an object that can list and get CSIStorageCapacities.
    +	CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister
    +	CSIStorageCapacityListerExpansion
    +}
    +
    +// cSIStorageCapacityLister implements the CSIStorageCapacityLister interface.
    +type cSIStorageCapacityLister struct {
    +	listers.ResourceIndexer[*v1beta1.CSIStorageCapacity]
    +}
    +
    +// NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister.
    +func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister {
    +	return &cSIStorageCapacityLister{listers.New[*v1beta1.CSIStorageCapacity](indexer, v1beta1.Resource("csistoragecapacity"))}
    +}
    +
    +// CSIStorageCapacities returns an object that can list and get CSIStorageCapacities.
    +func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister {
    +	return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1beta1.CSIStorageCapacity](s.ResourceIndexer, namespace)}
    +}
    +
    +// CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities.
    +// All objects returned here must be treated as read-only.
    +type CSIStorageCapacityNamespaceLister interface {
    +	// List lists all CSIStorageCapacities in the indexer for a given namespace.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error)
    +	// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.CSIStorageCapacity, error)
    +	CSIStorageCapacityNamespaceListerExpansion
    +}
    +
    +// cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister
    +// interface.
    +type cSIStorageCapacityNamespaceLister struct {
    +	listers.ResourceIndexer[*v1beta1.CSIStorageCapacity]
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go
    new file mode 100644
    index 0000000000..4f56776be1
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go
    @@ -0,0 +1,47 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +// CSIDriverListerExpansion allows custom methods to be added to
    +// CSIDriverLister.
    +type CSIDriverListerExpansion interface{}
    +
    +// CSINodeListerExpansion allows custom methods to be added to
    +// CSINodeLister.
    +type CSINodeListerExpansion interface{}
    +
    +// CSIStorageCapacityListerExpansion allows custom methods to be added to
    +// CSIStorageCapacityLister.
    +type CSIStorageCapacityListerExpansion interface{}
    +
    +// CSIStorageCapacityNamespaceListerExpansion allows custom methods to be added to
    +// CSIStorageCapacityNamespaceLister.
    +type CSIStorageCapacityNamespaceListerExpansion interface{}
    +
    +// StorageClassListerExpansion allows custom methods to be added to
    +// StorageClassLister.
    +type StorageClassListerExpansion interface{}
    +
    +// VolumeAttachmentListerExpansion allows custom methods to be added to
    +// VolumeAttachmentLister.
    +type VolumeAttachmentListerExpansion interface{}
    +
    +// VolumeAttributesClassListerExpansion allows custom methods to be added to
    +// VolumeAttributesClassLister.
    +type VolumeAttributesClassListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go
    new file mode 100644
    index 0000000000..070c061bc5
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageClassLister helps list StorageClasses.
    +// All objects returned here must be treated as read-only.
    +type StorageClassLister interface {
    +	// List lists all StorageClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error)
    +	// Get retrieves the StorageClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.StorageClass, error)
    +	StorageClassListerExpansion
    +}
    +
    +// storageClassLister implements the StorageClassLister interface.
    +type storageClassLister struct {
    +	listers.ResourceIndexer[*v1beta1.StorageClass]
    +}
    +
    +// NewStorageClassLister returns a new StorageClassLister.
    +func NewStorageClassLister(indexer cache.Indexer) StorageClassLister {
    +	return &storageClassLister{listers.New[*v1beta1.StorageClass](indexer, v1beta1.Resource("storageclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go
    new file mode 100644
    index 0000000000..d437c1eaeb
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttachmentLister helps list VolumeAttachments.
    +// All objects returned here must be treated as read-only.
    +type VolumeAttachmentLister interface {
    +	// List lists all VolumeAttachments in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.VolumeAttachment, err error)
    +	// Get retrieves the VolumeAttachment from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.VolumeAttachment, error)
    +	VolumeAttachmentListerExpansion
    +}
    +
    +// volumeAttachmentLister implements the VolumeAttachmentLister interface.
    +type volumeAttachmentLister struct {
    +	listers.ResourceIndexer[*v1beta1.VolumeAttachment]
    +}
    +
    +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister.
    +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister {
    +	return &volumeAttachmentLister{listers.New[*v1beta1.VolumeAttachment](indexer, v1beta1.Resource("volumeattachment"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go
    new file mode 100644
    index 0000000000..2ff71e3d7f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1beta1
    +
    +import (
    +	v1beta1 "k8s.io/api/storage/v1beta1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// VolumeAttributesClassLister helps list VolumeAttributesClasses.
    +// All objects returned here must be treated as read-only.
    +type VolumeAttributesClassLister interface {
    +	// List lists all VolumeAttributesClasses in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1beta1.VolumeAttributesClass, err error)
    +	// Get retrieves the VolumeAttributesClass from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1beta1.VolumeAttributesClass, error)
    +	VolumeAttributesClassListerExpansion
    +}
    +
    +// volumeAttributesClassLister implements the VolumeAttributesClassLister interface.
    +type volumeAttributesClassLister struct {
    +	listers.ResourceIndexer[*v1beta1.VolumeAttributesClass]
    +}
    +
    +// NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister.
    +func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister {
    +	return &volumeAttributesClassLister{listers.New[*v1beta1.VolumeAttributesClass](indexer, v1beta1.Resource("volumeattributesclass"))}
    +}
    diff --git a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go
    new file mode 100644
    index 0000000000..92eb5c65b4
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go
    @@ -0,0 +1,23 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +// StorageVersionMigrationListerExpansion allows custom methods to be added to
    +// StorageVersionMigrationLister.
    +type StorageVersionMigrationListerExpansion interface{}
    diff --git a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go
    new file mode 100644
    index 0000000000..794dba25c8
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go
    @@ -0,0 +1,48 @@
    +/*
    +Copyright The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +// Code generated by lister-gen. DO NOT EDIT.
    +
    +package v1alpha1
    +
    +import (
    +	v1alpha1 "k8s.io/api/storagemigration/v1alpha1"
    +	"k8s.io/apimachinery/pkg/labels"
    +	"k8s.io/client-go/listers"
    +	"k8s.io/client-go/tools/cache"
    +)
    +
    +// StorageVersionMigrationLister helps list StorageVersionMigrations.
    +// All objects returned here must be treated as read-only.
    +type StorageVersionMigrationLister interface {
    +	// List lists all StorageVersionMigrations in the indexer.
    +	// Objects returned here must be treated as read-only.
    +	List(selector labels.Selector) (ret []*v1alpha1.StorageVersionMigration, err error)
    +	// Get retrieves the StorageVersionMigration from the index for a given name.
    +	// Objects returned here must be treated as read-only.
    +	Get(name string) (*v1alpha1.StorageVersionMigration, error)
    +	StorageVersionMigrationListerExpansion
    +}
    +
    +// storageVersionMigrationLister implements the StorageVersionMigrationLister interface.
    +type storageVersionMigrationLister struct {
    +	listers.ResourceIndexer[*v1alpha1.StorageVersionMigration]
    +}
    +
    +// NewStorageVersionMigrationLister returns a new StorageVersionMigrationLister.
    +func NewStorageVersionMigrationLister(indexer cache.Indexer) StorageVersionMigrationLister {
    +	return &storageVersionMigrationLister{listers.New[*v1alpha1.StorageVersionMigration](indexer, v1alpha1.Resource("storageversionmigration"))}
    +}
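The generated listers added above all share one shape: a thin, read-only, typed wrapper over a cache.Indexer. The following sketch is illustrative only and not part of the vendored diff; it feeds a hand-built indexer to the v1beta1 StorageClass lister from this update, whereas real controllers would use a shared informer's GetIndexer(), and the object name and provisioner below are made up.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	storagelisters "k8s.io/client-go/listers/storage/v1beta1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Normally this indexer is informer.Informer().GetIndexer(); it is built
	// by hand here only so the example is self-contained.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&storagev1beta1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "standard"},    // hypothetical class name
		Provisioner: "example.com/provisioner",              // hypothetical provisioner
	})

	lister := storagelisters.NewStorageClassLister(indexer)

	// List and Get return pointers into the cache; treat them as read-only,
	// as the generated doc comments require.
	classes, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, sc := range classes {
		fmt.Println("cached StorageClass:", sc.Name)
	}
	if _, err := lister.Get("missing"); err != nil {
		fmt.Println("Get surfaces a standard NotFound error:", err)
	}
}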
    diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
    index 850e57daeb..f5a9f68ca4 100644
    --- a/vendor/k8s.io/client-go/rest/request.go
    +++ b/vendor/k8s.io/client-go/rest/request.go
    @@ -37,12 +37,15 @@ import (
     	"golang.org/x/net/http2"
     
     	"k8s.io/apimachinery/pkg/api/errors"
    +	"k8s.io/apimachinery/pkg/api/meta"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/conversion"
     	"k8s.io/apimachinery/pkg/runtime"
     	"k8s.io/apimachinery/pkg/runtime/schema"
     	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
     	"k8s.io/apimachinery/pkg/util/net"
     	"k8s.io/apimachinery/pkg/watch"
    +	clientfeatures "k8s.io/client-go/features"
     	restclientwatch "k8s.io/client-go/rest/watch"
     	"k8s.io/client-go/tools/metrics"
     	"k8s.io/client-go/util/flowcontrol"
    @@ -768,6 +771,142 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
     	}
     }
     
    +type WatchListResult struct {
    +	// err holds any errors we might have received
    +	// during streaming.
    +	err error
    +
    +	// items hold the collected data
    +	items []runtime.Object
    +
    +	// initialEventsEndBookmarkRV holds the resource version
    +	// extracted from the bookmark event that marks
    +	// the end of the stream.
    +	initialEventsEndBookmarkRV string
    +
    +	// gv represents the API version
    +	// it is used to construct the final list response
    +	// normally this information is filled by the server
    +	gv schema.GroupVersion
    +}
    +
    +func (r WatchListResult) Into(obj runtime.Object) error {
    +	if r.err != nil {
    +		return r.err
    +	}
    +
    +	listPtr, err := meta.GetItemsPtr(obj)
    +	if err != nil {
    +		return err
    +	}
    +	listVal, err := conversion.EnforcePtr(listPtr)
    +	if err != nil {
    +		return err
    +	}
    +	if listVal.Kind() != reflect.Slice {
    +		return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
    +	}
    +
    +	if len(r.items) == 0 {
    +		listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0))
    +	} else {
    +		listVal.Set(reflect.MakeSlice(listVal.Type(), len(r.items), len(r.items)))
    +		for i, o := range r.items {
    +			if listVal.Type().Elem() != reflect.TypeOf(o).Elem() {
    +				return fmt.Errorf("received object type = %v at index = %d, doesn't match the list item type = %v", reflect.TypeOf(o).Elem(), i, listVal.Type().Elem())
    +			}
    +			listVal.Index(i).Set(reflect.ValueOf(o).Elem())
    +		}
    +	}
    +
    +	listMeta, err := meta.ListAccessor(obj)
    +	if err != nil {
    +		return err
    +	}
    +	listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV)
    +
    +	typeMeta, err := meta.TypeAccessor(obj)
    +	if err != nil {
    +		return err
    +	}
    +	version := r.gv.String()
    +	typeMeta.SetAPIVersion(version)
    +	typeMeta.SetKind(reflect.TypeOf(obj).Elem().Name())
    +
    +	return nil
    +}
    +
    +// WatchList establishes a stream to get a consistent snapshot of data
    +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal
    +//
    +// Note that the watchlist requires properly setting the ListOptions
    +// otherwise it just establishes a regular watch with the server.
    +// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists
    +// to see what parameters are currently required.
    +func (r *Request) WatchList(ctx context.Context) WatchListResult {
    +	if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
    +		return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)}
    +	}
    +	// TODO(#115478): consider validating request parameters (i.e sendInitialEvents).
    +	//  Most users use the generated client, which handles the proper setting of parameters.
    +	//  We don't have validation for other methods (e.g., the Watch)
    +	//  thus, for symmetry, we haven't added additional checks for the WatchList method.
    +	w, err := r.Watch(ctx)
    +	if err != nil {
    +		return WatchListResult{err: err}
    +	}
    +	return r.handleWatchList(ctx, w)
    +}
    +
    +// handleWatchList holds the actual logic for easier unit testing.
    +// Note that this function will close the passed watch.
    +func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchListResult {
    +	defer w.Stop()
    +	var lastKey string
    +	var items []runtime.Object
    +
    +	for {
    +		select {
    +		case <-ctx.Done():
    +			return WatchListResult{err: ctx.Err()}
    +		case event, ok := <-w.ResultChan():
    +			if !ok {
    +				return WatchListResult{err: fmt.Errorf("unexpected watch close")}
    +			}
    +			if event.Type == watch.Error {
    +				return WatchListResult{err: errors.FromObject(event.Object)}
    +			}
    +			meta, err := meta.Accessor(event.Object)
    +			if err != nil {
    +				return WatchListResult{err: fmt.Errorf("failed to parse watch event: %#v", event)}
    +			}
    +
    +			switch event.Type {
    +			case watch.Added:
    +				// the following check ensures that the response is ordered.
    +				// earlier servers had a bug that caused them to not sort the output.
    +				// in such cases, return an error which can trigger fallback logic.
    +				key := objectKeyFromMeta(meta)
    +				if len(lastKey) > 0 && lastKey > key {
    +					return WatchListResult{err: fmt.Errorf("cannot add the obj (%#v) with the key = %s, as it violates the ordering guarantees provided by the watchlist feature in beta phase, lastInsertedKey was = %s", event.Object, key, lastKey)}
    +				}
    +				items = append(items, event.Object)
    +				lastKey = key
    +			case watch.Bookmark:
    +				if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
    +					return WatchListResult{
    +						items:                      items,
    +						initialEventsEndBookmarkRV: meta.GetResourceVersion(),
    +						gv:                         r.c.content.GroupVersion,
    +					}
    +				}
    +			default:
    +				return WatchListResult{err: fmt.Errorf("unexpected watch event %#v, expected to only receive watch.Added and watch.Bookmark events", event)}
    +			}
    +		}
    +	}
    +}
    +
     func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) {
     	contentType := resp.Header.Get("Content-Type")
     	mediaType, params, err := mime.ParseMediaType(contentType)
    @@ -1470,3 +1609,10 @@ func ValidatePathSegmentName(name string, prefix bool) []string {
     	}
     	return IsValidPathSegmentName(name)
     }
    +
    +func objectKeyFromMeta(objMeta metav1.Object) string {
    +	if len(objMeta.GetNamespace()) > 0 {
    +		return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName())
    +	}
    +	return objMeta.GetName()
    +}
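The WatchList path added above is normally driven by the generated typed clients, but it can also be exercised directly on a rest.Request. The sketch below is a minimal illustration, not part of the vendored diff, and it rests on a few assumptions beyond what the diff shows: the WatchListClient client-go feature gate is enabled (commonly via the KUBE_FEATURE_WatchListClient environment variable), the API server supports streaming lists, and the kubeconfig path is hypothetical. If the gate is off or the stream fails, Into returns an error and callers typically fall back to a regular List.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path, for illustration only.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Streaming lists need sendInitialEvents, bookmarks and watch semantics;
	// without them WatchList degrades to a regular watch, as the doc comment notes.
	sendInitial := true
	opts := metav1.ListOptions{
		SendInitialEvents:    &sendInitial,
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		AllowWatchBookmarks:  true,
		Watch:                true,
	}

	var secrets corev1.SecretList
	res := client.CoreV1().RESTClient().
		Get().
		Namespace("default").
		Resource("secrets").
		VersionedParams(&opts, scheme.ParameterCodec).
		WatchList(context.Background())
	if err := res.Into(&secrets); err != nil {
		// Typical callers would fall back to a regular List here.
		panic(err)
	}
	fmt.Println("streamed", len(secrets.Items), "secrets at resourceVersion", secrets.ResourceVersion)
}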
    diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go
    index e95c020b2e..9e1e04d14e 100644
    --- a/vendor/k8s.io/client-go/rest/watch/decoder.go
    +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go
    @@ -51,7 +51,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
     		return "", nil, err
     	}
     	if res != &got {
    -		return "", nil, fmt.Errorf("unable to decode to metav1.Event")
    +		return "", nil, fmt.Errorf("unable to decode to metav1.WatchEvent")
     	}
     	switch got.Type {
     	case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark):
    diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go
    index c8ae0aaf5d..270cc4ddbd 100644
    --- a/vendor/k8s.io/client-go/testing/actions.go
    +++ b/vendor/k8s.io/client-go/testing/actions.go
    @@ -30,41 +30,61 @@ import (
     )
     
     func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl {
    +	return NewRootGetActionWithOptions(resource, name, metav1.GetOptions{})
    +}
    +
    +func NewRootGetActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.GetOptions) GetActionImpl {
     	action := GetActionImpl{}
     	action.Verb = "get"
     	action.Resource = resource
     	action.Name = name
    +	action.GetOptions = opts
     
     	return action
     }
     
     func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl {
    +	return NewGetActionWithOptions(resource, namespace, name, metav1.GetOptions{})
    +}
    +
    +func NewGetActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.GetOptions) GetActionImpl {
     	action := GetActionImpl{}
     	action.Verb = "get"
     	action.Resource = resource
     	action.Namespace = namespace
     	action.Name = name
    +	action.GetOptions = opts
     
     	return action
     }
     
     func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl {
    +	return NewGetSubresourceActionWithOptions(resource, namespace, subresource, name, metav1.GetOptions{})
    +}
    +
    +func NewGetSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, subresource, name string, opts metav1.GetOptions) GetActionImpl {
     	action := GetActionImpl{}
     	action.Verb = "get"
     	action.Resource = resource
     	action.Subresource = subresource
     	action.Namespace = namespace
     	action.Name = name
    +	action.GetOptions = opts
     
     	return action
     }
     
     func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl {
    +	return NewRootGetSubresourceActionWithOptions(resource, subresource, name, metav1.GetOptions{})
    +}
    +
    +func NewRootGetSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, name string, opts metav1.GetOptions) GetActionImpl {
     	action := GetActionImpl{}
     	action.Verb = "get"
     	action.Resource = resource
     	action.Subresource = subresource
     	action.Name = name
    +	action.GetOptions = opts
     
     	return action
     }
    @@ -76,6 +96,21 @@ func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVe
     	action.Kind = kind
     	labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
     	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
    +	action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()}
    +
    +	return action
    +}
    +
    +func NewRootListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts metav1.ListOptions) ListActionImpl {
    +	action := ListActionImpl{}
    +	action.Verb = "list"
    +	action.Resource = resource
    +	action.Kind = kind
    +	action.ListOptions = opts
    +
    +	labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
    +	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
    +	action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()}
     
     	return action
     }
    @@ -86,6 +121,21 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio
     	action.Resource = resource
     	action.Kind = kind
     	action.Namespace = namespace
    +	labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
    +	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
    +	action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()}
    +
    +	return action
    +}
    +
    +func NewListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts metav1.ListOptions) ListActionImpl {
    +	action := ListActionImpl{}
    +	action.Verb = "list"
    +	action.Resource = resource
    +	action.Kind = kind
    +	action.Namespace = namespace
    +	action.ListOptions = opts
    +
     	labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
     	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
     
    @@ -93,36 +143,55 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio
     }
     
     func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl {
    +	return NewRootCreateActionWithOptions(resource, object, metav1.CreateOptions{})
    +}
    +
    +func NewRootCreateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
     	action := CreateActionImpl{}
     	action.Verb = "create"
     	action.Resource = resource
     	action.Object = object
    +	action.CreateOptions = opts
     
     	return action
     }
     
     func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl {
    +	return NewCreateActionWithOptions(resource, namespace, object, metav1.CreateOptions{})
    +}
    +
    +func NewCreateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
     	action := CreateActionImpl{}
     	action.Verb = "create"
     	action.Resource = resource
     	action.Namespace = namespace
     	action.Object = object
    +	action.CreateOptions = opts
     
     	return action
     }
     
     func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl {
    +	return NewRootCreateSubresourceActionWithOptions(resource, name, subresource, object, metav1.CreateOptions{})
    +}
    +
    +func NewRootCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
     	action := CreateActionImpl{}
     	action.Verb = "create"
     	action.Resource = resource
     	action.Subresource = subresource
     	action.Name = name
     	action.Object = object
    +	action.CreateOptions = opts
     
     	return action
     }
     
     func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl {
    +	return NewCreateSubresourceActionWithOptions(resource, name, subresource, namespace, object, metav1.CreateOptions{})
    +}
    +
    +func NewCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
     	action := CreateActionImpl{}
     	action.Verb = "create"
     	action.Resource = resource
    @@ -130,41 +199,61 @@ func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subr
     	action.Subresource = subresource
     	action.Name = name
     	action.Object = object
    +	action.CreateOptions = opts
     
     	return action
     }
     
     func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl {
    +	return NewRootUpdateActionWithOptions(resource, object, metav1.UpdateOptions{})
    +}
    +
    +func NewRootUpdateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
     	action := UpdateActionImpl{}
     	action.Verb = "update"
     	action.Resource = resource
     	action.Object = object
    +	action.UpdateOptions = opts
     
     	return action
     }
     
     func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl {
    +	return NewUpdateActionWithOptions(resource, namespace, object, metav1.UpdateOptions{})
    +}
    +
    +func NewUpdateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
     	action := UpdateActionImpl{}
     	action.Verb = "update"
     	action.Resource = resource
     	action.Namespace = namespace
     	action.Object = object
    +	action.UpdateOptions = opts
     
     	return action
     }
     
     func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl {
    +	return NewRootPatchActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{})
    +}
    +
    +func NewRootPatchActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl {
     	action := PatchActionImpl{}
     	action.Verb = "patch"
     	action.Resource = resource
     	action.Name = name
     	action.PatchType = pt
     	action.Patch = patch
    +	action.PatchOptions = opts
     
     	return action
     }
     
     func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl {
    +	return NewPatchActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{})
    +}
    +
    +func NewPatchActionWithOptions(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl {
     	action := PatchActionImpl{}
     	action.Verb = "patch"
     	action.Resource = resource
    @@ -172,11 +261,16 @@ func NewPatchAction(resource schema.GroupVersionResource, namespace string, name
     	action.Name = name
     	action.PatchType = pt
     	action.Patch = patch
    +	action.PatchOptions = opts
     
     	return action
     }
     
     func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl {
    +	return NewRootPatchSubresourceActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}, subresources...)
    +}
    +
    +func NewRootPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl {
     	action := PatchActionImpl{}
     	action.Verb = "patch"
     	action.Resource = resource
    @@ -184,11 +278,16 @@ func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name st
     	action.Name = name
     	action.PatchType = pt
     	action.Patch = patch
    +	action.PatchOptions = opts
     
     	return action
     }
     
     func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl {
    +	return NewPatchSubresourceActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}, subresources...)
    +}
    +
    +func NewPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl {
     	action := PatchActionImpl{}
     	action.Verb = "patch"
     	action.Resource = resource
    @@ -197,26 +296,38 @@ func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace,
     	action.Name = name
     	action.PatchType = pt
     	action.Patch = patch
    +	action.PatchOptions = opts
     
     	return action
     }
     
     func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl {
    +	return NewRootUpdateSubresourceActionWithOptions(resource, subresource, object, metav1.UpdateOptions{})
    +}
    +
    +func NewRootUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
     	action := UpdateActionImpl{}
     	action.Verb = "update"
     	action.Resource = resource
     	action.Subresource = subresource
     	action.Object = object
    +	action.UpdateOptions = opts
     
     	return action
     }
    +
     func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl {
    +	return NewUpdateSubresourceActionWithOptions(resource, subresource, namespace, object, metav1.UpdateOptions{})
    +}
    +
    +func NewUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
     	action := UpdateActionImpl{}
     	action.Verb = "update"
     	action.Resource = resource
     	action.Subresource = subresource
     	action.Namespace = namespace
     	action.Object = object
    +	action.UpdateOptions = opts
     
     	return action
     }
    @@ -236,11 +347,16 @@ func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name s
     }
     
     func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl {
    +	return NewRootDeleteSubresourceActionWithOptions(resource, subresource, name, metav1.DeleteOptions{})
    +}
    +
    +func NewRootDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, name string, opts metav1.DeleteOptions) DeleteActionImpl {
     	action := DeleteActionImpl{}
     	action.Verb = "delete"
     	action.Resource = resource
     	action.Subresource = subresource
     	action.Name = name
    +	action.DeleteOptions = opts
     
     	return action
     }
    @@ -261,41 +377,69 @@ func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace,
     }
     
     func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl {
    +	return NewDeleteSubresourceActionWithOptions(resource, subresource, namespace, name, metav1.DeleteOptions{})
    +}
    +
    +func NewDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl {
     	action := DeleteActionImpl{}
     	action.Verb = "delete"
     	action.Resource = resource
     	action.Subresource = subresource
     	action.Namespace = namespace
     	action.Name = name
    +	action.DeleteOptions = opts
     
     	return action
     }
     
     func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl {
    +	listOpts, _ := opts.(metav1.ListOptions)
    +	return NewRootDeleteCollectionActionWithOptions(resource, metav1.DeleteOptions{}, listOpts)
    +}
    +
    +func NewRootDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl {
     	action := DeleteCollectionActionImpl{}
     	action.Verb = "delete-collection"
     	action.Resource = resource
    -	labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
    +	action.DeleteOptions = deleteOpts
    +	action.ListOptions = listOpts
    +
    +	labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts)
     	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
     
     	return action
     }
     
     func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl {
    +	listOpts, _ := opts.(metav1.ListOptions)
    +	return NewDeleteCollectionActionWithOptions(resource, namespace, metav1.DeleteOptions{}, listOpts)
    +}
    +
    +func NewDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, namespace string, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl {
     	action := DeleteCollectionActionImpl{}
     	action.Verb = "delete-collection"
     	action.Resource = resource
     	action.Namespace = namespace
    -	labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
    +	action.DeleteOptions = deleteOpts
    +	action.ListOptions = listOpts
    +
    +	labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts)
     	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
     
     	return action
     }
     
     func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl {
    +	listOpts, _ := opts.(metav1.ListOptions)
    +	return NewRootWatchActionWithOptions(resource, listOpts)
    +}
    +
    +func NewRootWatchActionWithOptions(resource schema.GroupVersionResource, opts metav1.ListOptions) WatchActionImpl {
     	action := WatchActionImpl{}
     	action.Verb = "watch"
     	action.Resource = resource
    +	action.ListOptions = opts
    +
     	labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts)
     	action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion}
     
    @@ -328,10 +472,17 @@ func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fi
     }
     
     func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl {
    +	listOpts, _ := opts.(metav1.ListOptions)
    +	return NewWatchActionWithOptions(resource, namespace, listOpts)
    +}
    +
    +func NewWatchActionWithOptions(resource schema.GroupVersionResource, namespace string, opts metav1.ListOptions) WatchActionImpl {
     	action := WatchActionImpl{}
     	action.Verb = "watch"
     	action.Resource = resource
     	action.Namespace = namespace
    +	action.ListOptions = opts
    +
     	labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts)
     	action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion}
     
    @@ -487,17 +638,23 @@ func (a GenericActionImpl) DeepCopy() Action {
     
     type GetActionImpl struct {
     	ActionImpl
    -	Name string
    +	Name       string
    +	GetOptions metav1.GetOptions
     }
     
     func (a GetActionImpl) GetName() string {
     	return a.Name
     }
     
    +func (a GetActionImpl) GetGetOptions() metav1.GetOptions {
    +	return a.GetOptions
    +}
    +
     func (a GetActionImpl) DeepCopy() Action {
     	return GetActionImpl{
     		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
     		Name:       a.Name,
    +		GetOptions: *a.GetOptions.DeepCopy(),
     	}
     }
     
    @@ -506,6 +663,7 @@ type ListActionImpl struct {
     	Kind             schema.GroupVersionKind
     	Name             string
     	ListRestrictions ListRestrictions
    +	ListOptions      metav1.ListOptions
     }
     
     func (a ListActionImpl) GetKind() schema.GroupVersionKind {
    @@ -516,6 +674,10 @@ func (a ListActionImpl) GetListRestrictions() ListRestrictions {
     	return a.ListRestrictions
     }
     
    +func (a ListActionImpl) GetListOptions() metav1.ListOptions {
    +	return a.ListOptions
    +}
    +
     func (a ListActionImpl) DeepCopy() Action {
     	return ListActionImpl{
     		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
    @@ -525,48 +687,62 @@ func (a ListActionImpl) DeepCopy() Action {
     			Labels: a.ListRestrictions.Labels.DeepCopySelector(),
     			Fields: a.ListRestrictions.Fields.DeepCopySelector(),
     		},
    +		ListOptions: *a.ListOptions.DeepCopy(),
     	}
     }
     
     type CreateActionImpl struct {
     	ActionImpl
    -	Name   string
    -	Object runtime.Object
    +	Name          string
    +	Object        runtime.Object
    +	CreateOptions metav1.CreateOptions
     }
     
     func (a CreateActionImpl) GetObject() runtime.Object {
     	return a.Object
     }
     
    +func (a CreateActionImpl) GetCreateOptions() metav1.CreateOptions {
    +	return a.CreateOptions
    +}
    +
     func (a CreateActionImpl) DeepCopy() Action {
     	return CreateActionImpl{
    -		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
    -		Name:       a.Name,
    -		Object:     a.Object.DeepCopyObject(),
    +		ActionImpl:    a.ActionImpl.DeepCopy().(ActionImpl),
    +		Name:          a.Name,
    +		Object:        a.Object.DeepCopyObject(),
    +		CreateOptions: *a.CreateOptions.DeepCopy(),
     	}
     }
     
     type UpdateActionImpl struct {
     	ActionImpl
    -	Object runtime.Object
    +	Object        runtime.Object
    +	UpdateOptions metav1.UpdateOptions
     }
     
     func (a UpdateActionImpl) GetObject() runtime.Object {
     	return a.Object
     }
     
    +func (a UpdateActionImpl) GetUpdateOptions() metav1.UpdateOptions {
    +	return a.UpdateOptions
    +}
    +
     func (a UpdateActionImpl) DeepCopy() Action {
     	return UpdateActionImpl{
    -		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
    -		Object:     a.Object.DeepCopyObject(),
    +		ActionImpl:    a.ActionImpl.DeepCopy().(ActionImpl),
    +		Object:        a.Object.DeepCopyObject(),
    +		UpdateOptions: *a.UpdateOptions.DeepCopy(),
     	}
     }
     
     type PatchActionImpl struct {
     	ActionImpl
    -	Name      string
    -	PatchType types.PatchType
    -	Patch     []byte
    +	Name         string
    +	PatchType    types.PatchType
    +	Patch        []byte
    +	PatchOptions metav1.PatchOptions
     }
     
     func (a PatchActionImpl) GetName() string {
    @@ -581,14 +757,19 @@ func (a PatchActionImpl) GetPatchType() types.PatchType {
     	return a.PatchType
     }
     
    +func (a PatchActionImpl) GetPatchOptions() metav1.PatchOptions {
    +	return a.PatchOptions
    +}
    +
     func (a PatchActionImpl) DeepCopy() Action {
     	patch := make([]byte, len(a.Patch))
     	copy(patch, a.Patch)
     	return PatchActionImpl{
    -		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
    -		Name:       a.Name,
    -		PatchType:  a.PatchType,
    -		Patch:      patch,
    +		ActionImpl:   a.ActionImpl.DeepCopy().(ActionImpl),
    +		Name:         a.Name,
    +		PatchType:    a.PatchType,
    +		Patch:        patch,
    +		PatchOptions: *a.PatchOptions.DeepCopy(),
     	}
     }
     
    @@ -617,12 +798,22 @@ func (a DeleteActionImpl) DeepCopy() Action {
     type DeleteCollectionActionImpl struct {
     	ActionImpl
     	ListRestrictions ListRestrictions
    +	DeleteOptions    metav1.DeleteOptions
    +	ListOptions      metav1.ListOptions
     }
     
     func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions {
     	return a.ListRestrictions
     }
     
    +func (a DeleteCollectionActionImpl) GetDeleteOptions() metav1.DeleteOptions {
    +	return a.DeleteOptions
    +}
    +
    +func (a DeleteCollectionActionImpl) GetListOptions() metav1.ListOptions {
    +	return a.ListOptions
    +}
    +
     func (a DeleteCollectionActionImpl) DeepCopy() Action {
     	return DeleteCollectionActionImpl{
     		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
    @@ -630,18 +821,25 @@ func (a DeleteCollectionActionImpl) DeepCopy() Action {
     			Labels: a.ListRestrictions.Labels.DeepCopySelector(),
     			Fields: a.ListRestrictions.Fields.DeepCopySelector(),
     		},
    +		DeleteOptions: *a.DeleteOptions.DeepCopy(),
    +		ListOptions:   *a.ListOptions.DeepCopy(),
     	}
     }
     
     type WatchActionImpl struct {
     	ActionImpl
     	WatchRestrictions WatchRestrictions
    +	ListOptions       metav1.ListOptions
     }
     
     func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions {
     	return a.WatchRestrictions
     }
     
    +func (a WatchActionImpl) GetListOptions() metav1.ListOptions {
    +	return a.ListOptions
    +}
    +
     func (a WatchActionImpl) DeepCopy() Action {
     	return WatchActionImpl{
     		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
    @@ -650,6 +848,7 @@ func (a WatchActionImpl) DeepCopy() Action {
     			Fields:          a.WatchRestrictions.Fields.DeepCopySelector(),
     			ResourceVersion: a.WatchRestrictions.ResourceVersion,
     		},
    +		ListOptions: *a.ListOptions.DeepCopy(),
     	}
     }
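As a rough sketch of the testing changes above (not part of the vendored diff): the new *WithOptions constructors record the request options on the action itself, and driving such an action through ObjectReaction over an ObjectTracker mirrors what the fake clientsets do internally. The scheme/codec wiring is standard client-go testing setup; the ConfigMap name and namespace are made up.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	codecs := serializer.NewCodecFactory(scheme)

	// Seed a tracker with one object, then route actions through ObjectReaction,
	// which is the reactor the fake clientsets install by default.
	tracker := k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder())
	_ = tracker.Add(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"},
	})
	react := k8stesting.ObjectReaction(tracker)

	// The action now carries its GetOptions, so tests can assert on them
	// instead of only on verb, name and namespace.
	action := k8stesting.NewGetActionWithOptions(
		corev1.SchemeGroupVersion.WithResource("configmaps"),
		"default", "demo",
		metav1.GetOptions{ResourceVersion: "0"},
	)

	handled, obj, err := react(action)
	if err != nil {
		panic(err)
	}
	fmt.Println("handled:", handled,
		"name:", obj.(*corev1.ConfigMap).Name,
		"resourceVersion option:", action.GetOptions.ResourceVersion)
}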
     
    diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go
    index 396840670f..d288a3aa45 100644
    --- a/vendor/k8s.io/client-go/testing/fixture.go
    +++ b/vendor/k8s.io/client-go/testing/fixture.go
    @@ -19,19 +19,24 @@ package testing
     import (
     	"fmt"
     	"reflect"
    +	"sigs.k8s.io/structured-merge-diff/v4/typed"
    +	"sigs.k8s.io/yaml"
     	"sort"
     	"strings"
     	"sync"
     
    -	jsonpatch "github.com/evanphx/json-patch"
    +	jsonpatch "gopkg.in/evanphx/json-patch.v4"
     
    -	"k8s.io/apimachinery/pkg/api/errors"
    +	apierrors "k8s.io/apimachinery/pkg/api/errors"
     	"k8s.io/apimachinery/pkg/api/meta"
    +	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     	"k8s.io/apimachinery/pkg/runtime"
     	"k8s.io/apimachinery/pkg/runtime/schema"
     	"k8s.io/apimachinery/pkg/types"
     	"k8s.io/apimachinery/pkg/util/json"
    +	"k8s.io/apimachinery/pkg/util/managedfields"
     	"k8s.io/apimachinery/pkg/util/strategicpatch"
     	"k8s.io/apimachinery/pkg/watch"
     	restclient "k8s.io/client-go/rest"
    @@ -46,26 +51,32 @@ type ObjectTracker interface {
     	Add(obj runtime.Object) error
     
     	// Get retrieves the object by its kind, namespace and name.
    -	Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error)
    +	Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error)
     
     	// Create adds an object to the tracker in the specified namespace.
    -	Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error
    +	Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error
     
     	// Update updates an existing object in the tracker in the specified namespace.
    -	Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error
    +	Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error
    +
    +	// Patch patches an existing object in the tracker in the specified namespace.
    +	Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error
    +
    +	// Apply applies an object in the tracker in the specified namespace.
    +	Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error
     
     	// List retrieves all objects of a given kind in the given
     	// namespace. Only non-List kinds are accepted.
    -	List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error)
    +	List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error)
     
     	// Delete deletes an existing object from the tracker. If object
     	// didn't exist in the tracker prior to deletion, Delete returns
     	// no error.
    -	Delete(gvr schema.GroupVersionResource, ns, name string) error
    +	Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error
     
     	// Watch watches objects from the tracker. Watch returns a channel
     	// which will push added / modified / deleted object.
    -	Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error)
    +	Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error)
     }
     
     // ObjectScheme abstracts the implementation of common operations on objects.
    @@ -76,133 +87,200 @@ type ObjectScheme interface {
     
     // ObjectReaction returns a ReactionFunc that applies core.Action to
     // the given tracker.
    +//
    +// If tracker also implements ManagedFieldObjectTracker, then managed fields
    +// will be handled by the tracker and apply patch actions will be evaluated
    +// using the field manager and will take field ownership into consideration.
    +// Without a ManagedFieldObjectTracker, apply patch actions do not consider
    +// field ownership.
    +//
    +// WARNING: There is no server side defaulting, validation, or conversion handled
    +// by the fake client and subresources are not handled accurately (fields in the
    +// root resource are not automatically updated when a scale resource is updated, for example).
     func ObjectReaction(tracker ObjectTracker) ReactionFunc {
    +	reactor := objectTrackerReact{tracker: tracker}
     	return func(action Action) (bool, runtime.Object, error) {
    -		ns := action.GetNamespace()
    -		gvr := action.GetResource()
     		// Here and below we need to switch on implementation types,
     		// not on interfaces, as some interfaces are identical
     		// (e.g. UpdateAction and CreateAction), so if we use them,
     		// updates and creates end up matching the same case branch.
     		switch action := action.(type) {
    -
     		case ListActionImpl:
    -			obj, err := tracker.List(gvr, action.GetKind(), ns)
    +			obj, err := reactor.List(action)
     			return true, obj, err
    -
     		case GetActionImpl:
    -			obj, err := tracker.Get(gvr, ns, action.GetName())
    +			obj, err := reactor.Get(action)
     			return true, obj, err
    -
     		case CreateActionImpl:
    -			objMeta, err := meta.Accessor(action.GetObject())
    -			if err != nil {
    -				return true, nil, err
    -			}
    -			if action.GetSubresource() == "" {
    -				err = tracker.Create(gvr, action.GetObject(), ns)
    -			} else {
    -				oldObj, getOldObjErr := tracker.Get(gvr, ns, objMeta.GetName())
    -				if getOldObjErr != nil {
    -					return true, nil, getOldObjErr
    -				}
    -				// Check whether the existing historical object type is the same as the current operation object type that needs to be updated, and if it is the same, perform the update operation.
    -				if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) {
    -					// TODO: Currently we're handling subresource creation as an update
    -					// on the enclosing resource. This works for some subresources but
    -					// might not be generic enough.
    -					err = tracker.Update(gvr, action.GetObject(), ns)
    -				} else {
    -					// If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker.
    -					return true, action.GetObject(), nil
    -				}
    -			}
    -			if err != nil {
    -				return true, nil, err
    -			}
    -			obj, err := tracker.Get(gvr, ns, objMeta.GetName())
    +			obj, err := reactor.Create(action)
     			return true, obj, err
    -
     		case UpdateActionImpl:
    -			objMeta, err := meta.Accessor(action.GetObject())
    -			if err != nil {
    -				return true, nil, err
    -			}
    -			err = tracker.Update(gvr, action.GetObject(), ns)
    -			if err != nil {
    -				return true, nil, err
    -			}
    -			obj, err := tracker.Get(gvr, ns, objMeta.GetName())
    +			obj, err := reactor.Update(action)
     			return true, obj, err
    -
     		case DeleteActionImpl:
    -			err := tracker.Delete(gvr, ns, action.GetName())
    -			if err != nil {
    -				return true, nil, err
    -			}
    -			return true, nil, nil
    -
    +			obj, err := reactor.Delete(action)
    +			return true, obj, err
     		case PatchActionImpl:
    -			obj, err := tracker.Get(gvr, ns, action.GetName())
    -			if err != nil {
    -				return true, nil, err
    +			if action.GetPatchType() == types.ApplyPatchType {
    +				obj, err := reactor.Apply(action)
    +				return true, obj, err
     			}
    +			obj, err := reactor.Patch(action)
    +			return true, obj, err
    +		default:
    +			return false, nil, fmt.Errorf("no reaction implemented for %s", action)
    +		}
    +	}
    +}
     
    -			old, err := json.Marshal(obj)
    -			if err != nil {
    -				return true, nil, err
    -			}
    +type objectTrackerReact struct {
    +	tracker ObjectTracker
    +}
     
    -			// reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields
    -			// in obj that are removed by patch are cleared
    -			value := reflect.ValueOf(obj)
    -			value.Elem().Set(reflect.New(value.Type().Elem()).Elem())
    -
    -			switch action.GetPatchType() {
    -			case types.JSONPatchType:
    -				patch, err := jsonpatch.DecodePatch(action.GetPatch())
    -				if err != nil {
    -					return true, nil, err
    -				}
    -				modified, err := patch.Apply(old)
    -				if err != nil {
    -					return true, nil, err
    -				}
    -
    -				if err = json.Unmarshal(modified, obj); err != nil {
    -					return true, nil, err
    -				}
    -			case types.MergePatchType:
    -				modified, err := jsonpatch.MergePatch(old, action.GetPatch())
    -				if err != nil {
    -					return true, nil, err
    -				}
    -
    -				if err := json.Unmarshal(modified, obj); err != nil {
    -					return true, nil, err
    -				}
    -			case types.StrategicMergePatchType, types.ApplyPatchType:
    -				mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj)
    -				if err != nil {
    -					return true, nil, err
    -				}
    -				if err = json.Unmarshal(mergedByte, obj); err != nil {
    -					return true, nil, err
    -				}
    -			default:
    -				return true, nil, fmt.Errorf("PatchType is not supported")
    -			}
    +func (o objectTrackerReact) List(action ListActionImpl) (runtime.Object, error) {
    +	return o.tracker.List(action.GetResource(), action.GetKind(), action.GetNamespace(), action.ListOptions)
    +}
     
    -			if err = tracker.Update(gvr, obj, ns); err != nil {
    -				return true, nil, err
    -			}
    +func (o objectTrackerReact) Get(action GetActionImpl) (runtime.Object, error) {
    +	return o.tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName(), action.GetOptions)
    +}
    +
    +func (o objectTrackerReact) Create(action CreateActionImpl) (runtime.Object, error) {
    +	ns := action.GetNamespace()
    +	gvr := action.GetResource()
    +	objMeta, err := meta.Accessor(action.GetObject())
    +	if err != nil {
    +		return nil, err
    +	}
    +	if action.GetSubresource() == "" {
    +		err = o.tracker.Create(gvr, action.GetObject(), ns, action.CreateOptions)
    +		if err != nil {
    +			return nil, err
    +		}
    +	} else {
    +		oldObj, getOldObjErr := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
    +		if getOldObjErr != nil {
    +			return nil, getOldObjErr
    +		}
     +		// If the existing (historical) object has the same type as the object in this request, perform the update.
    +		if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) {
    +			// TODO: Currently we're handling subresource creation as an update
    +			// on the enclosing resource. This works for some subresources but
    +			// might not be generic enough.
    +			err = o.tracker.Update(gvr, action.GetObject(), ns, metav1.UpdateOptions{
    +				DryRun:          action.CreateOptions.DryRun,
    +				FieldManager:    action.CreateOptions.FieldManager,
    +				FieldValidation: action.CreateOptions.FieldValidation,
    +			})
    +		} else {
     +		// If the historical object type differs from the current object type, return the submitted object as-is and do not persist it in the tracker.
    +			return action.GetObject(), nil
    +		}
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
    +	return obj, err
    +}
    +
    +func (o objectTrackerReact) Update(action UpdateActionImpl) (runtime.Object, error) {
    +	ns := action.GetNamespace()
    +	gvr := action.GetResource()
    +	objMeta, err := meta.Accessor(action.GetObject())
    +	if err != nil {
    +		return nil, err
    +	}
     
    -			return true, obj, nil
    +	err = o.tracker.Update(gvr, action.GetObject(), ns, action.UpdateOptions)
    +	if err != nil {
    +		return nil, err
    +	}
     
    -		default:
    -			return false, nil, fmt.Errorf("no reaction implemented for %s", action)
    +	obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
    +	return obj, err
    +}
    +
    +func (o objectTrackerReact) Delete(action DeleteActionImpl) (runtime.Object, error) {
    +	err := o.tracker.Delete(action.GetResource(), action.GetNamespace(), action.GetName(), action.DeleteOptions)
    +	return nil, err
    +}
    +
    +func (o objectTrackerReact) Apply(action PatchActionImpl) (runtime.Object, error) {
    +	ns := action.GetNamespace()
    +	gvr := action.GetResource()
    +
    +	patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
    +	if err := yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil {
    +		return nil, err
    +	}
    +	err := o.tracker.Apply(gvr, patchObj, ns, action.PatchOptions)
    +	if err != nil {
    +		return nil, err
    +	}
    +	obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{})
    +	return obj, err
    +}
    +
    +func (o objectTrackerReact) Patch(action PatchActionImpl) (runtime.Object, error) {
    +	ns := action.GetNamespace()
    +	gvr := action.GetResource()
    +
    +	obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{})
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	old, err := json.Marshal(obj)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	// reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields
    +	// in obj that are removed by patch are cleared
    +	value := reflect.ValueOf(obj)
    +	value.Elem().Set(reflect.New(value.Type().Elem()).Elem())
    +
    +	switch action.GetPatchType() {
    +	case types.JSONPatchType:
    +		patch, err := jsonpatch.DecodePatch(action.GetPatch())
    +		if err != nil {
    +			return nil, err
    +		}
    +		modified, err := patch.Apply(old)
    +		if err != nil {
    +			return nil, err
    +		}
    +
    +		if err = json.Unmarshal(modified, obj); err != nil {
    +			return nil, err
     		}
    +	case types.MergePatchType:
    +		modified, err := jsonpatch.MergePatch(old, action.GetPatch())
    +		if err != nil {
    +			return nil, err
    +		}
    +
    +		if err := json.Unmarshal(modified, obj); err != nil {
    +			return nil, err
    +		}
    +	case types.StrategicMergePatchType:
    +		mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj)
    +		if err != nil {
    +			return nil, err
    +		}
    +		if err = json.Unmarshal(mergedByte, obj); err != nil {
    +			return nil, err
    +		}
    +	default:
    +		return nil, fmt.Errorf("PatchType %s is not supported", action.GetPatchType())
     	}
    +
    +	if err = o.tracker.Patch(gvr, obj, ns, action.PatchOptions); err != nil {
    +		return nil, err
    +	}
    +
    +	return obj, nil
     }
     
     type tracker struct {
    @@ -231,7 +309,11 @@ func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracke
     	}
     }
     
    -func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
    +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return nil, err
    +	}
     	// Heuristic for list kind: original kind + List suffix. Might
     	// not always be true but this tracker has a pretty limited
     	// understanding of the actual API model.
    @@ -270,7 +352,12 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK
     	return list.DeepCopyObject(), nil
     }
     
    -func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) {
    +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +
     	t.lock.Lock()
     	defer t.lock.Unlock()
     
    @@ -283,8 +370,12 @@ func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Inter
     	return fakewatcher, nil
     }
     
    -func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
    -	errNotFound := errors.NewNotFound(gvr.GroupResource(), name)
    +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	errNotFound := apierrors.NewNotFound(gvr.GroupResource(), name)
     
     	t.lock.RLock()
     	defer t.lock.RUnlock()
    @@ -305,7 +396,7 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime
     	obj := matchingObj.DeepCopyObject()
     	if status, ok := obj.(*metav1.Status); ok {
     		if status.Status != metav1.StatusSuccess {
    -			return nil, &errors.StatusError{ErrStatus: *status}
    +			return nil, &apierrors.StatusError{ErrStatus: *status}
     		}
     	}
     
    @@ -352,11 +443,70 @@ func (t *tracker) Add(obj runtime.Object) error {
     	return nil
     }
     
    -func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
    +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return err
    +	}
     	return t.add(gvr, obj, ns, false)
     }
     
    -func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
    +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return err
    +	}
    +	return t.add(gvr, obj, ns, true)
    +}
    +
    +func (t *tracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, opts ...metav1.PatchOptions) error {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return err
    +	}
    +	return t.add(gvr, patchedObject, ns, true)
    +}
    +
    +func (t *tracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return err
    +	}
    +	applyConfigurationMeta, err := meta.Accessor(applyConfiguration)
    +	if err != nil {
    +		return err
    +	}
    +
    +	obj, err := t.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{})
    +	if err != nil {
    +		return err
    +	}
    +
    +	old, err := json.Marshal(obj)
    +	if err != nil {
    +		return err
    +	}
    +
    +	// reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields
    +	// in obj that are removed by patch are cleared
    +	value := reflect.ValueOf(obj)
    +	value.Elem().Set(reflect.New(value.Type().Elem()).Elem())
    +
     +	// For backward compatibility with the behavior of 1.30 and earlier, continue to handle
     +	// apply via strategic merge patch (clients may use fake.NewClientset and
     +	// ManagedFieldObjectTracker for full field manager support).
    +	patch, err := json.Marshal(applyConfiguration)
    +	if err != nil {
    +		return err
    +	}
    +	mergedByte, err := strategicpatch.StrategicMergePatch(old, patch, obj)
    +	if err != nil {
    +		return err
    +	}
    +	if err = json.Unmarshal(mergedByte, obj); err != nil {
    +		return err
    +	}
    +
     	return t.add(gvr, obj, ns, true)
     }
     
    @@ -398,7 +548,7 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st
     
     	if ns != newMeta.GetNamespace() {
     		msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace())
    -		return errors.NewBadRequest(msg)
    +		return apierrors.NewBadRequest(msg)
     	}
     
     	_, ok := t.objects[gvr]
    @@ -416,12 +566,12 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st
     			t.objects[gvr][namespacedName] = obj
     			return nil
     		}
    -		return errors.NewAlreadyExists(gr, newMeta.GetName())
    +		return apierrors.NewAlreadyExists(gr, newMeta.GetName())
     	}
     
     	if replaceExisting {
     		// Tried to update but no matching object was found.
    -		return errors.NewNotFound(gr, newMeta.GetName())
    +		return apierrors.NewNotFound(gr, newMeta.GetName())
     	}
     
     	t.objects[gvr][namespacedName] = obj
    @@ -451,19 +601,23 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error {
     	return nil
     }
     
    -func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
    +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error {
    +	_, err := assertOptionalSingleArgument(opts)
    +	if err != nil {
    +		return err
    +	}
     	t.lock.Lock()
     	defer t.lock.Unlock()
     
     	objs, ok := t.objects[gvr]
     	if !ok {
    -		return errors.NewNotFound(gvr.GroupResource(), name)
    +		return apierrors.NewNotFound(gvr.GroupResource(), name)
     	}
     
     	namespacedName := types.NamespacedName{Namespace: ns, Name: name}
     	obj, ok := objs[namespacedName]
     	if !ok {
    -		return errors.NewNotFound(gvr.GroupResource(), name)
    +		return apierrors.NewNotFound(gvr.GroupResource(), name)
     	}
     
     	delete(objs, namespacedName)
    @@ -473,6 +627,203 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error
     	return nil
     }
     
    +type managedFieldObjectTracker struct {
    +	ObjectTracker
    +	scheme          ObjectScheme
    +	objectConverter runtime.ObjectConvertor
    +	mapper          meta.RESTMapper
    +	typeConverter   managedfields.TypeConverter
    +}
    +
    +var _ ObjectTracker = &managedFieldObjectTracker{}
    +
    +// NewFieldManagedObjectTracker returns an ObjectTracker that can be used to keep track
    +// of objects and managed fields for the fake clientset. Mostly useful for unit tests.
    +func NewFieldManagedObjectTracker(scheme *runtime.Scheme, decoder runtime.Decoder, typeConverter managedfields.TypeConverter) ObjectTracker {
    +	return &managedFieldObjectTracker{
    +		ObjectTracker:   NewObjectTracker(scheme, decoder),
    +		scheme:          scheme,
    +		objectConverter: scheme,
    +		mapper:          testrestmapper.TestOnlyStaticRESTMapper(scheme),
    +		typeConverter:   typeConverter,
    +	}
    +}
    +
    +func (t *managedFieldObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.CreateOptions) error {
    +	opts, err := assertOptionalSingleArgument(vopts)
    +	if err != nil {
    +		return err
    +	}
    +	gvk, err := t.mapper.KindFor(gvr)
    +	if err != nil {
    +		return err
    +	}
    +	mgr, err := t.fieldManagerFor(gvk)
    +	if err != nil {
    +		return err
    +	}
    +
    +	objType, err := meta.TypeAccessor(obj)
    +	if err != nil {
    +		return err
    +	}
    +	// Stamp GVK
    +	apiVersion, kind := gvk.ToAPIVersionAndKind()
    +	objType.SetAPIVersion(apiVersion)
    +	objType.SetKind(kind)
    +
    +	objMeta, err := meta.Accessor(obj)
    +	if err != nil {
    +		return err
    +	}
    +	liveObject, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
    +	if apierrors.IsNotFound(err) {
    +		liveObject, err = t.scheme.New(gvk)
    +		if err != nil {
    +			return err
    +		}
    +		liveObject.GetObjectKind().SetGroupVersionKind(gvk)
    +	} else if err != nil {
    +		return err
    +	}
    +	objWithManagedFields, err := mgr.Update(liveObject, obj, opts.FieldManager)
    +	if err != nil {
    +		return err
    +	}
    +	return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, opts)
    +}
    +
    +func (t *managedFieldObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.UpdateOptions) error {
    +	opts, err := assertOptionalSingleArgument(vopts)
    +	if err != nil {
    +		return err
    +	}
    +	gvk, err := t.mapper.KindFor(gvr)
    +	if err != nil {
    +		return err
    +	}
    +	mgr, err := t.fieldManagerFor(gvk)
    +	if err != nil {
    +		return err
    +	}
    +
    +	objMeta, err := meta.Accessor(obj)
    +	if err != nil {
    +		return err
    +	}
    +	oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
    +	if err != nil {
    +		return err
    +	}
    +	objWithManagedFields, err := mgr.Update(oldObj, obj, opts.FieldManager)
    +	if err != nil {
    +		return err
    +	}
    +
    +	return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, opts)
    +}
    +
    +func (t *managedFieldObjectTracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, vopts ...metav1.PatchOptions) error {
    +	opts, err := assertOptionalSingleArgument(vopts)
    +	if err != nil {
    +		return err
    +	}
    +	gvk, err := t.mapper.KindFor(gvr)
    +	if err != nil {
    +		return err
    +	}
    +	mgr, err := t.fieldManagerFor(gvk)
    +	if err != nil {
    +		return err
    +	}
    +
    +	objMeta, err := meta.Accessor(patchedObject)
    +	if err != nil {
    +		return err
    +	}
    +	oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
    +	if err != nil {
    +		return err
    +	}
    +	objWithManagedFields, err := mgr.Update(oldObj, patchedObject, opts.FieldManager)
    +	if err != nil {
    +		return err
    +	}
    +	return t.ObjectTracker.Patch(gvr, objWithManagedFields, ns, vopts...)
    +}
    +
    +func (t *managedFieldObjectTracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, vopts ...metav1.PatchOptions) error {
    +	opts, err := assertOptionalSingleArgument(vopts)
    +	if err != nil {
    +		return err
    +	}
    +	gvk, err := t.mapper.KindFor(gvr)
    +	if err != nil {
    +		return err
    +	}
    +	applyConfigurationMeta, err := meta.Accessor(applyConfiguration)
    +	if err != nil {
    +		return err
    +	}
    +
    +	exists := true
    +	liveObject, err := t.ObjectTracker.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{})
    +	if apierrors.IsNotFound(err) {
    +		exists = false
    +		liveObject, err = t.scheme.New(gvk)
    +		if err != nil {
    +			return err
    +		}
    +		liveObject.GetObjectKind().SetGroupVersionKind(gvk)
    +	} else if err != nil {
    +		return err
    +	}
    +	mgr, err := t.fieldManagerFor(gvk)
    +	if err != nil {
    +		return err
    +	}
    +	force := false
    +	if opts.Force != nil {
    +		force = *opts.Force
    +	}
    +	objWithManagedFields, err := mgr.Apply(liveObject, applyConfiguration, opts.FieldManager, force)
    +	if err != nil {
    +		return err
    +	}
    +
    +	if !exists {
    +		return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, metav1.CreateOptions{
    +			DryRun:          opts.DryRun,
    +			FieldManager:    opts.FieldManager,
    +			FieldValidation: opts.FieldValidation,
    +		})
    +	} else {
    +		return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, metav1.UpdateOptions{
    +			DryRun:          opts.DryRun,
    +			FieldManager:    opts.FieldManager,
    +			FieldValidation: opts.FieldValidation,
    +		})
    +	}
    +}
    +
    +func (t *managedFieldObjectTracker) fieldManagerFor(gvk schema.GroupVersionKind) (*managedfields.FieldManager, error) {
    +	return managedfields.NewDefaultFieldManager(
    +		t.typeConverter,
    +		t.objectConverter,
    +		&objectDefaulter{},
    +		t.scheme,
    +		gvk,
    +		gvk.GroupVersion(),
    +		"",
    +		nil)
    +}
    +
     +// objectDefaulter implements runtime.ObjectDefaulter, but it actually
    +// does nothing.
    +type objectDefaulter struct{}
    +
    +func (d *objectDefaulter) Default(_ runtime.Object) {}
    +
     // filterByNamespace returns all objects in the collection that
     // match provided namespace. Empty namespace matches
     // non-namespaced objects.
    @@ -579,3 +930,76 @@ func resourceCovers(resource string, action Action) bool {
     
     	return false
     }
    +
    +// assertOptionalSingleArgument returns an error if there is more than one variadic argument.
     +// Otherwise, it returns the first variadic argument, or the zero value if there are no arguments.
    +func assertOptionalSingleArgument[T any](arguments []T) (T, error) {
    +	var a T
    +	switch len(arguments) {
    +	case 0:
    +		return a, nil
    +	case 1:
    +		return arguments[0], nil
    +	default:
    +		return a, fmt.Errorf("expected only one option argument but got %d", len(arguments))
    +	}
    +}
    +
    +type TypeResolver interface {
    +	Type(openAPIName string) typed.ParseableType
    +}
    +
    +type TypeConverter struct {
    +	Scheme       *runtime.Scheme
    +	TypeResolver TypeResolver
    +}
    +
    +func (tc TypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) {
    +	gvk := obj.GetObjectKind().GroupVersionKind()
    +	name, err := tc.openAPIName(gvk)
    +	if err != nil {
    +		return nil, err
    +	}
    +	t := tc.TypeResolver.Type(name)
    +	switch o := obj.(type) {
    +	case *unstructured.Unstructured:
    +		return t.FromUnstructured(o.UnstructuredContent(), opts...)
    +	default:
    +		return t.FromStructured(obj, opts...)
    +	}
    +}
    +
    +func (tc TypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
    +	vu := value.AsValue().Unstructured()
    +	switch o := vu.(type) {
    +	case map[string]interface{}:
    +		return &unstructured.Unstructured{Object: o}, nil
    +	default:
    +		return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
    +	}
    +}
    +
    +func (tc TypeConverter) openAPIName(kind schema.GroupVersionKind) (string, error) {
    +	example, err := tc.Scheme.New(kind)
    +	if err != nil {
    +		return "", err
    +	}
    +	rtype := reflect.TypeOf(example).Elem()
    +	name := friendlyName(rtype.PkgPath() + "." + rtype.Name())
    +	return name, nil
    +}
    +
    +// This is a copy of openapi.friendlyName.
    +// TODO: consider introducing a shared version of this function in apimachinery.
    +func friendlyName(name string) string {
    +	nameParts := strings.Split(name, "/")
    +	// Reverse first part. e.g., io.k8s... instead of k8s.io...
    +	if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
    +		parts := strings.Split(nameParts[0], ".")
    +		for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
    +			parts[i], parts[j] = parts[j], parts[i]
    +		}
    +		nameParts[0] = strings.Join(parts, ".")
    +	}
    +	return strings.Join(nameParts, ".")
    +}
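
The rewritten fixture threads optional request options (GetOptions, CreateOptions, and so on) through every ObjectTracker method, rejects more than one option via assertOptionalSingleArgument, and routes reactions through the new objectTrackerReact helper; for reference, friendlyName reverses the domain segment, so k8s.io/api/core/v1.Pod becomes io.k8s.api.core.v1.Pod. Below is a minimal sketch of how a test might exercise the updated signatures; the package name, the seeded Pod, and the core/v1 scheme wiring are illustrative assumptions, not part of this diff.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	clienttesting "k8s.io/client-go/testing"
)

// newPodTracker builds a plain ObjectTracker for core/v1 objects and seeds it
// with one Pod, using the updated variadic-options signatures.
func newPodTracker() (clienttesting.ObjectTracker, error) {
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	codecs := serializer.NewCodecFactory(scheme)
	tracker := clienttesting.NewObjectTracker(scheme, codecs.UniversalDecoder())

	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

	// Options are optional: zero or one value may be passed; more than one is rejected.
	if err := tracker.Create(gvr, pod, "default", metav1.CreateOptions{}); err != nil {
		return nil, err
	}
	if _, err := tracker.Get(gvr, "default", "demo"); err != nil {
		return nil, err
	}
	return tracker, nil
}

// reactionFor wires the tracker into a ReactionFunc, as a fake clientset would.
func reactionFor(tracker clienttesting.ObjectTracker) clienttesting.ReactionFunc {
	return clienttesting.ObjectReaction(tracker)
}
```

Calling a tracker method with no options or with exactly one options struct both compile against the variadic signatures; passing two or more returns an error from the tracker.
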
    diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
    index ee19a5af95..e523a66522 100644
    --- a/vendor/k8s.io/client-go/tools/cache/controller.go
    +++ b/vendor/k8s.io/client-go/tools/cache/controller.go
    @@ -59,6 +59,12 @@ type Config struct {
     	// FullResyncPeriod is the period at which ShouldResync is considered.
     	FullResyncPeriod time.Duration
     
     +	// MinWatchTimeout, if set, will define the minimum timeout for watch requests sent
     +	// to kube-apiserver. However, values lower than 5m will not be honored to avoid
     +	// negative performance impact on the control plane.
     +	// Optional - if unset, a default value of 5m will be used.
    +	MinWatchTimeout time.Duration
    +
     	// ShouldResync is periodically used by the reflector to determine
     	// whether to Resync the Queue. If ShouldResync is `nil` or
     	// returns true, it means the reflector should proceed with the
    @@ -138,6 +144,7 @@ func (c *controller) Run(stopCh <-chan struct{}) {
     		c.config.Queue,
     		ReflectorOptions{
     			ResyncPeriod:    c.config.FullResyncPeriod,
    +			MinWatchTimeout: c.config.MinWatchTimeout,
     			TypeDescription: c.config.ObjectDescription,
     			Clock:           c.clock,
     		},
    @@ -346,6 +353,58 @@ func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) {
     	return ObjectToName(obj)
     }
     
    +// InformerOptions configure a Reflector.
    +type InformerOptions struct {
    +	// ListerWatcher implements List and Watch functions for the source of the resource
    +	// the informer will be informing about.
    +	ListerWatcher ListerWatcher
    +
    +	// ObjectType is an object of the type that informer is expected to receive.
    +	ObjectType runtime.Object
    +
     +	// Handler defines functions that should be called on object mutations.
    +	Handler ResourceEventHandler
    +
    +	// ResyncPeriod is the underlying Reflector's resync period. If non-zero, the store
    +	// is re-synced with that frequency - Modify events are delivered even if objects
    +	// didn't change.
    +	// This is useful for synchronizing objects that configure external resources
    +	// (e.g. configure cloud provider functionalities).
     +	// Optional - if unset, the store is not resynced periodically.
    +	ResyncPeriod time.Duration
    +
     +	// MinWatchTimeout, if set, will define the minimum timeout for watch requests sent
     +	// to kube-apiserver. However, values lower than 5m will not be honored to avoid
     +	// negative performance impact on the control plane.
     +	// Optional - if unset, a default value of 5m will be used.
    +	MinWatchTimeout time.Duration
    +
    +	// Indexers, if set, are the indexers for the received objects to optimize
    +	// certain queries.
    +	// Optional - if unset no indexes are maintained.
    +	Indexers Indexers
    +
     +	// Transform function, if set, will be called on all objects before they are
     +	// put into the Store and before the corresponding Add/Modify/Delete handlers are
     +	// invoked for them.
     +	// Optional - if unset, no additional transforming happens.
    +	Transform TransformFunc
    +}
    +
    +// NewInformerWithOptions returns a Store and a controller for populating the store
     +// while also providing event notifications. You should only use the returned
    +// Store for Get/List operations; Add/Modify/Deletes will cause the event
    +// notifications to be faulty.
    +func NewInformerWithOptions(options InformerOptions) (Store, Controller) {
    +	var clientState Store
    +	if options.Indexers == nil {
    +		clientState = NewStore(DeletionHandlingMetaNamespaceKeyFunc)
    +	} else {
    +		clientState = NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers)
    +	}
    +	return clientState, newInformer(clientState, options)
    +}
    +
     // NewInformer returns a Store and a controller for populating the store
     // while also providing event notifications. You should only used the returned
     // Store for Get/List operations; Add/Modify/Deletes will cause the event
    @@ -360,6 +419,8 @@ func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) {
     //     long as possible (until the upstream source closes the watch or times out,
     //     or you stop the controller).
     //   - h is the object you want notifications sent to.
    +//
    +// Deprecated: Use NewInformerWithOptions instead.
     func NewInformer(
     	lw ListerWatcher,
     	objType runtime.Object,
    @@ -369,7 +430,13 @@ func NewInformer(
     	// This will hold the client state, as we know it.
     	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
     
    -	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
    +	options := InformerOptions{
    +		ListerWatcher: lw,
    +		ObjectType:    objType,
    +		Handler:       h,
    +		ResyncPeriod:  resyncPeriod,
    +	}
    +	return clientState, newInformer(clientState, options)
     }
     
     // NewIndexerInformer returns an Indexer and a Controller for populating the index
    @@ -387,6 +454,8 @@ func NewInformer(
     //     or you stop the controller).
     //   - h is the object you want notifications sent to.
     //   - indexers is the indexer for the received object type.
    +//
    +// Deprecated: Use NewInformerWithOptions instead.
     func NewIndexerInformer(
     	lw ListerWatcher,
     	objType runtime.Object,
    @@ -397,7 +466,14 @@ func NewIndexerInformer(
     	// This will hold the client state, as we know it.
     	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
     
    -	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
    +	options := InformerOptions{
    +		ListerWatcher: lw,
    +		ObjectType:    objType,
    +		Handler:       h,
    +		ResyncPeriod:  resyncPeriod,
    +		Indexers:      indexers,
    +	}
    +	return clientState, newInformer(clientState, options)
     }
     
     // NewTransformingInformer returns a Store and a controller for populating
    @@ -407,6 +483,8 @@ func NewIndexerInformer(
     // The given transform function will be called on all objects before they will
     // put into the Store and corresponding Add/Modify/Delete handlers will
     // be invoked for them.
    +//
    +// Deprecated: Use NewInformerWithOptions instead.
     func NewTransformingInformer(
     	lw ListerWatcher,
     	objType runtime.Object,
    @@ -417,7 +495,14 @@ func NewTransformingInformer(
     	// This will hold the client state, as we know it.
     	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
     
    -	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
    +	options := InformerOptions{
    +		ListerWatcher: lw,
    +		ObjectType:    objType,
    +		Handler:       h,
    +		ResyncPeriod:  resyncPeriod,
    +		Transform:     transformer,
    +	}
    +	return clientState, newInformer(clientState, options)
     }
     
     // NewTransformingIndexerInformer returns an Indexer and a controller for
    @@ -427,6 +512,8 @@ func NewTransformingInformer(
     // The given transform function will be called on all objects before they will
     // be put into the Index and corresponding Add/Modify/Delete handlers will
     // be invoked for them.
    +//
    +// Deprecated: Use NewInformerWithOptions instead.
     func NewTransformingIndexerInformer(
     	lw ListerWatcher,
     	objType runtime.Object,
    @@ -438,7 +525,15 @@ func NewTransformingIndexerInformer(
     	// This will hold the client state, as we know it.
     	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
     
    -	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
    +	options := InformerOptions{
    +		ListerWatcher: lw,
    +		ObjectType:    objType,
    +		Handler:       h,
    +		ResyncPeriod:  resyncPeriod,
    +		Indexers:      indexers,
    +		Transform:     transformer,
    +	}
    +	return clientState, newInformer(clientState, options)
     }
     
     // Multiplexes updates in the form of a list of Deltas into a Store, and informs
    @@ -481,42 +576,29 @@ func processDeltas(
     // providing event notifications.
     //
     // Parameters
    -//   - lw is list and watch functions for the source of the resource you want to
    -//     be informed of.
    -//   - objType is an object of the type that you expect to receive.
    -//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
    -//     calls, even if nothing changed). Otherwise, re-list will be delayed as
    -//     long as possible (until the upstream source closes the watch or times out,
    -//     or you stop the controller).
    -//   - h is the object you want notifications sent to.
     //   - clientState is the store you want to populate
    -func newInformer(
    -	lw ListerWatcher,
    -	objType runtime.Object,
    -	resyncPeriod time.Duration,
    -	h ResourceEventHandler,
    -	clientState Store,
    -	transformer TransformFunc,
    -) Controller {
     +//   - options contains the options to configure the controller
    +func newInformer(clientState Store, options InformerOptions) Controller {
     	// This will hold incoming changes. Note how we pass clientState in as a
     	// KeyLister, that way resync operations will result in the correct set
     	// of update/delete deltas.
     	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
     		KnownObjects:          clientState,
     		EmitDeltaTypeReplaced: true,
    -		Transformer:           transformer,
    +		Transformer:           options.Transform,
     	})
     
     	cfg := &Config{
     		Queue:            fifo,
    -		ListerWatcher:    lw,
    -		ObjectType:       objType,
    -		FullResyncPeriod: resyncPeriod,
    +		ListerWatcher:    options.ListerWatcher,
    +		ObjectType:       options.ObjectType,
    +		FullResyncPeriod: options.ResyncPeriod,
    +		MinWatchTimeout:  options.MinWatchTimeout,
     		RetryOnError:     false,
     
     		Process: func(obj interface{}, isInInitialList bool) error {
     			if deltas, ok := obj.(Deltas); ok {
    -				return processDeltas(h, clientState, deltas, isInInitialList)
    +				return processDeltas(options.Handler, clientState, deltas, isInInitialList)
     			}
     			return errors.New("object given as Process argument is not Deltas")
     		},
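
controller.go now exposes NewInformerWithOptions and deprecates the positional constructors, and MinWatchTimeout flows from Config into the Reflector. The sketch below shows how a caller might build an informer through the options struct; it assumes a ListerWatcher and stop channel supplied elsewhere, and the handler bodies and timings are placeholders rather than recommendations.

```go
package example

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// runPodInformer shows one way the new options-based constructor could be used.
// The ListerWatcher lw and stop channel are assumed to be provided by the caller.
func runPodInformer(lw cache.ListerWatcher, stopCh <-chan struct{}) cache.Store {
	store, controller := cache.NewInformerWithOptions(cache.InformerOptions{
		ListerWatcher: lw,
		ObjectType:    &corev1.Pod{},
		ResyncPeriod:  30 * time.Second,
		// Values below the 5m default are not honored; larger values raise the
		// minimum watch timeout used for requests to the kube-apiserver.
		MinWatchTimeout: 10 * time.Minute,
		Handler: cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* react to adds */ },
			UpdateFunc: func(oldObj, newObj interface{}) { /* react to updates */ },
			DeleteFunc: func(obj interface{}) { /* react to deletes */ },
		},
	})
	go controller.Run(stopCh)
	return store
}
```
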
    diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
    index 7160bb1ee7..ce74dfb6f1 100644
    --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
    +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
    @@ -139,20 +139,17 @@ type DeltaFIFO struct {
     }
     
     // TransformFunc allows for transforming an object before it will be processed.
    -// TransformFunc (similarly to ResourceEventHandler functions) should be able
    -// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown.
    -//
    -// New in v1.27: In such cases, the contained object will already have gone
    -// through the transform object separately (when it was added / updated prior
    -// to the delete), so the TransformFunc can likely safely ignore such objects
    -// (i.e., just return the input object).
     //
     // The most common usage pattern is to clean-up some parts of the object to
     // reduce component memory usage if a given component doesn't care about them.
     //
    -// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc
    -// sees the object before any other actor, and it is now safe to mutate the
    -// object in place instead of making a copy.
    +// New in v1.27: TransformFunc sees the object before any other actor, and it
    +// is now safe to mutate the object in place instead of making a copy.
    +//
    +// It's recommended for the TransformFunc to be idempotent.
    +// It MUST be idempotent if objects already present in the cache are passed to
     +// Replace() to avoid re-mutating them. Default informers, however, do not
     +// pass existing objects to Replace.
     //
     // Note that TransformFunc is called while inserting objects into the
     // notification queue and is therefore extremely performance sensitive; please
    @@ -440,22 +437,38 @@ func isDeletionDup(a, b *Delta) *Delta {
     // queueActionLocked appends to the delta list for the object.
     // Caller must lock first.
     func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
    +	return f.queueActionInternalLocked(actionType, actionType, obj)
    +}
    +
    +// queueActionInternalLocked appends to the delta list for the object.
    +// The actionType is emitted and must honor emitDeltaTypeReplaced.
    +// The internalActionType is only used within this function and must
    +// ignore emitDeltaTypeReplaced.
    +// Caller must lock first.
    +func (f *DeltaFIFO) queueActionInternalLocked(actionType, internalActionType DeltaType, obj interface{}) error {
     	id, err := f.KeyOf(obj)
     	if err != nil {
     		return KeyError{obj, err}
     	}
     
     	// Every object comes through this code path once, so this is a good
    -	// place to call the transform func.  If obj is a
    -	// DeletedFinalStateUnknown tombstone, then the containted inner object
    -	// will already have gone through the transformer, but we document that
    -	// this can happen. In cases involving Replace(), such an object can
    -	// come through multiple times.
    +	// place to call the transform func.
    +	//
    +	// If obj is a DeletedFinalStateUnknown tombstone or the action is a Sync,
     +	// then the object has already gone through the transformer.
    +	//
    +	// If the objects already present in the cache are passed to Replace(),
    +	// the transformer must be idempotent to avoid re-mutating them,
    +	// or coordinate with all readers from the cache to avoid data races.
    +	// Default informers do not pass existing objects to Replace.
     	if f.transformer != nil {
    -		var err error
    -		obj, err = f.transformer(obj)
    -		if err != nil {
    -			return err
    +		_, isTombstone := obj.(DeletedFinalStateUnknown)
    +		if !isTombstone && internalActionType != Sync {
    +			var err error
    +			obj, err = f.transformer(obj)
    +			if err != nil {
    +				return err
    +			}
     		}
     	}
     
    @@ -638,7 +651,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
     			return KeyError{item, err}
     		}
     		keys.Insert(key)
    -		if err := f.queueActionLocked(action, item); err != nil {
    +		if err := f.queueActionInternalLocked(action, Replaced, item); err != nil {
     			return fmt.Errorf("couldn't enqueue object: %v", err)
     		}
     	}
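
The revised TransformFunc contract asks transforms to be idempotent, since objects can pass through the queue more than once in some Replace() flows. A minimal sketch of an idempotent transform, assuming the common use case of trimming managedFields to reduce memory, is shown below; it would be wired in via InformerOptions.Transform or DeltaFIFOOptions.Transformer.

```go
package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/tools/cache"
)

// stripManagedFields is an idempotent TransformFunc: dropping managedFields a
// second time is a no-op, so it stays safe even if an object is transformed
// more than once.
func stripManagedFields(obj interface{}) (interface{}, error) {
	if accessor, err := meta.Accessor(obj); err == nil {
		accessor.SetManagedFields(nil)
	}
	return obj, nil
}

// Compile-time check that the function satisfies the TransformFunc signature.
var _ cache.TransformFunc = stripManagedFields
```
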
    diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go
    index 420ca7b2ac..a60f44943e 100644
    --- a/vendor/k8s.io/client-go/tools/cache/listers.go
    +++ b/vendor/k8s.io/client-go/tools/cache/listers.go
    @@ -30,7 +30,7 @@ import (
     // AppendFunc is used to add a matching item to whatever list the caller is using
     type AppendFunc func(interface{})
     
    -// ListAll calls appendFn with each value retrieved from store which matches the selector.
    +// ListAll lists items in the store matching the given selector, calling appendFn on each one.
     func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
     	selectAll := selector.Empty()
     	for _, m := range store.List() {
    @@ -51,7 +51,9 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
     	return nil
     }
     
    -// ListAllByNamespace used to list items belongs to namespace from Indexer.
    +// ListAllByNamespace lists items in the given namespace in the store matching the given selector,
    +// calling appendFn on each one.
    +// If a blank namespace (NamespaceAll) is specified, this delegates to ListAll().
     func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
     	if namespace == metav1.NamespaceAll {
     		return ListAll(indexer, selector, appendFn)
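
The clarified ListAllByNamespace doc spells out the delegation to ListAll for the blank namespace. A short usage sketch follows; the app=web label selector and the pre-populated Indexer are assumptions for illustration.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// listWebPods collects pods labelled app=web from one namespace of the indexer.
// Passing the blank namespace ("") delegates to ListAll and scans every namespace.
func listWebPods(indexer cache.Indexer, namespace string) ([]*corev1.Pod, error) {
	selector := labels.SelectorFromSet(labels.Set{"app": "web"})
	var pods []*corev1.Pod
	err := cache.ListAllByNamespace(indexer, namespace, selector, func(m interface{}) {
		if pod, ok := m.(*corev1.Pod); ok {
			pods = append(pods, pod)
		}
	})
	return pods, err
}
```
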
    diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go
    index 10b7e6512e..f5708ffeb1 100644
    --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go
    +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go
    @@ -36,6 +36,10 @@ type Lister interface {
     // Watcher is any object that knows how to start a watch on a resource.
     type Watcher interface {
     	// Watch should begin a watch at the specified version.
    +	//
    +	// If Watch returns an error, it should handle its own cleanup, including
    +	// but not limited to calling Stop() on the watch, if one was constructed.
     +	// This allows the caller to ignore the returned watch when the error is non-nil.
     	Watch(options metav1.ListOptions) (watch.Interface, error)
     }
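
The new Watcher doc comment makes the cleanup contract explicit: on error, the implementation stops any watch it created, so callers may ignore the returned value. Below is a hedged sketch of a wrapper enforcing that contract; the start parameter is a placeholder for whatever actually opens the watch.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

// guardedWatch adapts a raw watch-construction function into a cache.WatchFunc
// that honors the Watcher contract: if an error is returned, any watch that was
// created along the way is stopped and never leaked to the caller.
func guardedWatch(start func(metav1.ListOptions) (watch.Interface, error)) cache.WatchFunc {
	return func(options metav1.ListOptions) (watch.Interface, error) {
		w, err := start(options)
		if err != nil {
			if w != nil {
				w.Stop()
			}
			return nil, err
		}
		return w, nil
	}
}
```
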
     
    diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go
    index f733e244cc..5e7dd57409 100644
    --- a/vendor/k8s.io/client-go/tools/cache/reflector.go
    +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go
    @@ -22,7 +22,6 @@ import (
     	"fmt"
     	"io"
     	"math/rand"
    -	"os"
     	"reflect"
     	"strings"
     	"sync"
    @@ -39,6 +38,7 @@ import (
     	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     	"k8s.io/apimachinery/pkg/util/wait"
     	"k8s.io/apimachinery/pkg/watch"
    +	clientfeatures "k8s.io/client-go/features"
     	"k8s.io/client-go/tools/pager"
     	"k8s.io/klog/v2"
     	"k8s.io/utils/clock"
    @@ -49,6 +49,12 @@ import (
     
     const defaultExpectedTypeName = ""
     
    +var (
    +	// We try to spread the load on apiserver by setting timeouts for
    +	// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
    +	defaultMinWatchTimeout = 5 * time.Minute
    +)
    +
     // Reflector watches a specified resource and causes all changes to be reflected in the given store.
     type Reflector struct {
     	// name identifies this reflector. By default it will be a file:line if possible.
    @@ -72,6 +78,8 @@ type Reflector struct {
     	// backoff manages backoff of ListWatch
     	backoffManager wait.BackoffManager
     	resyncPeriod   time.Duration
    +	// minWatchTimeout defines the minimum timeout for watch requests.
    +	minWatchTimeout time.Duration
     	// clock allows tests to manipulate time
     	clock clock.Clock
     	// paginatedResult defines whether pagination should be forced for list calls.
    @@ -151,12 +159,6 @@ func DefaultWatchErrorHandler(r *Reflector, err error) {
     	}
     }
     
    -var (
    -	// We try to spread the load on apiserver by setting timeouts for
    -	// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
    -	minWatchTimeout = 5 * time.Minute
    -)
    -
     // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
     // The indexer is configured to key on namespace
     func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
    @@ -194,6 +196,10 @@ type ReflectorOptions struct {
     	// (do not resync).
     	ResyncPeriod time.Duration
     
     +	// MinWatchTimeout, if non-zero, defines the minimum timeout for watch requests sent to kube-apiserver.
     +	// However, values lower than 5m will not be honored to avoid negative performance impact on the control plane.
    +	MinWatchTimeout time.Duration
    +
     	// Clock allows tests to control time. If unset defaults to clock.RealClock{}
     	Clock clock.Clock
     }
    @@ -213,9 +219,14 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S
     	if reflectorClock == nil {
     		reflectorClock = clock.RealClock{}
     	}
    +	minWatchTimeout := defaultMinWatchTimeout
    +	if options.MinWatchTimeout > defaultMinWatchTimeout {
    +		minWatchTimeout = options.MinWatchTimeout
    +	}
     	r := &Reflector{
     		name:            options.Name,
     		resyncPeriod:    options.ResyncPeriod,
    +		minWatchTimeout: minWatchTimeout,
     		typeDescription: options.TypeDescription,
     		listerWatcher:   lw,
     		store:           store,
    @@ -243,9 +254,7 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S
     	// don't overwrite UseWatchList if already set
     	// because the higher layers (e.g. storage/cacher) disabled it on purpose
     	if r.UseWatchList == nil {
    -		if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
    -			r.UseWatchList = ptr.To(true)
    -		}
    +		r.UseWatchList = ptr.To(clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient))
     	}
     
     	return r
    @@ -357,12 +366,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
     	}
     
     	klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name)
    -
    -	resyncerrc := make(chan error, 1)
    -	cancelCh := make(chan struct{})
    -	defer close(cancelCh)
    -	go r.startResync(stopCh, cancelCh, resyncerrc)
    -	return r.watch(w, stopCh, resyncerrc)
    +	return r.watchWithResync(w, stopCh)
     }
     
     // startResync periodically calls r.store.Resync() method.
    @@ -393,6 +397,15 @@ func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}
     	}
     }
     
    +// watchWithResync runs watch with startResync in the background.
    +func (r *Reflector) watchWithResync(w watch.Interface, stopCh <-chan struct{}) error {
    +	resyncerrc := make(chan error, 1)
    +	cancelCh := make(chan struct{})
    +	defer close(cancelCh)
    +	go r.startResync(stopCh, cancelCh, resyncerrc)
    +	return r.watch(w, stopCh, resyncerrc)
    +}
    +
     // watch simply starts a watch request with the server.
     func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error {
     	var err error
    @@ -415,7 +428,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc
     		start := r.clock.Now()
     
     		if w == nil {
    -			timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
    +			timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
     			options := metav1.ListOptions{
     				ResourceVersion: r.LastSyncResourceVersion(),
     				// We want to avoid situations of hanging watchers. Stop any watchers that do not
    @@ -442,13 +455,14 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc
     			}
     		}
     
    -		err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh)
    +		err = handleWatch(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion,
    +			r.clock, resyncerrc, stopCh)
     		// Ensure that watch will not be reused across iterations.
     		w.Stop()
     		w = nil
     		retry.After(err)
     		if err != nil {
    -			if err != errorStopRequested {
    +			if !errors.Is(err, errorStopRequested) {
     				switch {
     				case isExpiredError(err):
     					// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
    @@ -642,7 +656,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
     		// TODO(#115478): large "list", slow clients, slow network, p&f
     		//  might slow down streaming and eventually fail.
     		//  maybe in such a case we should retry with an increased timeout?
    -		timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
    +		timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
     		options := metav1.ListOptions{
     			ResourceVersion:      lastKnownRV,
     			AllowWatchBookmarks:  true,
    @@ -659,14 +673,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
     			}
     			return nil, err
     		}
    -		bookmarkReceived := pointer.Bool(false)
    -		err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription,
    +		watchListBookmarkReceived, err := handleListWatch(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription,
     			func(rv string) { resourceVersion = rv },
    -			bookmarkReceived,
     			r.clock, make(chan error), stopCh)
     		if err != nil {
     			w.Stop() // stop and retry with clean state
    -			if err == errorStopRequested {
    +			if errors.Is(err, errorStopRequested) {
     				return nil, nil
     			}
     			if isErrorRetriableWithSideEffectsFn(err) {
    @@ -674,7 +686,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
     			}
     			return nil, err
     		}
    -		if *bookmarkReceived {
    +		if watchListBookmarkReceived {
     			break
     		}
     	}
    @@ -686,10 +698,10 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
     	// we utilize the temporaryStore to ensure independence from the current store implementation.
     	// as of today, the store is implemented as a queue and will be drained by the higher-level
     	// component as soon as it finishes replacing the content.
    -	checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore)
    +	checkWatchListDataConsistencyIfRequested(wait.ContextForChannel(stopCh), r.name, resourceVersion, wrapListFuncWithContext(r.listerWatcher.List), temporaryStore.List)
     
    -	if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
    -		return nil, fmt.Errorf("unable to sync watch-list result: %v", err)
    +	if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
    +		return nil, fmt.Errorf("unable to sync watch-list result: %w", err)
     	}
     	initTrace.Step("SyncWith done")
     	r.setLastSyncResourceVersion(resourceVersion)
    @@ -706,8 +718,33 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
     	return r.store.Replace(found, resourceVersion)
     }
     
    -// watchHandler watches w and sets setLastSyncResourceVersion
    -func watchHandler(start time.Time,
    +// handleListWatch consumes events from w, updates the Store, and records the
    +// last seen ResourceVersion, to allow continuing from that ResourceVersion on
    +// retry. If successful, the watcher will be left open after receiving the
    +// initial set of objects, to allow watching for future events.
    +func handleListWatch(
    +	start time.Time,
    +	w watch.Interface,
    +	store Store,
    +	expectedType reflect.Type,
    +	expectedGVK *schema.GroupVersionKind,
    +	name string,
    +	expectedTypeName string,
    +	setLastSyncResourceVersion func(string),
    +	clock clock.Clock,
    +	errCh chan error,
    +	stopCh <-chan struct{},
    +) (bool, error) {
    +	exitOnWatchListBookmarkReceived := true
    +	return handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName,
    +		setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh)
    +}
    +
     +// handleWatch consumes events from w, updates the Store, and records the
    +// last seen ResourceVersion, to allow continuing from that ResourceVersion on
    +// retry. The watcher will always be stopped on exit.
    +func handleWatch(
    +	start time.Time,
     	w watch.Interface,
     	store Store,
     	expectedType reflect.Type,
    @@ -715,31 +752,56 @@ func watchHandler(start time.Time,
     	name string,
     	expectedTypeName string,
     	setLastSyncResourceVersion func(string),
    -	exitOnInitialEventsEndBookmark *bool,
     	clock clock.Clock,
    -	errc chan error,
    +	errCh chan error,
     	stopCh <-chan struct{},
     ) error {
    +	exitOnWatchListBookmarkReceived := false
    +	_, err := handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName,
    +		setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh)
    +	return err
    +}
    +
    +// handleAnyWatch consumes events from w, updates the Store, and records the last
    +// seen ResourceVersion, to allow continuing from that ResourceVersion on retry.
    +// If exitOnWatchListBookmarkReceived is true, the watch events will be consumed
    +// until a bookmark event is received with the WatchList annotation present.
    +// Returns true (watchListBookmarkReceived) if the WatchList bookmark was
    +// received, even if exitOnWatchListBookmarkReceived is false.
    +// The watcher will always be stopped, unless exitOnWatchListBookmarkReceived is
    +// true and watchListBookmarkReceived is true. This allows the same watch stream
    +// to be re-used by the caller to continue watching for new events.
    +func handleAnyWatch(start time.Time,
    +	w watch.Interface,
    +	store Store,
    +	expectedType reflect.Type,
    +	expectedGVK *schema.GroupVersionKind,
    +	name string,
    +	expectedTypeName string,
    +	setLastSyncResourceVersion func(string),
    +	exitOnWatchListBookmarkReceived bool,
    +	clock clock.Clock,
    +	errCh chan error,
    +	stopCh <-chan struct{},
    +) (bool, error) {
    +	watchListBookmarkReceived := false
     	eventCount := 0
    -	if exitOnInitialEventsEndBookmark != nil {
    -		// set it to false just in case somebody
    -		// made it positive
    -		*exitOnInitialEventsEndBookmark = false
    -	}
    +	initialEventsEndBookmarkWarningTicker := newInitialEventsEndBookmarkTicker(name, clock, start, exitOnWatchListBookmarkReceived)
    +	defer initialEventsEndBookmarkWarningTicker.Stop()
     
     loop:
     	for {
     		select {
     		case <-stopCh:
    -			return errorStopRequested
    -		case err := <-errc:
    -			return err
    +			return watchListBookmarkReceived, errorStopRequested
    +		case err := <-errCh:
    +			return watchListBookmarkReceived, err
     		case event, ok := <-w.ResultChan():
     			if !ok {
     				break loop
     			}
     			if event.Type == watch.Error {
    -				return apierrors.FromObject(event.Object)
    +				return watchListBookmarkReceived, apierrors.FromObject(event.Object)
     			}
     			if expectedType != nil {
     				if e, a := expectedType, reflect.TypeOf(event.Object); e != a {
    @@ -780,10 +842,8 @@ loop:
     				}
     			case watch.Bookmark:
     				// A `Bookmark` means watch has synced here, just update the resourceVersion
    -				if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" {
    -					if exitOnInitialEventsEndBookmark != nil {
    -						*exitOnInitialEventsEndBookmark = true
    -					}
    +				if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
    +					watchListBookmarkReceived = true
     				}
     			default:
     				utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
    @@ -793,20 +853,23 @@ loop:
     				rvu.UpdateResourceVersion(resourceVersion)
     			}
     			eventCount++
    -			if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark {
    +			if exitOnWatchListBookmarkReceived && watchListBookmarkReceived {
     				watchDuration := clock.Since(start)
     				klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration)
    -				return nil
    +				return watchListBookmarkReceived, nil
     			}
    +			initialEventsEndBookmarkWarningTicker.observeLastEventTimeStamp(clock.Now())
    +		case <-initialEventsEndBookmarkWarningTicker.C():
    +			initialEventsEndBookmarkWarningTicker.warnIfExpired()
     		}
     	}
     
     	watchDuration := clock.Since(start)
     	if watchDuration < 1*time.Second && eventCount == 0 {
    -		return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name)
    +		return watchListBookmarkReceived, fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name)
     	}
     	klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount)
    -	return nil
    +	return watchListBookmarkReceived, nil
     }
     
     // LastSyncResourceVersion is the resource version observed when last sync with the underlying store
    @@ -918,3 +981,95 @@ func isWatchErrorRetriable(err error) bool {
     	}
     	return false
     }
    +
    +// wrapListFuncWithContext simply wraps a ListFunc into another function that accepts a context and ignores it.
    +func wrapListFuncWithContext(listFn ListFunc) func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
    +	return func(_ context.Context, options metav1.ListOptions) (runtime.Object, error) {
    +		return listFn(options)
    +	}
    +}
    +
    +// initialEventsEndBookmarkTicker is a ticker that produces a warning if the bookmark event,
    +// which marks the end of the watch stream, has not been received within the defined tick interval.
    +//
    +// Note:
    +// The methods exposed by this type are not thread-safe.
    +type initialEventsEndBookmarkTicker struct {
    +	clock.Ticker
    +	clock clock.Clock
    +	name  string
    +
    +	watchStart           time.Time
    +	tickInterval         time.Duration
    +	lastEventObserveTime time.Time
    +}
    +
    +// newInitialEventsEndBookmarkTicker returns a noop ticker if exitOnWatchListBookmarkReceived is false.
    +// Otherwise, it returns a ticker that exposes a method producing a warning if the bookmark event,
    +// which marks the end of the watch stream, has not been received within the defined tick interval.
    +//
    +// Note that the caller controls whether to call t.C() and t.Stop().
    +//
    +// In practice, the reflector exits the watchHandler as soon as the bookmark event is received and calls the t.C() method.
    +func newInitialEventsEndBookmarkTicker(name string, c clock.Clock, watchStart time.Time, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker {
    +	return newInitialEventsEndBookmarkTickerInternal(name, c, watchStart, 10*time.Second, exitOnWatchListBookmarkReceived)
    +}
    +
    +func newInitialEventsEndBookmarkTickerInternal(name string, c clock.Clock, watchStart time.Time, tickInterval time.Duration, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker {
    +	clockWithTicker, ok := c.(clock.WithTicker)
    +	if !ok || !exitOnWatchListBookmarkReceived {
    +		if exitOnWatchListBookmarkReceived {
    +			klog.Warningf("clock does not support WithTicker interface but exitOnInitialEventsEndBookmark was requested")
    +		}
    +		return &initialEventsEndBookmarkTicker{
    +			Ticker: &noopTicker{},
    +		}
    +	}
    +
    +	return &initialEventsEndBookmarkTicker{
    +		Ticker:       clockWithTicker.NewTicker(tickInterval),
    +		clock:        c,
    +		name:         name,
    +		watchStart:   watchStart,
    +		tickInterval: tickInterval,
    +	}
    +}
    +
    +func (t *initialEventsEndBookmarkTicker) observeLastEventTimeStamp(lastEventObserveTime time.Time) {
    +	t.lastEventObserveTime = lastEventObserveTime
    +}
    +
    +func (t *initialEventsEndBookmarkTicker) warnIfExpired() {
    +	if err := t.produceWarningIfExpired(); err != nil {
    +		klog.Warning(err)
    +	}
    +}
    +
    +// produceWarningIfExpired returns an error that represents a warning when
    +// the time elapsed since the last received event exceeds the tickInterval.
    +//
    +// Note that this method should be called when t.C() yields a value.
    +func (t *initialEventsEndBookmarkTicker) produceWarningIfExpired() error {
    +	if _, ok := t.Ticker.(*noopTicker); ok {
    +		return nil /*noop ticker*/
    +	}
    +	if t.lastEventObserveTime.IsZero() {
    +		return fmt.Errorf("%s: awaiting required bookmark event for initial events stream, no events received for %v", t.name, t.clock.Since(t.watchStart))
    +	}
    +	elapsedTime := t.clock.Now().Sub(t.lastEventObserveTime)
    +	hasBookmarkTimerExpired := elapsedTime >= t.tickInterval
    +
    +	if !hasBookmarkTimerExpired {
    +		return nil
    +	}
    +	return fmt.Errorf("%s: hasn't received required bookmark event marking the end of initial events stream, received last event %v ago", t.name, elapsedTime)
    +}
    +
    +var _ clock.Ticker = &noopTicker{}
    +
    +// TODO(#115478): move to k8s/utils repo
    +type noopTicker struct{}
    +
    +func (t *noopTicker) C() <-chan time.Time { return nil }
    +
    +func (t *noopTicker) Stop() {}
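The reflector changes above replace the hard-coded annotation string with `metav1.InitialEventsAnnotationKey` and thread a `watchListBookmarkReceived` result out of `handleAnyWatch`. Below is a minimal sketch of the same bookmark check applied to a raw watch stream outside the reflector; `waitForInitialEventsEnd` is an illustrative helper, not part of client-go:

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// waitForInitialEventsEnd drains events from w until the server sends the
// bookmark carrying the "initial events end" annotation, mirroring the check
// handleAnyWatch performs. It returns the number of non-bookmark events seen
// before the bookmark, or an error if the stream closes first.
func waitForInitialEventsEnd(w watch.Interface) (int, error) {
	count := 0
	for event := range w.ResultChan() {
		if event.Type == watch.Error {
			return count, fmt.Errorf("watch error: %v", event.Object)
		}
		if event.Type == watch.Bookmark {
			obj, err := meta.Accessor(event.Object)
			if err != nil {
				return count, err
			}
			if obj.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
				return count, nil
			}
			continue
		}
		count++
	}
	return count, fmt.Errorf("watch closed before the initial-events-end bookmark was received")
}
```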
    diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
    index aa3027d714..a7e0d9c436 100644
    --- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
    +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
    @@ -1,5 +1,5 @@
     /*
    -Copyright 2023 The Kubernetes Authors.
    +Copyright 2024 The Kubernetes Authors.
     
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
    @@ -18,102 +18,26 @@ package cache
     
     import (
     	"context"
    -	"os"
    -	"sort"
    -	"strconv"
    -	"time"
     
    -	"github.com/google/go-cmp/cmp"
    -
    -	"k8s.io/apimachinery/pkg/api/meta"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	"k8s.io/apimachinery/pkg/runtime"
    -	"k8s.io/apimachinery/pkg/util/wait"
    -	"k8s.io/klog/v2"
    +	"k8s.io/client-go/util/consistencydetector"
     )
     
    -var dataConsistencyDetectionEnabled = false
    -
    -func init() {
    -	dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"))
    -}
    -
    -// checkWatchListConsistencyIfRequested performs a data consistency check only when
    +// checkWatchListDataConsistencyIfRequested performs a data consistency check only when
     // the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
     //
     // The consistency check is meant to be enforced only in the CI, not in production.
     // The check ensures that data retrieved by the watch-list api call
    -// is exactly the same as data received by the standard list api call.
    +// is exactly the same as data received by the standard list api call against etcd.
     //
     // Note that this function will panic when data inconsistency is detected.
     // This is intentional because we want to catch it in the CI.
    -func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) {
    -	if !dataConsistencyDetectionEnabled {
    -		return
    -	}
    -	checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store)
    -}
    -
    -// checkWatchListConsistency exists solely for testing purposes.
    -// we cannot use checkWatchListConsistencyIfRequested because
    -// it is guarded by an environmental variable.
    -// we cannot manipulate the environmental variable because
    -// it will affect other tests in this package.
    -func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) {
    -	klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity)
    -	opts := metav1.ListOptions{
    -		ResourceVersion:      lastSyncedResourceVersion,
    -		ResourceVersionMatch: metav1.ResourceVersionMatchExact,
    -	}
    -	var list runtime.Object
    -	err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) {
    -		list, err = listerWatcher.List(opts)
    -		if err != nil {
    -			// the consistency check will only be enabled in the CI
    -			// and LIST calls in general will be retired by the client-go library
    -			// if we fail simply log and retry
    -			klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err)
    -			return false, nil
    -		}
    -		return true, nil
    -	})
    -	if err != nil {
    -		klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err)
    +func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) {
    +	if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() {
     		return
     	}
    -
    -	rawListItems, err := meta.ExtractListWithAlloc(list)
    -	if err != nil {
    -		panic(err) // this should never happen
    -	}
    -
    -	listItems := toMetaObjectSliceOrDie(rawListItems)
    -	storeItems := toMetaObjectSliceOrDie(store.List())
    -
    -	sort.Sort(byUID(listItems))
    -	sort.Sort(byUID(storeItems))
    -
    -	if !cmp.Equal(listItems, storeItems) {
    -		klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems))
    -		msg := "data inconsistency detected for the watch-list feature, panicking!"
    -		panic(msg)
    -	}
    -}
    -
    -type byUID []metav1.Object
    -
    -func (a byUID) Len() int           { return len(a) }
    -func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() }
    -func (a byUID) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
    -
    -func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object {
    -	result := make([]metav1.Object, len(s))
    -	for i, v := range s {
    -		m, err := meta.Accessor(v)
    -		if err != nil {
    -			panic(err)
    -		}
    -		result[i] = m
    -	}
    -	return result
    +	// for informers we pass an empty ListOptions because
    +	// listFn might be wrapped for filtering during informer construction.
    +	consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn)
     }
    diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
    index dd5f918067..261dcacb5b 100644
    --- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
    +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
    @@ -21,7 +21,6 @@ import (
     	"errors"
     	"fmt"
     	"os"
    -	"path"
     	"path/filepath"
     	"reflect"
     	"strings"
    @@ -115,7 +114,7 @@ func ShortenConfig(config *Config) {
     // FlattenConfig changes the config object into a self-contained config (useful for making secrets)
     func FlattenConfig(config *Config) error {
     	for key, authInfo := range config.AuthInfos {
    -		baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
    +		baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "")
     		if err != nil {
     			return err
     		}
    @@ -130,7 +129,7 @@ func FlattenConfig(config *Config) error {
     		config.AuthInfos[key] = authInfo
     	}
     	for key, cluster := range config.Clusters {
    -		baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "")
    +		baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "")
     		if err != nil {
     			return err
     		}
    diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go
    index 31f8963160..2cd213ccb3 100644
    --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go
    +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go
    @@ -19,7 +19,6 @@ package clientcmd
     import (
     	"errors"
     	"os"
    -	"path"
     	"path/filepath"
     	"reflect"
     	"sort"
    @@ -148,7 +147,7 @@ func NewDefaultPathOptions() *PathOptions {
     		EnvVar:           RecommendedConfigPathEnvVar,
     		ExplicitFileFlag: RecommendedConfigPathFlag,
     
    -		GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName),
    +		GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName),
     
     		LoadingRules: NewDefaultClientConfigLoadingRules(),
     	}
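Both clientcmd fixes above swap the slash-only `path` package for `path/filepath`, so kubeconfig locations resolve with the host OS separator. A small illustration of the difference (the Windows-style path is a made-up example):

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	loc := `C:\Users\me\.kube\config`
	// path only understands forward slashes, so it cannot split this string
	// and returns ".".
	fmt.Println(path.Dir(loc))
	// filepath uses the platform separator: on Windows this prints
	// `C:\Users\me\.kube`; on Unix it also prints "." for this input.
	fmt.Println(filepath.Dir(loc))
}
```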
    diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
    index af840c4a25..d9d87d55f2 100644
    --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
    +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
    @@ -159,6 +159,10 @@ type LeaderElectionConfig struct {
     
     	// Name is the name of the resource lock for debugging
     	Name string
    +
    +	// Coordinated will use the Coordinated Leader Election feature
    +	// WARNING: Coordinated leader election is ALPHA.
    +	Coordinated bool
     }
     
     // LeaderCallbacks are callbacks that are triggered during certain
    @@ -249,7 +253,11 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
     	desc := le.config.Lock.Describe()
     	klog.Infof("attempting to acquire leader lease %v...", desc)
     	wait.JitterUntil(func() {
    -		succeeded = le.tryAcquireOrRenew(ctx)
    +		if !le.config.Coordinated {
    +			succeeded = le.tryAcquireOrRenew(ctx)
    +		} else {
    +			succeeded = le.tryCoordinatedRenew(ctx)
    +		}
     		le.maybeReportTransition()
     		if !succeeded {
     			klog.V(4).Infof("failed to acquire lease %v", desc)
    @@ -272,7 +280,11 @@ func (le *LeaderElector) renew(ctx context.Context) {
     		timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
     		defer timeoutCancel()
     		err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
    -			return le.tryAcquireOrRenew(timeoutCtx), nil
    +			if !le.config.Coordinated {
    +				return le.tryAcquireOrRenew(timeoutCtx), nil
    +			} else {
    +				return le.tryCoordinatedRenew(timeoutCtx), nil
    +			}
     		}, timeoutCtx.Done())
     
     		le.maybeReportTransition()
    @@ -304,7 +316,9 @@ func (le *LeaderElector) release() bool {
     		RenewTime:            now,
     		AcquireTime:          now,
     	}
    -	if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
    +	timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), le.config.RenewDeadline)
    +	defer timeoutCancel()
    +	if err := le.config.Lock.Update(timeoutCtx, leaderElectionRecord); err != nil {
     		klog.Errorf("Failed to release lock: %v", err)
     		return false
     	}
    @@ -313,6 +327,81 @@ func (le *LeaderElector) release() bool {
     	return true
     }
     
    +// tryCoordinatedRenew checks if it acquired a lease and tries to renew the
    +// lease if it has already been acquired. Returns true on success else returns
    +// false.
    +func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool {
    +	now := metav1.NewTime(le.clock.Now())
    +	leaderElectionRecord := rl.LeaderElectionRecord{
    +		HolderIdentity:       le.config.Lock.Identity(),
    +		LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
    +		RenewTime:            now,
    +		AcquireTime:          now,
    +	}
    +
    +	// 1. obtain the electionRecord
    +	oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx)
    +	if err != nil {
    +		if !errors.IsNotFound(err) {
    +			klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
    +			return false
    +		}
    +		klog.Infof("lease lock not found: %v", le.config.Lock.Describe())
    +		return false
    +	}
    +
    +	// 2. Record obtained, check the Identity & Time
    +	if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) {
    +		le.setObservedRecord(oldLeaderElectionRecord)
    +
    +		le.observedRawRecord = oldLeaderElectionRawRecord
    +	}
    +
    +	hasExpired := le.observedTime.Add(time.Second * time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).Before(now.Time)
    +	if hasExpired {
    +		klog.Infof("lock has expired: %v", le.config.Lock.Describe())
    +		return false
    +	}
    +
    +	if !le.IsLeader() {
    +		klog.V(6).Infof("lock is held by %v and has not yet expired: %v", oldLeaderElectionRecord.HolderIdentity, le.config.Lock.Describe())
    +		return false
    +	}
    +
    +	// 2b. If the lease has been marked as "end of term", don't renew it
    +	if le.IsLeader() && oldLeaderElectionRecord.PreferredHolder != "" {
    +		klog.V(4).Infof("lock is marked as 'end of term': %v", le.config.Lock.Describe())
    +		// TODO: Instead of letting the lease expire, the holder may delete it directly.
    +		// This will not be compatible with all controllers, so it needs to be opt-in behavior.
    +		// We must ensure all code guarded by this lease has successfully completed
    +		// prior to releasing or there may be two processes
    +		// simultaneously acting on the critical path.
    +		// Usually once this returns false, the process is terminated.
    +		// xref: OnStoppedLeading
    +		return false
    +	}
    +
    +	// 3. We're going to try to update. The leaderElectionRecord is set to its default
    +	// here. Let's correct it before updating.
    +	if le.IsLeader() {
    +		leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
    +		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
    +		leaderElectionRecord.Strategy = oldLeaderElectionRecord.Strategy
    +		le.metrics.slowpathExercised(le.config.Name)
    +	} else {
    +		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
    +	}
    +
    +	// update the lock itself
    +	if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil {
    +		klog.Errorf("Failed to update lock: %v", err)
    +		return false
    +	}
    +
    +	le.setObservedRecord(&leaderElectionRecord)
    +	return true
    +}
    +
     // tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
     // else it tries to renew the lease if it has already been acquired. Returns true
     // on success else returns false.
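With the new `Coordinated` field, opting into coordinated leader election is a single flag on the election config. A minimal sketch, assuming a `*resourcelock.LeaseLock` has already been built and the alpha CoordinatedLeaderElection feature is enabled in the cluster; the name and callbacks are placeholders:

```go
package example

import (
	"context"
	"time"

	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

// runElection blocks until ctx is cancelled, participating in coordinated
// leader election for the lease held in lock.
func runElection(ctx context.Context, lock *resourcelock.LeaseLock) {
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		Name:          "example-controller",
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		// Coordinated routes acquire/renew through tryCoordinatedRenew, which
		// only renews a lease the API server has already assigned to this
		// candidate. ALPHA, as the field comment above warns.
		Coordinated: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { klog.Info("started leading") },
			OnStoppedLeading: func() { klog.Info("stopped leading") },
		},
	})
}
```

When `Coordinated` is false, the elector behaves exactly as before and keeps calling `tryAcquireOrRenew`.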
    diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go
    new file mode 100644
    index 0000000000..74cf5bb5c2
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go
    @@ -0,0 +1,202 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package leaderelection
    +
    +import (
    +	"context"
    +	"reflect"
    +	"time"
    +
    +	v1 "k8s.io/api/coordination/v1"
    +	v1alpha1 "k8s.io/api/coordination/v1alpha1"
    +	apierrors "k8s.io/apimachinery/pkg/api/errors"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/fields"
    +	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    +	"k8s.io/client-go/informers"
    +	"k8s.io/client-go/kubernetes"
    +	coordinationv1alpha1client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
    +	"k8s.io/client-go/tools/cache"
    +	"k8s.io/client-go/util/workqueue"
    +	"k8s.io/klog/v2"
    +	"k8s.io/utils/clock"
    +)
    +
    +const requeueInterval = 5 * time.Minute
    +
    +type CacheSyncWaiter interface {
    +	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
    +}
    +
    +type LeaseCandidate struct {
    +	leaseClient            coordinationv1alpha1client.LeaseCandidateInterface
    +	leaseCandidateInformer cache.SharedIndexInformer
    +	informerFactory        informers.SharedInformerFactory
    +	hasSynced              cache.InformerSynced
    +
    +	// At most there will be one item in this Queue (since we only watch one item)
    +	queue workqueue.TypedRateLimitingInterface[int]
    +
    +	name      string
    +	namespace string
    +
    +	// controller lease
    +	leaseName string
    +
    +	clock clock.Clock
    +
    +	binaryVersion, emulationVersion string
    +	preferredStrategies             []v1.CoordinatedLeaseStrategy
    +}
    +
    +// NewCandidate creates a new LeaseCandidate controller that creates a
    +// LeaseCandidate object if it does not exist, watches changes
    +// to the corresponding object, and renews it if PingTime is set.
    +// WARNING: This is an ALPHA feature. Ensure that the CoordinatedLeaderElection
    +// feature gate is on.
    +func NewCandidate(clientset kubernetes.Interface,
    +	candidateNamespace string,
    +	candidateName string,
    +	targetLease string,
    +	binaryVersion, emulationVersion string,
    +	preferredStrategies []v1.CoordinatedLeaseStrategy,
    +) (*LeaseCandidate, CacheSyncWaiter, error) {
    +	fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String()
    +	// A separate informer factory is required because this must start before informerFactories
    +	// are started for leader elected components
    +	informerFactory := informers.NewSharedInformerFactoryWithOptions(
    +		clientset, 5*time.Minute,
    +		informers.WithTweakListOptions(func(options *metav1.ListOptions) {
    +			options.FieldSelector = fieldSelector
    +		}),
    +	)
    +	leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates().Informer()
    +
    +	lc := &LeaseCandidate{
    +		leaseClient:            clientset.CoordinationV1alpha1().LeaseCandidates(candidateNamespace),
    +		leaseCandidateInformer: leaseCandidateInformer,
    +		informerFactory:        informerFactory,
    +		name:                   candidateName,
    +		namespace:              candidateNamespace,
    +		leaseName:              targetLease,
    +		clock:                  clock.RealClock{},
    +		binaryVersion:          binaryVersion,
    +		emulationVersion:       emulationVersion,
    +		preferredStrategies:    preferredStrategies,
    +	}
    +	lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"})
    +
    +	h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
    +		UpdateFunc: func(oldObj, newObj interface{}) {
    +			if leasecandidate, ok := newObj.(*v1alpha1.LeaseCandidate); ok {
    +				if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) {
    +					lc.enqueueLease()
    +				}
    +			}
    +		},
    +	})
    +	if err != nil {
    +		return nil, nil, err
    +	}
    +	lc.hasSynced = h.HasSynced
    +
    +	return lc, informerFactory, nil
    +}
    +
    +func (c *LeaseCandidate) Run(ctx context.Context) {
    +	defer c.queue.ShutDown()
    +
    +	c.informerFactory.Start(ctx.Done())
    +	if !cache.WaitForNamedCacheSync("leasecandidateclient", ctx.Done(), c.hasSynced) {
    +		return
    +	}
    +
    +	c.enqueueLease()
    +	go c.runWorker(ctx)
    +	<-ctx.Done()
    +}
    +
    +func (c *LeaseCandidate) runWorker(ctx context.Context) {
    +	for c.processNextWorkItem(ctx) {
    +	}
    +}
    +
    +func (c *LeaseCandidate) processNextWorkItem(ctx context.Context) bool {
    +	key, shutdown := c.queue.Get()
    +	if shutdown {
    +		return false
    +	}
    +	defer c.queue.Done(key)
    +
    +	err := c.ensureLease(ctx)
    +	if err == nil {
    +		c.queue.AddAfter(key, requeueInterval)
    +		return true
    +	}
    +
    +	utilruntime.HandleError(err)
    +	c.queue.AddRateLimited(key)
    +
    +	return true
    +}
    +
    +func (c *LeaseCandidate) enqueueLease() {
    +	c.queue.Add(0)
    +}
    +
    +// ensureLease creates the lease candidate if it does not exist and renews it if it exists.
    +// Returns any error that occurs.
    +func (c *LeaseCandidate) ensureLease(ctx context.Context) error {
    +	lease, err := c.leaseClient.Get(ctx, c.name, metav1.GetOptions{})
    +	if apierrors.IsNotFound(err) {
    +		klog.V(2).Infof("Creating lease candidate")
    +		// lease does not exist, create it.
    +		leaseToCreate := c.newLeaseCandidate()
    +		if _, err := c.leaseClient.Create(ctx, leaseToCreate, metav1.CreateOptions{}); err != nil {
    +			return err
    +		}
    +		klog.V(2).Infof("Created lease candidate")
    +		return nil
    +	} else if err != nil {
    +		return err
    +	}
    +	klog.V(2).Infof("lease candidate exists. Renewing.")
    +	clone := lease.DeepCopy()
    +	clone.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
    +	_, err = c.leaseClient.Update(ctx, clone, metav1.UpdateOptions{})
    +	if err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +func (c *LeaseCandidate) newLeaseCandidate() *v1alpha1.LeaseCandidate {
    +	lc := &v1alpha1.LeaseCandidate{
    +		ObjectMeta: metav1.ObjectMeta{
    +			Name:      c.name,
    +			Namespace: c.namespace,
    +		},
    +		Spec: v1alpha1.LeaseCandidateSpec{
    +			LeaseName:           c.leaseName,
    +			BinaryVersion:       c.binaryVersion,
    +			EmulationVersion:    c.emulationVersion,
    +			PreferredStrategies: c.preferredStrategies,
    +		},
    +	}
    +	lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
    +	return lc
    +}
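The new `LeaseCandidate` controller is what each participant runs alongside the elector so the coordinated-leader-election controller in the API server can pick a preferred holder. A hedged usage sketch, assuming a constructed `kubernetes.Interface`, the coordination/v1alpha1 API being served, and the `OldestEmulationVersion` strategy constant from the 1.31 coordination/v1 API; all names are placeholders:

```go
package example

import (
	"context"

	coordinationv1 "k8s.io/api/coordination/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
)

// runCandidate registers this instance as a candidate for the shared
// "example-controller" lease and keeps its LeaseCandidate renewed.
func runCandidate(ctx context.Context, cs kubernetes.Interface, identity string) error {
	candidate, _, err := leaderelection.NewCandidate(
		cs,
		"kube-system",        // namespace for the LeaseCandidate object
		identity,             // unique, per-instance candidate name
		"example-controller", // the Lease the candidates compete for
		"1.31.0",             // binary version
		"1.31.0",             // emulation version
		[]coordinationv1.CoordinatedLeaseStrategy{coordinationv1.OldestEmulationVersion},
	)
	if err != nil {
		return err
	}
	go candidate.Run(ctx) // starts the private informer and renews on ping
	return nil
}
```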
    diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
    index 483753d632..053a7570d7 100644
    --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
    +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
    @@ -19,14 +19,15 @@ package resourcelock
     import (
     	"context"
     	"fmt"
    -	clientset "k8s.io/client-go/kubernetes"
    -	restclient "k8s.io/client-go/rest"
     	"time"
     
    +	v1 "k8s.io/api/coordination/v1"
     	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     	"k8s.io/apimachinery/pkg/runtime"
    +	clientset "k8s.io/client-go/kubernetes"
     	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
     	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    +	restclient "k8s.io/client-go/rest"
     )
     
     const (
    @@ -114,11 +115,13 @@ type LeaderElectionRecord struct {
     	// attempt to acquire leases with empty identities and will wait for the full lease
     	// interval to expire before attempting to reacquire. This value is set to empty when
     	// a client voluntarily steps down.
    -	HolderIdentity       string      `json:"holderIdentity"`
    -	LeaseDurationSeconds int         `json:"leaseDurationSeconds"`
    -	AcquireTime          metav1.Time `json:"acquireTime"`
    -	RenewTime            metav1.Time `json:"renewTime"`
    -	LeaderTransitions    int         `json:"leaderTransitions"`
    +	HolderIdentity       string                      `json:"holderIdentity"`
    +	LeaseDurationSeconds int                         `json:"leaseDurationSeconds"`
    +	AcquireTime          metav1.Time                 `json:"acquireTime"`
    +	RenewTime            metav1.Time                 `json:"renewTime"`
    +	LeaderTransitions    int                         `json:"leaderTransitions"`
    +	Strategy             v1.CoordinatedLeaseStrategy `json:"strategy"`
    +	PreferredHolder      string                      `json:"preferredHolder"`
     }
     
     // EventRecorder records a change in the ResourceLock.
    diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
    index 8a9d7d60f2..7cd2a8b9ca 100644
    --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
    +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
    @@ -122,6 +122,12 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec
     	if spec.RenewTime != nil {
     		r.RenewTime = metav1.Time{Time: spec.RenewTime.Time}
     	}
    +	if spec.PreferredHolder != nil {
    +		r.PreferredHolder = *spec.PreferredHolder
    +	}
    +	if spec.Strategy != nil {
    +		r.Strategy = *spec.Strategy
    +	}
     	return &r
     
     }
    @@ -129,11 +135,18 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec
     func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec {
     	leaseDurationSeconds := int32(ler.LeaseDurationSeconds)
     	leaseTransitions := int32(ler.LeaderTransitions)
    -	return coordinationv1.LeaseSpec{
    +	spec := coordinationv1.LeaseSpec{
     		HolderIdentity:       &ler.HolderIdentity,
     		LeaseDurationSeconds: &leaseDurationSeconds,
     		AcquireTime:          &metav1.MicroTime{Time: ler.AcquireTime.Time},
     		RenewTime:            &metav1.MicroTime{Time: ler.RenewTime.Time},
     		LeaseTransitions:     &leaseTransitions,
     	}
    +	if ler.PreferredHolder != "" {
    +		spec.PreferredHolder = &ler.PreferredHolder
    +	}
    +	if ler.Strategy != "" {
    +		spec.Strategy = &ler.Strategy
    +	}
    +	return spec
     }
    diff --git a/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go b/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go
    index 8fb74a4185..7fcc2492bf 100644
    --- a/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go
    +++ b/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go
    @@ -21,21 +21,21 @@ import (
     	"k8s.io/klog/v2"
     )
     
    -var _ httpstream.Dialer = &fallbackDialer{}
    +var _ httpstream.Dialer = &FallbackDialer{}
     
    -// fallbackDialer encapsulates a primary and secondary dialer, including
    +// FallbackDialer encapsulates a primary and secondary dialer, including
     // the boolean function to determine if the primary dialer failed. Implements
     // the httpstream.Dialer interface.
    -type fallbackDialer struct {
    +type FallbackDialer struct {
     	primary        httpstream.Dialer
     	secondary      httpstream.Dialer
     	shouldFallback func(error) bool
     }
     
    -// NewFallbackDialer creates the fallbackDialer with the primary and secondary dialers,
    +// NewFallbackDialer creates the FallbackDialer with the primary and secondary dialers,
     // as well as the boolean function to determine if the primary dialer failed.
     func NewFallbackDialer(primary, secondary httpstream.Dialer, shouldFallback func(error) bool) httpstream.Dialer {
    -	return &fallbackDialer{
    +	return &FallbackDialer{
     		primary:        primary,
     		secondary:      secondary,
     		shouldFallback: shouldFallback,
    @@ -47,7 +47,7 @@ func NewFallbackDialer(primary, secondary httpstream.Dialer, shouldFallback func
     // httpstream.Connection and the negotiated protocol version accepted. If the initial
     // primary dialer fails, this function attempts the secondary dialer. Returns an error
     // if one occurs.
    -func (f *fallbackDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
    +func (f *FallbackDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
     	conn, version, err := f.primary.Dial(protocols...)
     	if err != nil && f.shouldFallback(err) {
     		klog.V(4).Infof("fallback to secondary dialer from primary dialer err: %v", err)
    diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go
    index 0745fb4a35..55947d2094 100644
    --- a/vendor/k8s.io/client-go/tools/record/event.go
    +++ b/vendor/k8s.io/client-go/tools/record/event.go
    @@ -203,8 +203,8 @@ func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster {
     	// - The context was nil.
     	// - The context was context.Background() to begin with.
     	//
    -	// Both cases get checked here.
    -	haveCtxCancelation := ctx.Done() == nil
    +	// Both cases get checked here: we have cancelation if (and only if) there is a channel.
    +	haveCtxCancelation := ctx.Done() != nil
     
     	eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx)
     
    @@ -395,7 +395,11 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc
     func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
     	watcher, err := e.Watch()
     	if err != nil {
    +		// This function traditionally returns no error even though it can fail.
    +		// Instead, it logs the error and returns an empty watch. The empty
    +		// watch ensures that callers don't crash when calling Stop.
     		klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)")
    +		return watch.NewEmptyWatch()
     	}
     	go func() {
     		defer utilruntime.HandleCrash()
    diff --git a/vendor/k8s.io/client-go/transport/cert_rotation.go b/vendor/k8s.io/client-go/transport/cert_rotation.go
    index dc22b6ec4c..e76f65812d 100644
    --- a/vendor/k8s.io/client-go/transport/cert_rotation.go
    +++ b/vendor/k8s.io/client-go/transport/cert_rotation.go
    @@ -47,14 +47,17 @@ type dynamicClientCert struct {
     	connDialer *connrotation.Dialer
     
     	// queue only ever has one item, but it has nice error handling backoff/retry semantics
    -	queue workqueue.RateLimitingInterface
    +	queue workqueue.TypedRateLimitingInterface[string]
     }
     
     func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert {
     	d := &dynamicClientCert{
     		reload:     reload,
     		connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)),
    -		queue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"),
    +		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
    +			workqueue.DefaultTypedControllerRateLimiter[string](),
    +			workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"},
    +		),
     	}
     
     	return d
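The cert-rotation queue above is a representative migration from the deprecated untyped workqueue constructors to the generic typed ones; the same pattern applies to any controller queue. A minimal sketch with string keys, mirroring the constructor call used in the diff:

```go
package example

import "k8s.io/client-go/util/workqueue"

// newQueue builds a rate-limited work queue whose items are plain strings,
// so misuse is caught at compile time instead of via interface{} assertions.
func newQueue() workqueue.TypedRateLimitingInterface[string] {
	return workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
	)
}
```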
    diff --git a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go
    index 624dd5473a..8286a8eb52 100644
    --- a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go
    +++ b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go
    @@ -111,6 +111,10 @@ func (rt *RoundTripper) RoundTrip(request *http.Request) (retResp *http.Response
     	wsConn, resp, err := dialer.DialContext(request.Context(), request.URL.String(), request.Header)
     	if err != nil {
     		if errors.Is(err, gwebsocket.ErrBadHandshake) {
    +			// Enhance the error message with the response status if possible.
    +			if resp != nil && len(resp.Status) > 0 {
    +				err = fmt.Errorf("%w (%s)", err, resp.Status)
    +			}
     			return nil, &httpstream.UpgradeFailureError{Cause: err}
     		}
     		return nil, err
    diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
    new file mode 100644
    index 0000000000..b33d08032f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
    @@ -0,0 +1,146 @@
    +/*
    +Copyright 2023 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package consistencydetector
    +
    +import (
    +	"context"
    +	"fmt"
    +	"sort"
    +	"time"
    +
    +	"github.com/google/go-cmp/cmp"
    +
    +	"k8s.io/apimachinery/pkg/api/meta"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	"k8s.io/apimachinery/pkg/util/wait"
    +	"k8s.io/klog/v2"
    +)
    +
    +type RetrieveItemsFunc[U any] func() []U
    +
    +type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error)
    +
    +// CheckDataConsistency exists solely for testing purposes:
    +// we cannot use checkWatchListDataConsistencyIfRequested in tests
    +// because it is guarded by an environment variable,
    +// and we cannot manipulate that environment variable
    +// because it would affect other tests in this package.
    +func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
    +	if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) {
    +		klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity)
    +		return
    +	}
    +	klog.Warningf("data consistency check for %s is enabled, this will result in an additional call to the API server.", identity)
    +
    +	retrievedItems := toMetaObjectSliceOrDie(retrieveItemsFn())
    +	listOptions = prepareListCallOptions(lastSyncedResourceVersion, listOptions, len(retrievedItems))
    +	var list runtime.Object
    +	err := wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (done bool, err error) {
    +		list, err = listFn(ctx, listOptions)
    +		if err != nil {
    +			// the consistency check will only be enabled in the CI
    +			// and LIST calls in general will be retried by the client-go library
    +			// if we fail simply log and retry
    +			klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err)
    +			return false, nil
    +		}
    +		return true, nil
    +	})
    +	if err != nil {
    +		klog.Errorf("failed to list data from the server, the data consistency check for %s won't be performed, stopCh was closed, err: %v", identity, err)
    +		return
    +	}
    +
    +	rawListItems, err := meta.ExtractListWithAlloc(list)
    +	if err != nil {
    +		panic(err) // this should never happen
    +	}
    +	listItems := toMetaObjectSliceOrDie(rawListItems)
    +
    +	sort.Sort(byUID(listItems))
    +	sort.Sort(byUID(retrievedItems))
    +
    +	if !cmp.Equal(listItems, retrievedItems) {
    +		klog.Infof("previously received data for %s is different than received by the standard list api call against etcd, diff: %v", identity, cmp.Diff(listItems, retrievedItems))
    +		msg := fmt.Sprintf("data inconsistency detected for %s, panicking!", identity)
    +		panic(msg)
    +	}
    +}
    +
    +// canFormAdditionalListCall ensures that we can form a valid LIST request
    +// for checking data consistency.
    +func canFormAdditionalListCall(lastSyncedResourceVersion string, listOptions metav1.ListOptions) bool {
    +	// since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact
    +	// we need to make sure that the continuation hasn't been set
    +	// https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L38
    +	if len(listOptions.Continue) > 0 {
    +		return false
    +	}
    +
    +	// since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact
    +	// we need to make sure that the RV is valid because the validation code forbids RV == "0"
    +	// https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L44
    +	if lastSyncedResourceVersion == "0" {
    +		return false
    +	}
    +
    +	return true
    +}
    +
    +// prepareListCallOptions changes the input list options so that
    +// the list call goes directly to etcd
    +func prepareListCallOptions(lastSyncedResourceVersion string, listOptions metav1.ListOptions, retrievedItemsCount int) metav1.ListOptions {
    +	// this is our legacy case:
    +	//
    +	// the watch cache skips the Limit if the ResourceVersion was set to "0"
    +	// thus, to compare with data retrieved directly from etcd
    +	// we need to skip the limit for the list call as well.
    +	//
    +	// note that when the number of retrieved items is less than the request limit,
    +	// it means either the watch cache is disabled, or there is not enough data.
    +	// in both cases, we can use the limit because we will be able to compare
    +	// the data with the items retrieved from etcd.
    +	if listOptions.ResourceVersion == "0" && listOptions.Limit > 0 && int64(retrievedItemsCount) > listOptions.Limit {
    +		listOptions.Limit = 0
    +	}
    +
    +	// set the RV and RVM so that we get the snapshot of data
    +	// directly from etcd.
    +	listOptions.ResourceVersion = lastSyncedResourceVersion
    +	listOptions.ResourceVersionMatch = metav1.ResourceVersionMatchExact
    +
    +	return listOptions
    +}
    +
    +type byUID []metav1.Object
    +
    +func (a byUID) Len() int           { return len(a) }
    +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() }
    +func (a byUID) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
    +
    +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object {
    +	result := make([]metav1.Object, len(s))
    +	for i, v := range s {
    +		m, err := meta.Accessor(v)
    +		if err != nil {
    +			panic(err)
    +		}
    +		result[i] = m
    +	}
    +	return result
    +}
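A hedged sketch of driving the generic `CheckDataConsistency` helper directly, for example from a test: the list function wraps a typed client call and the retrieve function returns items already held locally (the pod client, namespace, and `localPods` slice are assumptions for illustration):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/consistencydetector"
)

// verifyPods re-lists pods at lastSyncedRV and panics if the result differs
// from the locally held items.
func verifyPods(ctx context.Context, cs kubernetes.Interface, lastSyncedRV string, localPods []*corev1.Pod) {
	var listFn consistencydetector.ListFunc[*corev1.PodList] = func(ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) {
		return cs.CoreV1().Pods("default").List(ctx, opts)
	}
	var retrieveFn consistencydetector.RetrieveItemsFunc[*corev1.Pod] = func() []*corev1.Pod {
		return localPods
	}
	consistencydetector.CheckDataConsistency(ctx, "default/pods", lastSyncedRV,
		listFn, metav1.ListOptions{}, retrieveFn)
}
```

Note that the helper panics on mismatch by design, so it should only run in CI jobs that set the detector environment variables.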
    diff --git a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
    new file mode 100644
    index 0000000000..7610c05c28
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
    @@ -0,0 +1,70 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package consistencydetector
    +
    +import (
    +	"context"
    +	"os"
    +	"strconv"
    +
    +	"k8s.io/apimachinery/pkg/api/meta"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +)
    +
    +var dataConsistencyDetectionForListFromCacheEnabled = false
    +
    +func init() {
    +	dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR"))
    +}
    +
    +// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when
    +// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup
    +// for requests that have a high chance of being served from the watch-cache.
    +//
    +// The consistency check is meant to be enforced only in the CI, not in production.
    +// The check ensures that data retrieved by a list api call from the watch-cache
    +// is exactly the same as data received by the list api call from etcd.
    +//
    +// Note that this function will panic when data inconsistency is detected.
    +// This is intentional because we want to catch it in the CI.
    +//
    +// Note that this function doesn't examine the ListOptions to determine
    +// if the original request has hit the cache because it would be challenging
    +// to maintain consistency with the server-side implementation.
    +// For simplicity, we assume that the first request retrieved data from
    +// the cache (even though this might not be true for some requests)
    +// and issue the second call to get data from etcd for comparison.
    +func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
    +	if !dataConsistencyDetectionForListFromCacheEnabled {
    +		return
    +	}
    +	checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList)
    +}
    +
    +func checkListFromCacheDataConsistencyIfRequestedInternal[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
    +	receivedListMeta, err := meta.ListAccessor(receivedList)
    +	if err != nil {
    +		panic(err)
    +	}
    +	rawListItems, err := meta.ExtractListWithAlloc(receivedList)
    +	if err != nil {
    +		panic(err) // this should never happen
    +	}
    +	lastSyncedResourceVersion := receivedListMeta.GetResourceVersion()
    +	CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listItemsFn, optionsUsedToReceiveList, func() []runtime.Object { return rawListItems })
    +}
    diff --git a/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go
    new file mode 100644
    index 0000000000..cda5fc205f
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go
    @@ -0,0 +1,54 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package consistencydetector
    +
    +import (
    +	"context"
    +	"os"
    +	"strconv"
    +
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +)
    +
    +var dataConsistencyDetectionForWatchListEnabled = false
    +
    +func init() {
    +	dataConsistencyDetectionForWatchListEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"))
    +}
    +
    +// IsDataConsistencyDetectionForWatchListEnabled returns true when
    +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
    +func IsDataConsistencyDetectionForWatchListEnabled() bool {
    +	return dataConsistencyDetectionForWatchListEnabled
    +}
    +
    +// CheckWatchListFromCacheDataConsistencyIfRequested performs a data consistency check only when
    +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
    +//
    +// The consistency check is meant to be enforced only in the CI, not in production.
    +// The check ensures that data retrieved by the watch-list api call
    +// is exactly the same as data received by the standard list api call against etcd.
    +//
    +// Note that this function will panic when data inconsistency is detected.
    +// This is intentional because we want to catch it in the CI.
    +func CheckWatchListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
    +	if !IsDataConsistencyDetectionForWatchListEnabled() {
    +		return
    +	}
    +	checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList)
    +}
    diff --git a/vendor/k8s.io/client-go/util/watchlist/watch_list.go b/vendor/k8s.io/client-go/util/watchlist/watch_list.go
    new file mode 100644
    index 0000000000..84106458a5
    --- /dev/null
    +++ b/vendor/k8s.io/client-go/util/watchlist/watch_list.go
    @@ -0,0 +1,82 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package watchlist
    +
    +import (
    +	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
    +	metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
    +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    +	"k8s.io/apimachinery/pkg/runtime"
    +	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    +	clientfeatures "k8s.io/client-go/features"
    +	"k8s.io/utils/ptr"
    +)
    +
    +var scheme = runtime.NewScheme()
    +
    +func init() {
    +	utilruntime.Must(metainternalversion.AddToScheme(scheme))
    +}
    +
    +// PrepareWatchListOptionsFromListOptions creates a new ListOptions
    +// that can be used for a watch-list request from the given listOptions.
    +//
    +// This function also determines if the given listOptions can be used to form a watch-list request,
    +// which would result in streaming semantically equivalent data from the server.
    +func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) {
    +	if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
    +		return metav1.ListOptions{}, false, nil
    +	}
    +
    +	internalListOptions := &metainternalversion.ListOptions{}
    +	if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil {
    +		return metav1.ListOptions{}, false, err
    +	}
    +	if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 {
    +		return metav1.ListOptions{}, false, nil
    +	}
    +
    +	watchListOptions := listOptions
    +	// this is our legacy case, the cache ignores LIMIT for
    +	// ResourceVersion == 0 and RVM=unset|NotOlderThan
    +	if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" {
    +		return metav1.ListOptions{}, false, nil
    +	}
    +	watchListOptions.Limit = 0
    +
    +	// to ensure that we can create a watch-list request that returns
    +	// semantically equivalent data for the given listOptions,
    +	// we need to validate that the RVM for the list is supported by watch-list requests.
    +	if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
    +		return metav1.ListOptions{}, false, nil
    +	}
    +	watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan
    +
    +	watchListOptions.Watch = true
    +	watchListOptions.AllowWatchBookmarks = true
    +	watchListOptions.SendInitialEvents = ptr.To(true)
    +
    +	internalWatchListOptions := &metainternalversion.ListOptions{}
    +	if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil {
    +		return metav1.ListOptions{}, false, err
    +	}
    +	if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 {
    +		return metav1.ListOptions{}, false, nil
    +	}
    +
    +	return watchListOptions, true, nil
    +}
    diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
    index efda7c197f..1f9567881c 100644
    --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
    +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
    @@ -24,49 +24,66 @@ import (
     	"golang.org/x/time/rate"
     )
     
    -type RateLimiter interface {
    +// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead.
    +type RateLimiter TypedRateLimiter[any]
    +
    +type TypedRateLimiter[T comparable] interface {
     	// When gets an item and gets to decide how long that item should wait
    -	When(item interface{}) time.Duration
    +	When(item T) time.Duration
     	// Forget indicates that an item is finished being retried.  Doesn't matter whether it's for failing
     	// or for success, we'll stop tracking it
    -	Forget(item interface{})
    +	Forget(item T)
     	// NumRequeues returns back how many failures the item has had
    -	NumRequeues(item interface{}) int
    +	NumRequeues(item T) int
     }
     
     // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue.  It has
     // both overall and per-item rate limiting.  The overall is a token bucket and the per-item is exponential
    +//
    +// Deprecated: Use DefaultTypedControllerRateLimiter instead.
     func DefaultControllerRateLimiter() RateLimiter {
    -	return NewMaxOfRateLimiter(
    -		NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
    +	return DefaultTypedControllerRateLimiter[any]()
    +}
    +
    +// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue.  It has
    +// both overall and per-item rate limiting.  The overall is a token bucket and the per-item is exponential
    +func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] {
    +	return NewTypedMaxOfRateLimiter(
    +		NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second),
     		// 10 qps, 100 bucket size.  This is only for retry speed and its only the overall factor (not per item)
    -		&BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
    +		&TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
     	)
     }
     
    -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
    -type BucketRateLimiter struct {
    +// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead.
    +type BucketRateLimiter = TypedBucketRateLimiter[any]
    +
    +// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
    +type TypedBucketRateLimiter[T comparable] struct {
     	*rate.Limiter
     }
     
     var _ RateLimiter = &BucketRateLimiter{}
     
    -func (r *BucketRateLimiter) When(item interface{}) time.Duration {
    +func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration {
     	return r.Limiter.Reserve().Delay()
     }
     
    -func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
    +func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int {
     	return 0
     }
     
    -func (r *BucketRateLimiter) Forget(item interface{}) {
    +func (r *TypedBucketRateLimiter[T]) Forget(item T) {
     }
     
    -// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit
    +// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead.
    +type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any]
    +
    +// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit
     // dealing with max failures and expiration are up to the caller
    -type ItemExponentialFailureRateLimiter struct {
    +type TypedItemExponentialFailureRateLimiter[T comparable] struct {
     	failuresLock sync.Mutex
    -	failures     map[interface{}]int
    +	failures     map[T]int
     
     	baseDelay time.Duration
     	maxDelay  time.Duration
    @@ -74,19 +91,29 @@ type ItemExponentialFailureRateLimiter struct {
     
     var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
     
    +// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead.
     func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
    -	return &ItemExponentialFailureRateLimiter{
    -		failures:  map[interface{}]int{},
    +	return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay)
    +}
    +
    +func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] {
    +	return &TypedItemExponentialFailureRateLimiter[T]{
    +		failures:  map[T]int{},
     		baseDelay: baseDelay,
     		maxDelay:  maxDelay,
     	}
     }
     
    +// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead.
     func DefaultItemBasedRateLimiter() RateLimiter {
    -	return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
    +	return DefaultTypedItemBasedRateLimiter[any]()
     }
     
    -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
    +func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] {
    +	return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second)
    +}
    +
    +func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration {
     	r.failuresLock.Lock()
     	defer r.failuresLock.Unlock()
     
    @@ -107,14 +134,14 @@ func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration
     	return calculated
     }
     
    -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
    +func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int {
     	r.failuresLock.Lock()
     	defer r.failuresLock.Unlock()
     
     	return r.failures[item]
     }
     
    -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
    +func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) {
     	r.failuresLock.Lock()
     	defer r.failuresLock.Unlock()
     
    @@ -122,9 +149,13 @@ func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
     }
     
     // ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
    -type ItemFastSlowRateLimiter struct {
    +// Deprecated: Use TypedItemFastSlowRateLimiter instead.
    +type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any]
    +
    +// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
    +type TypedItemFastSlowRateLimiter[T comparable] struct {
     	failuresLock sync.Mutex
    -	failures     map[interface{}]int
    +	failures     map[T]int
     
     	maxFastAttempts int
     	fastDelay       time.Duration
    @@ -133,16 +164,21 @@ type ItemFastSlowRateLimiter struct {
     
     var _ RateLimiter = &ItemFastSlowRateLimiter{}
     
    +// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead.
     func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
    -	return &ItemFastSlowRateLimiter{
    -		failures:        map[interface{}]int{},
    +	return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts)
    +}
    +
    +func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] {
    +	return &TypedItemFastSlowRateLimiter[T]{
    +		failures:        map[T]int{},
     		fastDelay:       fastDelay,
     		slowDelay:       slowDelay,
     		maxFastAttempts: maxFastAttempts,
     	}
     }
     
    -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
    +func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration {
     	r.failuresLock.Lock()
     	defer r.failuresLock.Unlock()
     
    @@ -155,14 +191,14 @@ func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
     	return r.slowDelay
     }
     
    -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
    +func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int {
     	r.failuresLock.Lock()
     	defer r.failuresLock.Unlock()
     
     	return r.failures[item]
     }
     
    -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
    +func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) {
     	r.failuresLock.Lock()
     	defer r.failuresLock.Unlock()
     
    @@ -172,11 +208,18 @@ func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
     // MaxOfRateLimiter calls every RateLimiter and returns the worst case response
     // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
     // were separately delayed a longer time.
    -type MaxOfRateLimiter struct {
    -	limiters []RateLimiter
    +//
    +// Deprecated: Use TypedMaxOfRateLimiter instead.
    +type MaxOfRateLimiter = TypedMaxOfRateLimiter[any]
    +
    +// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response
    +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
    +// were separately delayed a longer time.
    +type TypedMaxOfRateLimiter[T comparable] struct {
    +	limiters []TypedRateLimiter[T]
     }
     
    -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
    +func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration {
     	ret := time.Duration(0)
     	for _, limiter := range r.limiters {
     		curr := limiter.When(item)
    @@ -188,11 +231,16 @@ func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
     	return ret
     }
     
    -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
    -	return &MaxOfRateLimiter{limiters: limiters}
    +// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead.
    +func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter {
    +	return NewTypedMaxOfRateLimiter(limiters...)
     }
     
    -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
    +func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] {
    +	return &TypedMaxOfRateLimiter[T]{limiters: limiters}
    +}
    +
    +func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int {
     	ret := 0
     	for _, limiter := range r.limiters {
     		curr := limiter.NumRequeues(item)
    @@ -204,23 +252,32 @@ func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
     	return ret
     }
     
    -func (r *MaxOfRateLimiter) Forget(item interface{}) {
    +func (r *TypedMaxOfRateLimiter[T]) Forget(item T) {
     	for _, limiter := range r.limiters {
     		limiter.Forget(item)
     	}
     }
     
     // WithMaxWaitRateLimiter have maxDelay which avoids waiting too long
    -type WithMaxWaitRateLimiter struct {
    -	limiter  RateLimiter
    +// Deprecated: Use TypedWithMaxWaitRateLimiter instead.
    +type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any]
    +
    +// TypedWithMaxWaitRateLimiter have maxDelay which avoids waiting too long
    +type TypedWithMaxWaitRateLimiter[T comparable] struct {
    +	limiter  TypedRateLimiter[T]
     	maxDelay time.Duration
     }
     
    +// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead.
     func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter {
    -	return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay}
    +	return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay)
    +}
    +
    +func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] {
    +	return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay}
     }
     
    -func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration {
    +func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration {
     	delay := w.limiter.When(item)
     	if delay > w.maxDelay {
     		return w.maxDelay
    @@ -229,10 +286,10 @@ func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration {
     	return delay
     }
     
    -func (w WithMaxWaitRateLimiter) Forget(item interface{}) {
    +func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) {
     	w.limiter.Forget(item)
     }
     
    -func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int {
    +func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int {
     	return w.limiter.NumRequeues(item)
     }
    diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
    index c1df720302..958b96a80c 100644
    --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
    +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
    @@ -27,14 +27,25 @@ import (
     
     // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
     // requeue items after failures without ending up in a hot-loop.
    -type DelayingInterface interface {
    -	Interface
    +//
    +// Deprecated: use TypedDelayingInterface instead.
    +type DelayingInterface TypedDelayingInterface[any]
    +
    +// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
    +// requeue items after failures without ending up in a hot-loop.
    +type TypedDelayingInterface[T comparable] interface {
    +	TypedInterface[T]
     	// AddAfter adds an item to the workqueue after the indicated duration has passed
    -	AddAfter(item interface{}, duration time.Duration)
    +	AddAfter(item T, duration time.Duration)
     }
     
     // DelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
    -type DelayingQueueConfig struct {
    +//
    +// Deprecated: use TypedDelayingQueueConfig instead.
    +type DelayingQueueConfig = TypedDelayingQueueConfig[any]
    +
    +// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
    +type TypedDelayingQueueConfig[T comparable] struct {
     	// Name for the queue. If unnamed, the metrics will not be registered.
     	Name string
     
    @@ -46,25 +57,42 @@ type DelayingQueueConfig struct {
     	Clock clock.WithTicker
     
     	// Queue optionally allows injecting custom queue Interface instead of the default one.
    -	Queue Interface
    +	Queue TypedInterface[T]
     }
     
     // NewDelayingQueue constructs a new workqueue with delayed queuing ability.
     // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
     // NewDelayingQueueWithConfig instead and specify a name.
    +//
    +// Deprecated: use TypedNewDelayingQueue instead.
     func NewDelayingQueue() DelayingInterface {
     	return NewDelayingQueueWithConfig(DelayingQueueConfig{})
     }
     
    +// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability.
    +// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
    +// TypedNewDelayingQueueWithConfig instead and specify a name.
    +func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
    +	return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{})
    +}
    +
     // NewDelayingQueueWithConfig constructs a new workqueue with options to
     // customize different properties.
    +//
    +// Deprecated: use TypedNewDelayingQueueWithConfig instead.
     func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface {
    +	return NewTypedDelayingQueueWithConfig[any](config)
    +}
    +
    +// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to
    +// customize different properties.
    +func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] {
     	if config.Clock == nil {
     		config.Clock = clock.RealClock{}
     	}
     
     	if config.Queue == nil {
    -		config.Queue = NewWithConfig(QueueConfig{
    +		config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{
     			Name:            config.Name,
     			MetricsProvider: config.MetricsProvider,
     			Clock:           config.Clock,
    @@ -100,9 +128,9 @@ func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) Delayi
     	})
     }
     
    -func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType {
    -	ret := &delayingType{
    -		Interface:       q,
    +func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] {
    +	ret := &delayingType[T]{
    +		TypedInterface:  q,
     		clock:           clock,
     		heartbeat:       clock.NewTicker(maxWait),
     		stopCh:          make(chan struct{}),
    @@ -115,8 +143,8 @@ func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider
     }
     
     // delayingType wraps an Interface and provides delayed re-enquing
    -type delayingType struct {
    -	Interface
    +type delayingType[T comparable] struct {
    +	TypedInterface[T]
     
     	// clock tracks time for delayed firing
     	clock clock.Clock
    @@ -193,16 +221,16 @@ func (pq waitForPriorityQueue) Peek() interface{} {
     
     // ShutDown stops the queue. After the queue drains, the returned shutdown bool
     // on Get() will be true. This method may be invoked more than once.
    -func (q *delayingType) ShutDown() {
    +func (q *delayingType[T]) ShutDown() {
     	q.stopOnce.Do(func() {
    -		q.Interface.ShutDown()
    +		q.TypedInterface.ShutDown()
     		close(q.stopCh)
     		q.heartbeat.Stop()
     	})
     }
     
     // AddAfter adds the given item to the work queue after the given delay
    -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
    +func (q *delayingType[T]) AddAfter(item T, duration time.Duration) {
     	// don't add if we're already shutting down
     	if q.ShuttingDown() {
     		return
    @@ -229,7 +257,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
     const maxWait = 10 * time.Second
     
     // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
    -func (q *delayingType) waitingLoop() {
    +func (q *delayingType[T]) waitingLoop() {
     	defer utilruntime.HandleCrash()
     
     	// Make a placeholder channel to use when there are no items in our list
    @@ -244,7 +272,7 @@ func (q *delayingType) waitingLoop() {
     	waitingEntryByData := map[t]*waitFor{}
     
     	for {
    -		if q.Interface.ShuttingDown() {
    +		if q.TypedInterface.ShuttingDown() {
     			return
     		}
     
    @@ -258,7 +286,7 @@ func (q *delayingType) waitingLoop() {
     			}
     
     			entry = heap.Pop(waitingForQueue).(*waitFor)
    -			q.Add(entry.data)
    +			q.Add(entry.data.(T))
     			delete(waitingEntryByData, entry.data)
     		}
     
    @@ -287,7 +315,7 @@ func (q *delayingType) waitingLoop() {
     			if waitEntry.readyAt.After(q.clock.Now()) {
     				insert(waitingForQueue, waitingEntryByData, waitEntry)
     			} else {
    -				q.Add(waitEntry.data)
    +				q.Add(waitEntry.data.(T))
     			}
     
     			drained := false
    @@ -297,7 +325,7 @@ func (q *delayingType) waitingLoop() {
     					if waitEntry.readyAt.After(q.clock.Now()) {
     						insert(waitingForQueue, waitingEntryByData, waitEntry)
     					} else {
    -						q.Add(waitEntry.data)
    +						q.Add(waitEntry.data.(T))
     					}
     				default:
     					drained = true
    diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go
    index a363d1afb4..ff715482c1 100644
    --- a/vendor/k8s.io/client-go/util/workqueue/queue.go
    +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go
    @@ -23,18 +23,66 @@ import (
     	"k8s.io/utils/clock"
     )
     
    -type Interface interface {
    -	Add(item interface{})
    +// Deprecated: Interface is deprecated, use TypedInterface instead.
    +type Interface TypedInterface[any]
    +
    +type TypedInterface[T comparable] interface {
    +	Add(item T)
     	Len() int
    -	Get() (item interface{}, shutdown bool)
    -	Done(item interface{})
    +	Get() (item T, shutdown bool)
    +	Done(item T)
     	ShutDown()
     	ShutDownWithDrain()
     	ShuttingDown() bool
     }
     
    +// Queue is the underlying storage for items. The functions below are always
    +// called from the same goroutine.
    +type Queue[T comparable] interface {
    +	// Touch can be hooked when an existing item is added again. This may be
    +	// useful if the implementation allows priority change for the given item.
    +	Touch(item T)
    +	// Push adds a new item.
    +	Push(item T)
    +	// Len tells the total number of items.
    +	Len() int
    +	// Pop retrieves an item.
    +	Pop() (item T)
    +}
    +
    +// DefaultQueue is a slice based FIFO queue.
    +func DefaultQueue[T comparable]() Queue[T] {
    +	return new(queue[T])
    +}
    +
    +// queue is a slice which implements Queue.
    +type queue[T comparable] []T
    +
    +func (q *queue[T]) Touch(item T) {}
    +
    +func (q *queue[T]) Push(item T) {
    +	*q = append(*q, item)
    +}
    +
    +func (q *queue[T]) Len() int {
    +	return len(*q)
    +}
    +
    +func (q *queue[T]) Pop() (item T) {
    +	item = (*q)[0]
    +
    +	// The underlying array still exists and reference this object, so the object will not be garbage collected.
    +	(*q)[0] = *new(T)
    +	*q = (*q)[1:]
    +
    +	return item
    +}
    +
     // QueueConfig specifies optional configurations to customize an Interface.
    -type QueueConfig struct {
    +// Deprecated: use TypedQueueConfig instead.
    +type QueueConfig = TypedQueueConfig[any]
    +
    +type TypedQueueConfig[T comparable] struct {
     	// Name for the queue. If unnamed, the metrics will not be registered.
     	Name string
     
    @@ -44,18 +92,38 @@ type QueueConfig struct {
     
     	// Clock ability to inject real or fake clock for testing purposes.
     	Clock clock.WithTicker
    +
    +	// Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue.
    +	Queue Queue[T]
     }
     
     // New constructs a new work queue (see the package comment).
    +//
    +// Deprecated: use NewTyped instead.
     func New() *Type {
     	return NewWithConfig(QueueConfig{
     		Name: "",
     	})
     }
     
    +// NewTyped constructs a new work queue (see the package comment).
    +func NewTyped[T comparable]() *Typed[T] {
    +	return NewTypedWithConfig(TypedQueueConfig[T]{
    +		Name: "",
    +	})
    +}
    +
     // NewWithConfig constructs a new workqueue with ability to
     // customize different properties.
    +//
    +// Deprecated: use NewTypedWithConfig instead.
     func NewWithConfig(config QueueConfig) *Type {
    +	return NewTypedWithConfig(config)
    +}
    +
    +// NewTypedWithConfig constructs a new workqueue with ability to
    +// customize different properties.
    +func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] {
     	return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod)
     }
     
    @@ -69,7 +137,7 @@ func NewNamed(name string) *Type {
     
     // newQueueWithConfig constructs a new named workqueue
     // with the ability to customize different properties for testing purposes
    -func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type {
    +func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] {
     	var metricsFactory *queueMetricsFactory
     	if config.MetricsProvider != nil {
     		metricsFactory = &queueMetricsFactory{
    @@ -83,18 +151,24 @@ func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type {
     		config.Clock = clock.RealClock{}
     	}
     
    +	if config.Queue == nil {
    +		config.Queue = DefaultQueue[T]()
    +	}
    +
     	return newQueue(
     		config.Clock,
    +		config.Queue,
     		metricsFactory.newQueueMetrics(config.Name, config.Clock),
     		updatePeriod,
     	)
     }
     
    -func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type {
    -	t := &Type{
    +func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] {
    +	t := &Typed[T]{
     		clock:                      c,
    -		dirty:                      set{},
    -		processing:                 set{},
    +		queue:                      queue,
    +		dirty:                      set[T]{},
    +		processing:                 set[T]{},
     		cond:                       sync.NewCond(&sync.Mutex{}),
     		metrics:                    metrics,
     		unfinishedWorkUpdatePeriod: updatePeriod,
    @@ -112,20 +186,23 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati
     const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
     
     // Type is a work queue (see the package comment).
    -type Type struct {
    +// Deprecated: Use Typed instead.
    +type Type = Typed[any]
    +
    +type Typed[t comparable] struct {
     	// queue defines the order in which we will work on items. Every
     	// element of queue should be in the dirty set and not in the
     	// processing set.
    -	queue []t
    +	queue Queue[t]
     
     	// dirty defines all of the items that need to be processed.
    -	dirty set
    +	dirty set[t]
     
     	// Things that are currently being processed are in the processing set.
     	// These things may be simultaneously in the dirty set. When we finish
     	// processing something and remove it from this set, we'll check if
     	// it's in the dirty set, and if so, add it to the queue.
    -	processing set
    +	processing set[t]
     
     	cond *sync.Cond
     
    @@ -140,33 +217,38 @@ type Type struct {
     
     type empty struct{}
     type t interface{}
    -type set map[t]empty
    +type set[t comparable] map[t]empty
     
    -func (s set) has(item t) bool {
    +func (s set[t]) has(item t) bool {
     	_, exists := s[item]
     	return exists
     }
     
    -func (s set) insert(item t) {
    +func (s set[t]) insert(item t) {
     	s[item] = empty{}
     }
     
    -func (s set) delete(item t) {
    +func (s set[t]) delete(item t) {
     	delete(s, item)
     }
     
    -func (s set) len() int {
    +func (s set[t]) len() int {
     	return len(s)
     }
     
     // Add marks item as needing processing.
    -func (q *Type) Add(item interface{}) {
    +func (q *Typed[T]) Add(item T) {
     	q.cond.L.Lock()
     	defer q.cond.L.Unlock()
     	if q.shuttingDown {
     		return
     	}
     	if q.dirty.has(item) {
    +		// the same item is added again before it is processed, call the Touch
    +		// function if the queue cares about it (for e.g, reset its priority)
    +		if !q.processing.has(item) {
    +			q.queue.Touch(item)
    +		}
     		return
     	}
     
    @@ -177,37 +259,34 @@ func (q *Type) Add(item interface{}) {
     		return
     	}
     
    -	q.queue = append(q.queue, item)
    +	q.queue.Push(item)
     	q.cond.Signal()
     }
     
     // Len returns the current queue length, for informational purposes only. You
     // shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
     // value, that can't be synchronized properly.
    -func (q *Type) Len() int {
    +func (q *Typed[T]) Len() int {
     	q.cond.L.Lock()
     	defer q.cond.L.Unlock()
    -	return len(q.queue)
    +	return q.queue.Len()
     }
     
     // Get blocks until it can return an item to be processed. If shutdown = true,
     // the caller should end their goroutine. You must call Done with item when you
     // have finished processing it.
    -func (q *Type) Get() (item interface{}, shutdown bool) {
    +func (q *Typed[T]) Get() (item T, shutdown bool) {
     	q.cond.L.Lock()
     	defer q.cond.L.Unlock()
    -	for len(q.queue) == 0 && !q.shuttingDown {
    +	for q.queue.Len() == 0 && !q.shuttingDown {
     		q.cond.Wait()
     	}
    -	if len(q.queue) == 0 {
    +	if q.queue.Len() == 0 {
     		// We must be shutting down.
    -		return nil, true
    +		return *new(T), true
     	}
     
    -	item = q.queue[0]
    -	// The underlying array still exists and reference this object, so the object will not be garbage collected.
    -	q.queue[0] = nil
    -	q.queue = q.queue[1:]
    +	item = q.queue.Pop()
     
     	q.metrics.get(item)
     
    @@ -220,7 +299,7 @@ func (q *Type) Get() (item interface{}, shutdown bool) {
     // Done marks item as done processing, and if it has been marked as dirty again
     // while it was being processed, it will be re-added to the queue for
     // re-processing.
    -func (q *Type) Done(item interface{}) {
    +func (q *Typed[T]) Done(item T) {
     	q.cond.L.Lock()
     	defer q.cond.L.Unlock()
     
    @@ -228,7 +307,7 @@ func (q *Type) Done(item interface{}) {
     
     	q.processing.delete(item)
     	if q.dirty.has(item) {
    -		q.queue = append(q.queue, item)
    +		q.queue.Push(item)
     		q.cond.Signal()
     	} else if q.processing.len() == 0 {
     		q.cond.Signal()
    @@ -237,7 +316,7 @@ func (q *Type) Done(item interface{}) {
     
     // ShutDown will cause q to ignore all new items added to it and
     // immediately instruct the worker goroutines to exit.
    -func (q *Type) ShutDown() {
    +func (q *Typed[T]) ShutDown() {
     	q.cond.L.Lock()
     	defer q.cond.L.Unlock()
     
    @@ -255,7 +334,7 @@ func (q *Type) ShutDown() {
     // indefinitely. It is, however, safe to call ShutDown after having called
     // ShutDownWithDrain, as to force the queue shut down to terminate immediately
     // without waiting for the drainage.
    -func (q *Type) ShutDownWithDrain() {
    +func (q *Typed[T]) ShutDownWithDrain() {
     	q.cond.L.Lock()
     	defer q.cond.L.Unlock()
     
    @@ -268,14 +347,14 @@ func (q *Type) ShutDownWithDrain() {
     	}
     }
     
    -func (q *Type) ShuttingDown() bool {
    +func (q *Typed[T]) ShuttingDown() bool {
     	q.cond.L.Lock()
     	defer q.cond.L.Unlock()
     
     	return q.shuttingDown
     }
     
    -func (q *Type) updateUnfinishedWorkLoop() {
    +func (q *Typed[T]) updateUnfinishedWorkLoop() {
     	t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
     	defer t.Stop()
     	for range t.C() {
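Typed[T] keeps the same dirty/processing bookkeeping as the old Type, plus an injectable Queue[T] backend (DefaultQueue is the slice FIFO). A short sketch of the deduplication and re-add-while-processing behaviour:

	package main

	import (
		"fmt"

		"k8s.io/client-go/util/workqueue"
	)

	func main() {
		q := workqueue.NewTyped[string]()

		q.Add("default/a")
		q.Add("default/a") // already dirty and not processing: only Touch is called
		q.Add("default/b")
		fmt.Println(q.Len()) // 2

		item, _ := q.Get()   // "default/a" moves to the processing set
		q.Add(item)          // re-added while processing: parked in dirty, not pushed
		q.Done(item)         // still dirty, so it is pushed back onto the queue
		fmt.Println(q.Len()) // 2

		q.ShutDown()
	}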
    diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
    index 3e4016fb04..fe45afa5a4 100644
    --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
    +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
    @@ -19,24 +19,33 @@ package workqueue
     import "k8s.io/utils/clock"
     
     // RateLimitingInterface is an interface that rate limits items being added to the queue.
    -type RateLimitingInterface interface {
    -	DelayingInterface
    +//
    +// Deprecated: Use TypedRateLimitingInterface instead.
    +type RateLimitingInterface TypedRateLimitingInterface[any]
    +
    +// TypedRateLimitingInterface is an interface that rate limits items being added to the queue.
    +type TypedRateLimitingInterface[T comparable] interface {
    +	TypedDelayingInterface[T]
     
     	// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
    -	AddRateLimited(item interface{})
    +	AddRateLimited(item T)
     
     	// Forget indicates that an item is finished being retried.  Doesn't matter whether it's for perm failing
     	// or for success, we'll stop the rate limiter from tracking it.  This only clears the `rateLimiter`, you
     	// still have to call `Done` on the queue.
    -	Forget(item interface{})
    +	Forget(item T)
     
     	// NumRequeues returns back how many times the item was requeued
    -	NumRequeues(item interface{}) int
    +	NumRequeues(item T) int
     }
     
     // RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface.
    +//
    +// Deprecated: Use TypedRateLimitingQueueConfig instead.
    +type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any]
     
    -type RateLimitingQueueConfig struct {
    +// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface.
    +type TypedRateLimitingQueueConfig[T comparable] struct {
     	// Name for the queue. If unnamed, the metrics will not be registered.
     	Name string
     
    @@ -48,36 +57,55 @@ type RateLimitingQueueConfig struct {
     	Clock clock.WithTicker
     
     	// DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one.
    -	DelayingQueue DelayingInterface
    +	DelayingQueue TypedDelayingInterface[T]
     }
     
     // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
     // Remember to call Forget!  If you don't, you may end up tracking failures forever.
     // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
     // NewRateLimitingQueueWithConfig instead and specify a name.
    +//
    +// Deprecated: Use NewTypedRateLimitingQueue instead.
     func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
     	return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{})
     }
     
    +// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
    +// Remember to call Forget!  If you don't, you may end up tracking failures forever.
    +// NewTypedRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
    +// NewTypedRateLimitingQueueWithConfig instead and specify a name.
    +func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] {
    +	return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{})
    +}
    +
     // NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
     // with options to customize different properties.
     // Remember to call Forget!  If you don't, you may end up tracking failures forever.
    +//
    +// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead.
     func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface {
    +	return NewTypedRateLimitingQueueWithConfig(rateLimiter, config)
    +}
    +
    +// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
    +// with options to customize different properties.
    +// Remember to call Forget!  If you don't, you may end up tracking failures forever.
    +func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] {
     	if config.Clock == nil {
     		config.Clock = clock.RealClock{}
     	}
     
     	if config.DelayingQueue == nil {
    -		config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{
    +		config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{
     			Name:            config.Name,
     			MetricsProvider: config.MetricsProvider,
     			Clock:           config.Clock,
     		})
     	}
     
    -	return &rateLimitingType{
    -		DelayingInterface: config.DelayingQueue,
    -		rateLimiter:       rateLimiter,
    +	return &rateLimitingType[T]{
    +		TypedDelayingInterface: config.DelayingQueue,
    +		rateLimiter:            rateLimiter,
     	}
     }
     
    @@ -99,21 +127,21 @@ func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter
     }
     
     // rateLimitingType wraps an Interface and provides rateLimited re-enquing
    -type rateLimitingType struct {
    -	DelayingInterface
    +type rateLimitingType[T comparable] struct {
    +	TypedDelayingInterface[T]
     
    -	rateLimiter RateLimiter
    +	rateLimiter TypedRateLimiter[T]
     }
     
     // AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok
    -func (q *rateLimitingType) AddRateLimited(item interface{}) {
    -	q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
    +func (q *rateLimitingType[T]) AddRateLimited(item T) {
    +	q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item))
     }
     
    -func (q *rateLimitingType) NumRequeues(item interface{}) int {
    +func (q *rateLimitingType[T]) NumRequeues(item T) int {
     	return q.rateLimiter.NumRequeues(item)
     }
     
    -func (q *rateLimitingType) Forget(item interface{}) {
    +func (q *rateLimitingType[T]) Forget(item T) {
     	q.rateLimiter.Forget(item)
     }
    diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
    index 026be9e3b1..47ec9466a6 100644
    --- a/vendor/k8s.io/klog/v2/klog.go
    +++ b/vendor/k8s.io/klog/v2/klog.go
    @@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error {
     	return nil
     }
     
    -// flushSyncWriter is the interface satisfied by logging destinations.
    -type flushSyncWriter interface {
    -	Flush() error
    -	Sync() error
    -	io.Writer
    -}
    -
     var logging loggingT
     var commandLine flag.FlagSet
     
    @@ -486,7 +479,7 @@ type settings struct {
     	// Access to all of the following fields must be protected via a mutex.
     
     	// file holds writer for each of the log types.
    -	file [severity.NumSeverity]flushSyncWriter
    +	file [severity.NumSeverity]io.Writer
     	// flushInterval is the interval for periodic flushing. If zero,
     	// the global default will be used.
     	flushInterval time.Duration
    @@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
     	buffer.PutBuffer(b)
     }
     
    -// redirectBuffer is used to set an alternate destination for the logs
    -type redirectBuffer struct {
    -	w io.Writer
    -}
    -
    -func (rb *redirectBuffer) Sync() error {
    -	return nil
    -}
    -
    -func (rb *redirectBuffer) Flush() error {
    -	return nil
    -}
    -
    -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
    -	return rb.w.Write(bytes)
    -}
    -
     // SetOutput sets the output destination for all severities
     func SetOutput(w io.Writer) {
     	logging.mu.Lock()
     	defer logging.mu.Unlock()
     	for s := severity.FatalLog; s >= severity.InfoLog; s-- {
    -		rb := &redirectBuffer{
    -			w: w,
    -		}
    -		logging.file[s] = rb
    +		logging.file[s] = w
     	}
     }
     
    @@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) {
     	if !ok {
     		panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
     	}
    -	rb := &redirectBuffer{
    -		w: w,
    -	}
    -	logging.file[sev] = rb
    +	logging.file[sev] = w
     }
     
     // LogToStderr sets whether to log exclusively to stderr, bypassing outputs
    @@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) {
     		logExitFunc(err)
     		return
     	}
    -	l.flushAll()
    +	needToSync := l.flushAll()
    +	l.syncAll(needToSync)
     	OsExit(2)
     }
     
    @@ -1028,10 +999,6 @@ type syncBuffer struct {
     	maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
     }
     
    -func (sb *syncBuffer) Sync() error {
    -	return sb.file.Sync()
    -}
    -
     // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
     func CalculateMaxSize() uint64 {
     	if logging.logFile != "" {
    @@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) {
     // lockAndFlushAll is like flushAll but locks l.mu first.
     func (l *loggingT) lockAndFlushAll() {
     	l.mu.Lock()
    -	l.flushAll()
    +	needToSync := l.flushAll()
     	l.mu.Unlock()
    +	// Some environments are slow when syncing and holding the lock might cause contention.
    +	l.syncAll(needToSync)
     }
     
    -// flushAll flushes all the logs and attempts to "sync" their data to disk.
    +// flushAll flushes all the logs
     // l.mu is held.
    -func (l *loggingT) flushAll() {
    +//
    +// The result is the number of files which need to be synced and the pointers to them.
    +func (l *loggingT) flushAll() fileArray {
    +	var needToSync fileArray
    +
     	// Flush from fatal down, in case there's trouble flushing.
     	for s := severity.FatalLog; s >= severity.InfoLog; s-- {
     		file := l.file[s]
    -		if file != nil {
    -			_ = file.Flush() // ignore error
    -			_ = file.Sync()  // ignore error
    +		if sb, ok := file.(*syncBuffer); ok && sb.file != nil {
    +			_ = sb.Flush() // ignore error
    +			needToSync.files[needToSync.num] = sb.file
    +			needToSync.num++
     		}
     	}
     	if logging.loggerOptions.flush != nil {
     		logging.loggerOptions.flush()
     	}
    +	return needToSync
    +}
    +
    +type fileArray struct {
    +	num   int
    +	files [severity.NumSeverity]*os.File
    +}
    +
    +// syncAll attempts to "sync" their data to disk.
    +func (l *loggingT) syncAll(needToSync fileArray) {
    +	// Flush from fatal down, in case there's trouble flushing.
    +	for i := 0; i < needToSync.num; i++ {
    +		_ = needToSync.files[i].Sync() // ignore error
    +	}
     }
     
     // CopyStandardLogTo arranges for messages written to the Go "log" package's
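With redirectBuffer gone, SetOutput stores the io.Writer directly; Flush still drains buffered file outputs, while the new syncAll only fsyncs real *os.File destinations outside the lock. A minimal sketch, assuming klog's default flag settings, of redirecting output to an in-memory buffer:

	package main

	import (
		"bytes"
		"fmt"

		"k8s.io/klog/v2"
	)

	func main() {
		var buf bytes.Buffer
		klog.LogToStderr(false) // stop writing exclusively to stderr
		klog.SetOutput(&buf)    // any io.Writer is now accepted as-is

		klog.Info("hello from klog")
		klog.Flush() // flush buffered outputs; only real files get Sync'd

		fmt.Print(buf.String())
	}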
    diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go
    new file mode 100644
    index 0000000000..7cb7795bec
    --- /dev/null
    +++ b/vendor/k8s.io/utils/net/multi_listen.go
    @@ -0,0 +1,195 @@
    +/*
    +Copyright 2024 The Kubernetes Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +package net
    +
    +import (
    +	"context"
    +	"fmt"
    +	"net"
    +	"sync"
    +)
    +
    +// connErrPair pairs conn and error which is returned by accept on sub-listeners.
    +type connErrPair struct {
    +	conn net.Conn
    +	err  error
    +}
    +
    +// multiListener implements net.Listener
    +type multiListener struct {
    +	listeners []net.Listener
    +	wg        sync.WaitGroup
    +
    +	// connCh passes accepted connections, from child listeners to parent.
    +	connCh chan connErrPair
    +	// stopCh communicates from parent to child listeners.
    +	stopCh chan struct{}
    +}
    +
    +// compile time check to ensure *multiListener implements net.Listener
    +var _ net.Listener = &multiListener{}
    +
    +// MultiListen returns net.Listener which can listen on and accept connections for
    +// the given network on multiple addresses. Internally it uses stdlib to create
    +// sub-listener and multiplexes connection requests using go-routines.
    +// The network must be "tcp", "tcp4" or "tcp6".
    +// It follows the semantics of net.Listen that primarily means:
    +//  1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen
    +//     listens on all available unicast and anycast IP addresses of the local system.
    +//  2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively.
    +//  3. The host can accept names (e.g, localhost) and it will create a listener for at
    +//     most one of the host's IP.
    +func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) {
    +	var lc net.ListenConfig
    +	return multiListen(
    +		ctx,
    +		network,
    +		addrs,
    +		func(ctx context.Context, network, address string) (net.Listener, error) {
    +			return lc.Listen(ctx, network, address)
    +		})
    +}
    +
    +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing
    +// mocking for unit-testing.
    +func multiListen(
    +	ctx context.Context,
    +	network string,
    +	addrs []string,
    +	listenFunc func(ctx context.Context, network, address string) (net.Listener, error),
    +) (net.Listener, error) {
    +	if !(network == "tcp" || network == "tcp4" || network == "tcp6") {
    +		return nil, fmt.Errorf("network %q not supported", network)
    +	}
    +	if len(addrs) == 0 {
    +		return nil, fmt.Errorf("no address provided to listen on")
    +	}
    +
    +	ml := &multiListener{
    +		connCh: make(chan connErrPair),
    +		stopCh: make(chan struct{}),
    +	}
    +	for _, addr := range addrs {
    +		l, err := listenFunc(ctx, network, addr)
    +		if err != nil {
    +			// close all the sub-listeners and exit
    +			_ = ml.Close()
    +			return nil, err
    +		}
    +		ml.listeners = append(ml.listeners, l)
    +	}
    +
    +	for _, l := range ml.listeners {
    +		ml.wg.Add(1)
    +		go func(l net.Listener) {
    +			defer ml.wg.Done()
    +			for {
    +				// Accept() is blocking, unless ml.Close() is called, in which
    +				// case it will return immediately with an error.
    +				conn, err := l.Accept()
    +				// This assumes that ANY error from Accept() will terminate the
    +				// sub-listener. We could maybe be more precise, but it
    +				// doesn't seem necessary.
    +				terminate := err != nil
    +
    +				select {
    +				case ml.connCh <- connErrPair{conn: conn, err: err}:
    +				case <-ml.stopCh:
    +					// In case we accepted a connection AND were stopped, and
    +					// this select-case was chosen, just throw away the
    +					// connection.  This avoids potentially blocking on connCh
    +					// or leaking a connection.
    +					if conn != nil {
    +						_ = conn.Close()
    +					}
    +					terminate = true
    +				}
    +				// Make sure we don't loop on Accept() returning an error and
    +				// the select choosing the channel case.
    +				if terminate {
    +					return
    +				}
    +			}
    +		}(l)
    +	}
    +	return ml, nil
    +}
    +
    +// Accept implements net.Listener. It waits for and returns a connection from
    +// any of the sub-listener.
    +func (ml *multiListener) Accept() (net.Conn, error) {
    +	// wait for any sub-listener to enqueue an accepted connection
    +	connErr, ok := <-ml.connCh
    +	if !ok {
    +		// The channel will be closed only when Close() is called on the
    +		// multiListener. Closing of this channel implies that all
    +		// sub-listeners are also closed, which causes a "use of closed
    +		// network connection" error on their Accept() calls. We return the
    +		// same error for multiListener.Accept() if multiListener.Close()
    +		// has already been called.
    +		return nil, fmt.Errorf("use of closed network connection")
    +	}
    +	return connErr.conn, connErr.err
    +}
    +
    +// Close implements net.Listener. It will close all sub-listeners and wait for
    +// the go-routines to exit.
    +func (ml *multiListener) Close() error {
    +	// Make sure this can be called repeatedly without explosions.
    +	select {
    +	case <-ml.stopCh:
    +		return fmt.Errorf("use of closed network connection")
    +	default:
    +	}
    +
    +	// Tell all sub-listeners to stop.
    +	close(ml.stopCh)
    +
    +	// Closing the listeners causes Accept() to immediately return an error in
    +	// the sub-listener go-routines.
    +	for _, l := range ml.listeners {
    +		_ = l.Close()
    +	}
    +
    +	// Wait for all the sub-listener go-routines to exit.
    +	ml.wg.Wait()
    +	close(ml.connCh)
    +
    +	// Drain any already-queued connections.
    +	for connErr := range ml.connCh {
    +		if connErr.conn != nil {
    +			_ = connErr.conn.Close()
    +		}
    +	}
    +	return nil
    +}
    +
    +// Addr is an implementation of the net.Listener interface.  It always returns
    +// the address of the first listener.  Callers should  use conn.LocalAddr() to
    +// obtain the actual local address of the sub-listener.
    +func (ml *multiListener) Addr() net.Addr {
    +	return ml.listeners[0].Addr()
    +}
    +
    +// Addrs is like Addr, but returns the address for all registered listeners.
    +func (ml *multiListener) Addrs() []net.Addr {
    +	var ret []net.Addr
    +	for _, l := range ml.listeners {
    +		ret = append(ret, l.Addr())
    +	}
    +	return ret
    +}
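A short sketch of the new helper serving one HTTP handler on several addresses at once; the port and the IPv6 loopback address are illustrative assumptions:

	package main

	import (
		"context"
		"fmt"
		"net/http"

		netutils "k8s.io/utils/net"
	)

	func main() {
		// One net.Listener that accepts from both loopback addresses; Accept is
		// multiplexed across the sub-listeners by the goroutines shown above.
		ln, err := netutils.MultiListen(context.Background(), "tcp", "127.0.0.1:8080", "[::1]:8080")
		if err != nil {
			panic(err)
		}
		defer ln.Close()

		fmt.Println("serving on", ln.Addr()) // address of the first sub-listener
		_ = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "hello")
		}))
	}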
    diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go
    index 187eb5d8c5..559aebb59a 100644
    --- a/vendor/k8s.io/utils/trace/trace.go
    +++ b/vendor/k8s.io/utils/trace/trace.go
    @@ -192,7 +192,7 @@ func (t *Trace) Log() {
     	t.endTime = &endTime
     	t.lock.Unlock()
     	// an explicit logging request should dump all the steps out at the higher level
    -	if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace
    +	if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace
     		t.logTrace()
     	}
     }
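The change above gates Trace.Log behind klog verbosity >= 2, so explicit Log calls stay silent at lower verbosity. A small sketch, assuming the package's usual New/Step/Field API (not shown in this hunk), of a trace that now only appears when running at -v=2 or higher:

	package main

	import (
		"flag"
		"time"

		"k8s.io/klog/v2"
		utiltrace "k8s.io/utils/trace"
	)

	func main() {
		klog.InitFlags(nil)
		flag.Parse() // e.g. run with -v=2 so the explicit Log below is emitted

		t := utiltrace.New("refresh-cache", utiltrace.Field{Key: "keys", Value: 100})
		defer t.Log() // with the change above, this logs only when klog v >= 2

		time.Sleep(10 * time.Millisecond)
		t.Step("fetched from API server")
		time.Sleep(10 * time.Millisecond)
		t.Step("updated local store")
	}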
    diff --git a/vendor/modules.txt b/vendor/modules.txt
    index 799a6655df..ee77137bf1 100644
    --- a/vendor/modules.txt
    +++ b/vendor/modules.txt
    @@ -1,12 +1,32 @@
    -# cloud.google.com/go/compute/metadata v0.3.0
    -## explicit; go 1.19
    +# cloud.google.com/go/auth v0.13.0
    +## explicit; go 1.21
    +cloud.google.com/go/auth
    +cloud.google.com/go/auth/credentials
    +cloud.google.com/go/auth/credentials/internal/externalaccount
    +cloud.google.com/go/auth/credentials/internal/externalaccountuser
    +cloud.google.com/go/auth/credentials/internal/gdch
    +cloud.google.com/go/auth/credentials/internal/impersonate
    +cloud.google.com/go/auth/credentials/internal/stsexchange
    +cloud.google.com/go/auth/grpctransport
    +cloud.google.com/go/auth/httptransport
    +cloud.google.com/go/auth/internal
    +cloud.google.com/go/auth/internal/compute
    +cloud.google.com/go/auth/internal/credsfile
    +cloud.google.com/go/auth/internal/jwt
    +cloud.google.com/go/auth/internal/transport
    +cloud.google.com/go/auth/internal/transport/cert
    +# cloud.google.com/go/auth/oauth2adapt v0.2.6
    +## explicit; go 1.21
    +cloud.google.com/go/auth/oauth2adapt
    +# cloud.google.com/go/compute/metadata v0.6.0
    +## explicit; go 1.21
     cloud.google.com/go/compute/metadata
    -# cloud.google.com/go/monitoring v1.18.0
    -## explicit; go 1.19
    +# cloud.google.com/go/monitoring v1.20.4
    +## explicit; go 1.20
     cloud.google.com/go/monitoring/apiv3/v2
     cloud.google.com/go/monitoring/apiv3/v2/monitoringpb
     cloud.google.com/go/monitoring/internal
    -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
    +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
     ## explicit; go 1.18
     github.com/Azure/azure-sdk-for-go/sdk/azcore
     github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource
    @@ -27,11 +47,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/policy
     github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime
     github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming
     github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing
    -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
    +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
     ## explicit; go 1.18
     github.com/Azure/azure-sdk-for-go/sdk/azidentity
     github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal
    -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0
    +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0
     ## explicit; go 1.18
     github.com/Azure/azure-sdk-for-go/sdk/internal/diag
     github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo
    @@ -66,13 +86,13 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
     # github.com/alecthomas/kingpin/v2 v2.4.0
     ## explicit; go 1.17
     github.com/alecthomas/kingpin/v2
    -# github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
    +# github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
     ## explicit; go 1.15
     github.com/alecthomas/units
     # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
     ## explicit; go 1.13
     github.com/asaskevich/govalidator
    -# github.com/aws/aws-sdk-go v1.50.0
    +# github.com/aws/aws-sdk-go v1.55.5
     ## explicit; go 1.19
     github.com/aws/aws-sdk-go/aws
     github.com/aws/aws-sdk-go/aws/auth/bearer
    @@ -119,7 +139,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface
     # github.com/beorn7/perks v1.0.1
     ## explicit; go 1.11
     github.com/beorn7/perks/quantile
    -# github.com/cespare/xxhash/v2 v2.2.0
    +# github.com/cespare/xxhash/v2 v2.3.0
     ## explicit; go 1.11
     github.com/cespare/xxhash/v2
     # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
    @@ -128,10 +148,10 @@ github.com/davecgh/go-spew/spew
     # github.com/dennwc/varint v1.0.0
     ## explicit; go 1.12
     github.com/dennwc/varint
    -# github.com/edsrzf/mmap-go v1.1.0
    +# github.com/edsrzf/mmap-go v1.2.0
     ## explicit; go 1.17
     github.com/edsrzf/mmap-go
    -# github.com/efficientgo/core v1.0.0-rc.2
    +# github.com/efficientgo/core v1.0.0-rc.3
     ## explicit; go 1.17
     github.com/efficientgo/core/backoff
     github.com/efficientgo/core/errcapture
    @@ -172,9 +192,13 @@ github.com/evanphx/json-patch/v5/internal/json
     # github.com/felixge/httpsnoop v1.0.4
     ## explicit; go 1.13
     github.com/felixge/httpsnoop
    -# github.com/fsnotify/fsnotify v1.7.0
    +# github.com/fsnotify/fsnotify v1.8.0
     ## explicit; go 1.17
     github.com/fsnotify/fsnotify
    +github.com/fsnotify/fsnotify/internal
    +# github.com/fxamacker/cbor/v2 v2.7.0
    +## explicit; go 1.17
    +github.com/fxamacker/cbor/v2
     # github.com/go-kit/log v0.2.1
     ## explicit; go 1.17
     github.com/go-kit/log
    @@ -203,11 +227,11 @@ github.com/go-openapi/analysis/internal/flatten/operations
     github.com/go-openapi/analysis/internal/flatten/replace
     github.com/go-openapi/analysis/internal/flatten/schutils
     github.com/go-openapi/analysis/internal/flatten/sortref
    -# github.com/go-openapi/errors v0.21.0
    -## explicit; go 1.19
    +# github.com/go-openapi/errors v0.22.0
    +## explicit; go 1.20
     github.com/go-openapi/errors
    -# github.com/go-openapi/jsonpointer v0.20.2
    -## explicit; go 1.19
    +# github.com/go-openapi/jsonpointer v0.21.0
    +## explicit; go 1.20
     github.com/go-openapi/jsonpointer
     # github.com/go-openapi/jsonreference v0.20.4
     ## explicit; go 1.19
    @@ -219,13 +243,13 @@ github.com/go-openapi/loads
     # github.com/go-openapi/spec v0.20.14
     ## explicit; go 1.19
     github.com/go-openapi/spec
    -# github.com/go-openapi/strfmt v0.22.0
    -## explicit; go 1.19
    +# github.com/go-openapi/strfmt v0.23.0
    +## explicit; go 1.20
     github.com/go-openapi/strfmt
    -# github.com/go-openapi/swag v0.22.9
    -## explicit; go 1.19
    +# github.com/go-openapi/swag v0.23.0
    +## explicit; go 1.20
     github.com/go-openapi/swag
    -# github.com/go-openapi/validate v0.22.6
    +# github.com/go-openapi/validate v0.23.0
     ## explicit; go 1.19
     github.com/go-openapi/validate
     # github.com/gogo/protobuf v1.3.2
    @@ -243,7 +267,6 @@ github.com/golang-jwt/jwt/v5
     github.com/golang/groupcache/lru
     # github.com/golang/protobuf v1.5.4
     ## explicit; go 1.17
    -github.com/golang/protobuf/jsonpb
     github.com/golang/protobuf/proto
     github.com/golang/protobuf/ptypes
     github.com/golang/protobuf/ptypes/any
    @@ -259,8 +282,8 @@ github.com/google/gnostic-models/extensions
     github.com/google/gnostic-models/jsonschema
     github.com/google/gnostic-models/openapiv2
     github.com/google/gnostic-models/openapiv3
    -# github.com/google/go-cmp v0.6.0
    -## explicit; go 1.13
    +# github.com/google/go-cmp v0.7.0
    +## explicit; go 1.21
     github.com/google/go-cmp/cmp
     github.com/google/go-cmp/cmp/cmpopts
     github.com/google/go-cmp/cmp/internal/diff
    @@ -271,8 +294,8 @@ github.com/google/go-cmp/cmp/internal/value
     ## explicit; go 1.12
     github.com/google/gofuzz
     github.com/google/gofuzz/bytesource
    -# github.com/google/s2a-go v0.1.7
    -## explicit; go 1.19
    +# github.com/google/s2a-go v0.1.8
    +## explicit; go 1.20
     github.com/google/s2a-go
     github.com/google/s2a-go/fallback
     github.com/google/s2a-go/internal/authinfo
    @@ -300,25 +323,27 @@ github.com/google/shlex
     # github.com/google/uuid v1.6.0
     ## explicit
     github.com/google/uuid
    -# github.com/googleapis/enterprise-certificate-proxy v0.3.2
    +# github.com/googleapis/enterprise-certificate-proxy v0.3.4
     ## explicit; go 1.19
     github.com/googleapis/enterprise-certificate-proxy/client
     github.com/googleapis/enterprise-certificate-proxy/client/util
    -# github.com/googleapis/gax-go/v2 v2.12.0
    -## explicit; go 1.19
    +# github.com/googleapis/gax-go/v2 v2.14.0
    +## explicit; go 1.21
     github.com/googleapis/gax-go/v2
     github.com/googleapis/gax-go/v2/apierror
     github.com/googleapis/gax-go/v2/apierror/internal/proto
     github.com/googleapis/gax-go/v2/callctx
     github.com/googleapis/gax-go/v2/internal
    +github.com/googleapis/gax-go/v2/internallog
    +github.com/googleapis/gax-go/v2/internallog/internal
     # github.com/gorilla/websocket v1.5.0
     ## explicit; go 1.12
     github.com/gorilla/websocket
     # github.com/grafana/grafana-api-golang-client v0.27.0
     ## explicit; go 1.21
     github.com/grafana/grafana-api-golang-client
    -# github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
    -## explicit; go 1.17
    +# github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
    +## explicit; go 1.21
     github.com/grafana/regexp
     github.com/grafana/regexp/syntax
     # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
    @@ -345,7 +370,16 @@ github.com/json-iterator/go
     # github.com/julienschmidt/httprouter v1.3.0
     ## explicit; go 1.7
     github.com/julienschmidt/httprouter
    -# github.com/klauspost/cpuid/v2 v2.2.5
    +# github.com/klauspost/compress v1.17.11
    +## explicit; go 1.21
    +github.com/klauspost/compress
    +github.com/klauspost/compress/fse
    +github.com/klauspost/compress/huff0
    +github.com/klauspost/compress/internal/cpuinfo
    +github.com/klauspost/compress/internal/snapref
    +github.com/klauspost/compress/zstd
    +github.com/klauspost/compress/zstd/internal/xxhash
    +# github.com/klauspost/cpuid/v2 v2.2.8
     ## explicit; go 1.15
     github.com/klauspost/cpuid/v2
     # github.com/kylelemons/godebug v1.1.0
    @@ -366,7 +400,7 @@ github.com/mitchellh/go-ps
     # github.com/mitchellh/mapstructure v1.5.0
     ## explicit; go 1.14
     github.com/mitchellh/mapstructure
    -# github.com/moby/spdystream v0.2.0
    +# github.com/moby/spdystream v0.4.0
     ## explicit; go 1.13
     github.com/moby/spdystream
     github.com/moby/spdystream/spdy
    @@ -400,16 +434,21 @@ github.com/pkg/errors
     # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
     ## explicit
     github.com/pmezard/go-difflib/difflib
    -# github.com/prometheus/alertmanager v0.26.0
    -## explicit; go 1.18
    +# github.com/prometheus/alertmanager v0.27.0
    +## explicit; go 1.21
     github.com/prometheus/alertmanager/api/v2/models
     github.com/prometheus/alertmanager/config
    +github.com/prometheus/alertmanager/featurecontrol
    +github.com/prometheus/alertmanager/matchers/compat
    +github.com/prometheus/alertmanager/matchers/parse
     github.com/prometheus/alertmanager/pkg/labels
     github.com/prometheus/alertmanager/timeinterval
    -# github.com/prometheus/client_golang v1.18.0
    -## explicit; go 1.19
    +# github.com/prometheus/client_golang v1.21.1
    +## explicit; go 1.21
     github.com/prometheus/client_golang/api
     github.com/prometheus/client_golang/api/prometheus/v1
    +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
    +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
     github.com/prometheus/client_golang/prometheus
     github.com/prometheus/client_golang/prometheus/collectors
     github.com/prometheus/client_golang/prometheus/internal
    @@ -418,10 +457,10 @@ github.com/prometheus/client_golang/prometheus/promhttp
     github.com/prometheus/client_golang/prometheus/testutil
     github.com/prometheus/client_golang/prometheus/testutil/promlint
     github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
    -# github.com/prometheus/client_model v0.5.0
    +# github.com/prometheus/client_model v0.6.1
     ## explicit; go 1.19
     github.com/prometheus/client_model/go
    -# github.com/prometheus/common v0.47.0 => github.com/TheSpiritXIII/prometheus-common v0.47.0-gmp.0
    +# github.com/prometheus/common v0.62.0 => github.com/TheSpiritXIII/prometheus-common v0.47.0-gmp.0
     ## explicit; go 1.20
     github.com/prometheus/common/config
     github.com/prometheus/common/expfmt
    @@ -436,8 +475,8 @@ github.com/prometheus/common/assets
     # github.com/prometheus/common/sigv4 v0.1.0
     ## explicit; go 1.15
     github.com/prometheus/common/sigv4
    -# github.com/prometheus/procfs v0.12.0
    -## explicit; go 1.19
    +# github.com/prometheus/procfs v0.15.1
    +## explicit; go 1.20
     github.com/prometheus/procfs
     github.com/prometheus/procfs/internal/fs
     github.com/prometheus/procfs/internal/util
    @@ -494,20 +533,24 @@ github.com/prometheus/prometheus/web/api/v1
     # github.com/spf13/pflag v1.0.5
     ## explicit; go 1.12
     github.com/spf13/pflag
    -# github.com/stretchr/testify v1.9.0
    +# github.com/stretchr/testify v1.10.0
     ## explicit; go 1.17
     github.com/stretchr/testify/assert
    +github.com/stretchr/testify/assert/yaml
     github.com/stretchr/testify/require
    -# github.com/thanos-io/thanos v0.34.2-0.20240314081355-f731719f9515
    -## explicit; go 1.21
    +# github.com/thanos-io/thanos v0.38.0
    +## explicit; go 1.24
     github.com/thanos-io/thanos/pkg/errutil
     github.com/thanos-io/thanos/pkg/reloader
     github.com/thanos-io/thanos/pkg/runutil
    +# github.com/x448/float16 v0.8.4
    +## explicit; go 1.11
    +github.com/x448/float16
     # github.com/xhit/go-str2duration/v2 v2.1.0
     ## explicit; go 1.13
     github.com/xhit/go-str2duration/v2
    -# go.mongodb.org/mongo-driver v1.13.1
    -## explicit; go 1.13
    +# go.mongodb.org/mongo-driver v1.14.0
    +## explicit; go 1.18
     go.mongodb.org/mongo-driver/bson
     go.mongodb.org/mongo-driver/bson/bsoncodec
     go.mongodb.org/mongo-driver/bson/bsonoptions
    @@ -515,35 +558,22 @@ go.mongodb.org/mongo-driver/bson/bsonrw
     go.mongodb.org/mongo-driver/bson/bsontype
     go.mongodb.org/mongo-driver/bson/primitive
     go.mongodb.org/mongo-driver/x/bsonx/bsoncore
    -# go.opencensus.io v0.24.0
    -## explicit; go 1.13
    -go.opencensus.io
    -go.opencensus.io/internal
    -go.opencensus.io/internal/tagencoding
    -go.opencensus.io/metric/metricdata
    -go.opencensus.io/metric/metricproducer
    -go.opencensus.io/plugin/ocgrpc
    -go.opencensus.io/plugin/ochttp
    -go.opencensus.io/plugin/ochttp/propagation/b3
    -go.opencensus.io/resource
    -go.opencensus.io/stats
    -go.opencensus.io/stats/internal
    -go.opencensus.io/stats/view
    -go.opencensus.io/tag
    -go.opencensus.io/trace
    -go.opencensus.io/trace/internal
    -go.opencensus.io/trace/propagation
    -go.opencensus.io/trace/tracestate
    -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0
    -## explicit; go 1.20
    +# go.opentelemetry.io/auto/sdk v1.1.0
    +## explicit; go 1.22.0
    +go.opentelemetry.io/auto/sdk
    +go.opentelemetry.io/auto/sdk/internal/telemetry
    +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0
    +## explicit; go 1.21
     go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
     go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
    -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0
    -## explicit; go 1.20
    +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
    +## explicit; go 1.22.0
     go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
    +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
    +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
     go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
    -# go.opentelemetry.io/otel v1.22.0
    -## explicit; go 1.20
    +# go.opentelemetry.io/otel v1.35.0
    +## explicit; go 1.22.0
     go.opentelemetry.io/otel
     go.opentelemetry.io/otel/attribute
     go.opentelemetry.io/otel/baggage
    @@ -555,14 +585,18 @@ go.opentelemetry.io/otel/internal/global
     go.opentelemetry.io/otel/propagation
     go.opentelemetry.io/otel/semconv/v1.17.0
     go.opentelemetry.io/otel/semconv/v1.20.0
    -# go.opentelemetry.io/otel/metric v1.22.0
    -## explicit; go 1.20
    +go.opentelemetry.io/otel/semconv/v1.26.0
    +# go.opentelemetry.io/otel/metric v1.35.0
    +## explicit; go 1.22.0
     go.opentelemetry.io/otel/metric
     go.opentelemetry.io/otel/metric/embedded
    -# go.opentelemetry.io/otel/trace v1.22.0
    -## explicit; go 1.20
    +go.opentelemetry.io/otel/metric/noop
    +# go.opentelemetry.io/otel/trace v1.35.0
    +## explicit; go 1.22.0
     go.opentelemetry.io/otel/trace
     go.opentelemetry.io/otel/trace/embedded
    +go.opentelemetry.io/otel/trace/internal/telemetry
    +go.opentelemetry.io/otel/trace/noop
     # go.uber.org/atomic v1.11.0
     ## explicit; go 1.18
     go.uber.org/atomic
    @@ -584,7 +618,7 @@ go.uber.org/zap/internal/exit
     go.uber.org/zap/internal/pool
     go.uber.org/zap/internal/stacktrace
     go.uber.org/zap/zapcore
    -# golang.org/x/crypto v0.35.0
    +# golang.org/x/crypto v0.36.0
     ## explicit; go 1.23.0
     golang.org/x/crypto/chacha20
     golang.org/x/crypto/chacha20poly1305
    @@ -595,17 +629,17 @@ golang.org/x/crypto/internal/alias
     golang.org/x/crypto/internal/poly1305
     golang.org/x/crypto/pkcs12
     golang.org/x/crypto/pkcs12/internal/rc2
    -# golang.org/x/exp v0.0.0-20240119083558-1b970713d09a => golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
    +# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 => golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
     ## explicit; go 1.20
     golang.org/x/exp/constraints
     golang.org/x/exp/maps
     golang.org/x/exp/slices
    -# golang.org/x/mod v0.17.0
    -## explicit; go 1.18
    +# golang.org/x/mod v0.24.0
    +## explicit; go 1.23.0
     golang.org/x/mod/internal/lazyregexp
     golang.org/x/mod/module
     golang.org/x/mod/semver
    -# golang.org/x/net v0.36.0
    +# golang.org/x/net v0.37.0
     ## explicit; go 1.23.0
     golang.org/x/net/context
     golang.org/x/net/html
    @@ -621,47 +655,49 @@ golang.org/x/net/internal/timeseries
     golang.org/x/net/proxy
     golang.org/x/net/trace
     golang.org/x/net/websocket
    -# golang.org/x/oauth2 v0.17.0
    +# golang.org/x/oauth2 v0.24.0
     ## explicit; go 1.18
     golang.org/x/oauth2
     golang.org/x/oauth2/authhandler
     golang.org/x/oauth2/clientcredentials
     golang.org/x/oauth2/google
    -golang.org/x/oauth2/google/internal/externalaccount
    +golang.org/x/oauth2/google/externalaccount
     golang.org/x/oauth2/google/internal/externalaccountauthorizeduser
    +golang.org/x/oauth2/google/internal/impersonate
     golang.org/x/oauth2/google/internal/stsexchange
     golang.org/x/oauth2/internal
     golang.org/x/oauth2/jws
     golang.org/x/oauth2/jwt
    -# golang.org/x/sync v0.11.0
    -## explicit; go 1.18
    +# golang.org/x/sync v0.12.0
    +## explicit; go 1.23.0
     golang.org/x/sync/errgroup
     golang.org/x/sync/semaphore
    -# golang.org/x/sys v0.30.0
    -## explicit; go 1.18
    +# golang.org/x/sys v0.31.0
    +## explicit; go 1.23.0
     golang.org/x/sys/cpu
     golang.org/x/sys/plan9
     golang.org/x/sys/unix
     golang.org/x/sys/windows
    -# golang.org/x/term v0.29.0
    -## explicit; go 1.18
    +golang.org/x/sys/windows/registry
    +# golang.org/x/term v0.30.0
    +## explicit; go 1.23.0
     golang.org/x/term
    -# golang.org/x/text v0.22.0
    -## explicit; go 1.18
    +# golang.org/x/text v0.23.0
    +## explicit; go 1.23.0
     golang.org/x/text/secure/bidirule
     golang.org/x/text/transform
     golang.org/x/text/unicode/bidi
     golang.org/x/text/unicode/norm
    -# golang.org/x/time v0.5.0
    +# golang.org/x/time v0.8.0
     ## explicit; go 1.18
     golang.org/x/time/rate
    -# golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
    -## explicit; go 1.19
    +# golang.org/x/tools v0.31.0
    +## explicit; go 1.23.0
     golang.org/x/tools/go/ast/astutil
     golang.org/x/tools/go/gcexportdata
    -golang.org/x/tools/go/internal/packagesdriver
     golang.org/x/tools/go/packages
     golang.org/x/tools/go/types/objectpath
    +golang.org/x/tools/go/types/typeutil
     golang.org/x/tools/imports
     golang.org/x/tools/internal/aliases
     golang.org/x/tools/internal/event
    @@ -672,17 +708,18 @@ golang.org/x/tools/internal/gcimporter
     golang.org/x/tools/internal/gocommand
     golang.org/x/tools/internal/gopathwalk
     golang.org/x/tools/internal/imports
    +golang.org/x/tools/internal/modindex
     golang.org/x/tools/internal/packagesinternal
     golang.org/x/tools/internal/pkgbits
     golang.org/x/tools/internal/stdlib
    -golang.org/x/tools/internal/tokeninternal
    +golang.org/x/tools/internal/typeparams
     golang.org/x/tools/internal/typesinternal
     golang.org/x/tools/internal/versions
     # gomodules.xyz/jsonpatch/v2 v2.4.0
     ## explicit; go 1.20
     gomodules.xyz/jsonpatch/v2
    -# google.golang.org/api v0.162.0
    -## explicit; go 1.19
    +# google.golang.org/api v0.213.0
    +## explicit; go 1.21
     google.golang.org/api/googleapi
     google.golang.org/api/googleapi/transport
     google.golang.org/api/internal
    @@ -694,38 +731,24 @@ google.golang.org/api/option
     google.golang.org/api/option/internaloption
     google.golang.org/api/transport/grpc
     google.golang.org/api/transport/http
    -google.golang.org/api/transport/http/internal/propagation
    -# google.golang.org/appengine v1.6.8
    -## explicit; go 1.11
    -google.golang.org/appengine
    -google.golang.org/appengine/internal
    -google.golang.org/appengine/internal/app_identity
    -google.golang.org/appengine/internal/base
    -google.golang.org/appengine/internal/datastore
    -google.golang.org/appengine/internal/log
    -google.golang.org/appengine/internal/modules
    -google.golang.org/appengine/internal/remote_api
    -google.golang.org/appengine/internal/urlfetch
    -google.golang.org/appengine/urlfetch
    -# google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe
    -## explicit; go 1.19
    +# google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c
    +## explicit; go 1.21
     google.golang.org/genproto/googleapis/type/calendarperiod
    -google.golang.org/genproto/internal
    -# google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014
    -## explicit; go 1.19
    +# google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f
    +## explicit; go 1.22
     google.golang.org/genproto/googleapis/api
     google.golang.org/genproto/googleapis/api/annotations
     google.golang.org/genproto/googleapis/api/distribution
     google.golang.org/genproto/googleapis/api/label
     google.golang.org/genproto/googleapis/api/metric
     google.golang.org/genproto/googleapis/api/monitoredres
    -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014
    -## explicit; go 1.19
    +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f
    +## explicit; go 1.22
     google.golang.org/genproto/googleapis/rpc/code
     google.golang.org/genproto/googleapis/rpc/errdetails
     google.golang.org/genproto/googleapis/rpc/status
    -# google.golang.org/grpc v1.61.0
    -## explicit; go 1.19
    +# google.golang.org/grpc v1.69.4
    +## explicit; go 1.22
     google.golang.org/grpc
     google.golang.org/grpc/attributes
     google.golang.org/grpc/backoff
    @@ -734,6 +757,9 @@ google.golang.org/grpc/balancer/base
     google.golang.org/grpc/balancer/grpclb
     google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
     google.golang.org/grpc/balancer/grpclb/state
    +google.golang.org/grpc/balancer/pickfirst
    +google.golang.org/grpc/balancer/pickfirst/internal
    +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
     google.golang.org/grpc/balancer/roundrobin
     google.golang.org/grpc/binarylog/grpc_binarylog_v1
     google.golang.org/grpc/channelz
    @@ -753,7 +779,9 @@ google.golang.org/grpc/credentials/oauth
     google.golang.org/grpc/encoding
     google.golang.org/grpc/encoding/gzip
     google.golang.org/grpc/encoding/proto
    +google.golang.org/grpc/experimental/stats
     google.golang.org/grpc/grpclog
    +google.golang.org/grpc/grpclog/internal
     google.golang.org/grpc/internal
     google.golang.org/grpc/internal/backoff
     google.golang.org/grpc/internal/balancer/gracefulswitch
    @@ -765,7 +793,6 @@ google.golang.org/grpc/internal/credentials
     google.golang.org/grpc/internal/envconfig
     google.golang.org/grpc/internal/googlecloud
     google.golang.org/grpc/internal/grpclog
    -google.golang.org/grpc/internal/grpcrand
     google.golang.org/grpc/internal/grpcsync
     google.golang.org/grpc/internal/grpcutil
     google.golang.org/grpc/internal/idle
    @@ -777,11 +804,14 @@ google.golang.org/grpc/internal/resolver/dns/internal
     google.golang.org/grpc/internal/resolver/passthrough
     google.golang.org/grpc/internal/resolver/unix
     google.golang.org/grpc/internal/serviceconfig
    +google.golang.org/grpc/internal/stats
     google.golang.org/grpc/internal/status
     google.golang.org/grpc/internal/syscall
     google.golang.org/grpc/internal/transport
     google.golang.org/grpc/internal/transport/networktype
    +google.golang.org/grpc/internal/xds
     google.golang.org/grpc/keepalive
    +google.golang.org/grpc/mem
     google.golang.org/grpc/metadata
     google.golang.org/grpc/peer
     google.golang.org/grpc/resolver
    @@ -791,8 +821,8 @@ google.golang.org/grpc/serviceconfig
     google.golang.org/grpc/stats
     google.golang.org/grpc/status
     google.golang.org/grpc/tap
    -# google.golang.org/protobuf v1.33.0
    -## explicit; go 1.17
    +# google.golang.org/protobuf v1.36.3
    +## explicit; go 1.21
     google.golang.org/protobuf/encoding/protodelim
     google.golang.org/protobuf/encoding/protojson
     google.golang.org/protobuf/encoding/prototext
    @@ -801,6 +831,7 @@ google.golang.org/protobuf/internal/descfmt
     google.golang.org/protobuf/internal/descopts
     google.golang.org/protobuf/internal/detrand
     google.golang.org/protobuf/internal/editiondefaults
    +google.golang.org/protobuf/internal/editionssupport
     google.golang.org/protobuf/internal/encoding/defval
     google.golang.org/protobuf/internal/encoding/json
     google.golang.org/protobuf/internal/encoding/messageset
    @@ -815,10 +846,12 @@ google.golang.org/protobuf/internal/impl
     google.golang.org/protobuf/internal/msgfmt
     google.golang.org/protobuf/internal/order
     google.golang.org/protobuf/internal/pragma
    +google.golang.org/protobuf/internal/protolazy
     google.golang.org/protobuf/internal/set
     google.golang.org/protobuf/internal/strs
     google.golang.org/protobuf/internal/version
     google.golang.org/protobuf/proto
    +google.golang.org/protobuf/protoadapt
     google.golang.org/protobuf/reflect/protodesc
     google.golang.org/protobuf/reflect/protoreflect
     google.golang.org/protobuf/reflect/protoregistry
    @@ -834,6 +867,9 @@ google.golang.org/protobuf/types/known/fieldmaskpb
     google.golang.org/protobuf/types/known/structpb
     google.golang.org/protobuf/types/known/timestamppb
     google.golang.org/protobuf/types/known/wrapperspb
    +# gopkg.in/evanphx/json-patch.v4 v4.12.0
    +## explicit
    +gopkg.in/evanphx/json-patch.v4
     # gopkg.in/inf.v0 v0.9.1
     ## explicit
     gopkg.in/inf.v0
    @@ -843,7 +879,7 @@ gopkg.in/yaml.v2
     # gopkg.in/yaml.v3 v3.0.1
     ## explicit
     gopkg.in/yaml.v3
    -# k8s.io/api v0.30.1
    +# k8s.io/api v0.31.3
     ## explicit; go 1.22.0
     k8s.io/api/admission/v1
     k8s.io/api/admission/v1beta1
    @@ -871,6 +907,7 @@ k8s.io/api/certificates/v1
     k8s.io/api/certificates/v1alpha1
     k8s.io/api/certificates/v1beta1
     k8s.io/api/coordination/v1
    +k8s.io/api/coordination/v1alpha1
     k8s.io/api/coordination/v1beta1
     k8s.io/api/core/v1
     k8s.io/api/discovery/v1
    @@ -882,6 +919,7 @@ k8s.io/api/flowcontrol/v1
     k8s.io/api/flowcontrol/v1beta1
     k8s.io/api/flowcontrol/v1beta2
     k8s.io/api/flowcontrol/v1beta3
    +k8s.io/api/imagepolicy/v1alpha1
     k8s.io/api/networking/v1
     k8s.io/api/networking/v1alpha1
     k8s.io/api/networking/v1beta1
    @@ -893,7 +931,7 @@ k8s.io/api/policy/v1beta1
     k8s.io/api/rbac/v1
     k8s.io/api/rbac/v1alpha1
     k8s.io/api/rbac/v1beta1
    -k8s.io/api/resource/v1alpha2
    +k8s.io/api/resource/v1alpha3
     k8s.io/api/scheduling/v1
     k8s.io/api/scheduling/v1alpha1
     k8s.io/api/scheduling/v1beta1
    @@ -912,15 +950,17 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
     k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
     k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
     k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
    -# k8s.io/apimachinery v0.30.2
    +# k8s.io/apimachinery v0.31.3
     ## explicit; go 1.22.0
     k8s.io/apimachinery/pkg/api/equality
     k8s.io/apimachinery/pkg/api/errors
     k8s.io/apimachinery/pkg/api/meta
    +k8s.io/apimachinery/pkg/api/meta/testrestmapper
     k8s.io/apimachinery/pkg/api/resource
     k8s.io/apimachinery/pkg/api/validation
     k8s.io/apimachinery/pkg/apis/meta/internalversion
     k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme
    +k8s.io/apimachinery/pkg/apis/meta/internalversion/validation
     k8s.io/apimachinery/pkg/apis/meta/v1
     k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
     k8s.io/apimachinery/pkg/apis/meta/v1/validation
    @@ -932,6 +972,8 @@ k8s.io/apimachinery/pkg/labels
     k8s.io/apimachinery/pkg/runtime
     k8s.io/apimachinery/pkg/runtime/schema
     k8s.io/apimachinery/pkg/runtime/serializer
    +k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct
    +k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes
     k8s.io/apimachinery/pkg/runtime/serializer/json
     k8s.io/apimachinery/pkg/runtime/serializer/protobuf
     k8s.io/apimachinery/pkg/runtime/serializer/recognizer
    @@ -975,8 +1017,9 @@ k8s.io/apimachinery/third_party/forked/golang/reflect
     # k8s.io/autoscaler/vertical-pod-autoscaler v1.2.2
     ## explicit; go 1.21
     k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1
    -# k8s.io/client-go v0.30.1
    +# k8s.io/client-go v0.31.3
     ## explicit; go 1.22.0
    +k8s.io/client-go/applyconfigurations
     k8s.io/client-go/applyconfigurations/admissionregistration/v1
     k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
     k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
    @@ -994,6 +1037,7 @@ k8s.io/client-go/applyconfigurations/certificates/v1
     k8s.io/client-go/applyconfigurations/certificates/v1alpha1
     k8s.io/client-go/applyconfigurations/certificates/v1beta1
     k8s.io/client-go/applyconfigurations/coordination/v1
    +k8s.io/client-go/applyconfigurations/coordination/v1alpha1
     k8s.io/client-go/applyconfigurations/coordination/v1beta1
     k8s.io/client-go/applyconfigurations/core/v1
     k8s.io/client-go/applyconfigurations/discovery/v1
    @@ -1005,6 +1049,7 @@ k8s.io/client-go/applyconfigurations/flowcontrol/v1
     k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1
     k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2
     k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3
    +k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1
     k8s.io/client-go/applyconfigurations/internal
     k8s.io/client-go/applyconfigurations/meta/v1
     k8s.io/client-go/applyconfigurations/networking/v1
    @@ -1018,7 +1063,7 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1
     k8s.io/client-go/applyconfigurations/rbac/v1
     k8s.io/client-go/applyconfigurations/rbac/v1alpha1
     k8s.io/client-go/applyconfigurations/rbac/v1beta1
    -k8s.io/client-go/applyconfigurations/resource/v1alpha2
    +k8s.io/client-go/applyconfigurations/resource/v1alpha3
     k8s.io/client-go/applyconfigurations/scheduling/v1
     k8s.io/client-go/applyconfigurations/scheduling/v1alpha1
     k8s.io/client-go/applyconfigurations/scheduling/v1beta1
    @@ -1030,6 +1075,77 @@ k8s.io/client-go/discovery
     k8s.io/client-go/discovery/fake
     k8s.io/client-go/dynamic
     k8s.io/client-go/features
    +k8s.io/client-go/gentype
    +k8s.io/client-go/informers
    +k8s.io/client-go/informers/admissionregistration
    +k8s.io/client-go/informers/admissionregistration/v1
    +k8s.io/client-go/informers/admissionregistration/v1alpha1
    +k8s.io/client-go/informers/admissionregistration/v1beta1
    +k8s.io/client-go/informers/apiserverinternal
    +k8s.io/client-go/informers/apiserverinternal/v1alpha1
    +k8s.io/client-go/informers/apps
    +k8s.io/client-go/informers/apps/v1
    +k8s.io/client-go/informers/apps/v1beta1
    +k8s.io/client-go/informers/apps/v1beta2
    +k8s.io/client-go/informers/autoscaling
    +k8s.io/client-go/informers/autoscaling/v1
    +k8s.io/client-go/informers/autoscaling/v2
    +k8s.io/client-go/informers/autoscaling/v2beta1
    +k8s.io/client-go/informers/autoscaling/v2beta2
    +k8s.io/client-go/informers/batch
    +k8s.io/client-go/informers/batch/v1
    +k8s.io/client-go/informers/batch/v1beta1
    +k8s.io/client-go/informers/certificates
    +k8s.io/client-go/informers/certificates/v1
    +k8s.io/client-go/informers/certificates/v1alpha1
    +k8s.io/client-go/informers/certificates/v1beta1
    +k8s.io/client-go/informers/coordination
    +k8s.io/client-go/informers/coordination/v1
    +k8s.io/client-go/informers/coordination/v1alpha1
    +k8s.io/client-go/informers/coordination/v1beta1
    +k8s.io/client-go/informers/core
    +k8s.io/client-go/informers/core/v1
    +k8s.io/client-go/informers/discovery
    +k8s.io/client-go/informers/discovery/v1
    +k8s.io/client-go/informers/discovery/v1beta1
    +k8s.io/client-go/informers/events
    +k8s.io/client-go/informers/events/v1
    +k8s.io/client-go/informers/events/v1beta1
    +k8s.io/client-go/informers/extensions
    +k8s.io/client-go/informers/extensions/v1beta1
    +k8s.io/client-go/informers/flowcontrol
    +k8s.io/client-go/informers/flowcontrol/v1
    +k8s.io/client-go/informers/flowcontrol/v1beta1
    +k8s.io/client-go/informers/flowcontrol/v1beta2
    +k8s.io/client-go/informers/flowcontrol/v1beta3
    +k8s.io/client-go/informers/internalinterfaces
    +k8s.io/client-go/informers/networking
    +k8s.io/client-go/informers/networking/v1
    +k8s.io/client-go/informers/networking/v1alpha1
    +k8s.io/client-go/informers/networking/v1beta1
    +k8s.io/client-go/informers/node
    +k8s.io/client-go/informers/node/v1
    +k8s.io/client-go/informers/node/v1alpha1
    +k8s.io/client-go/informers/node/v1beta1
    +k8s.io/client-go/informers/policy
    +k8s.io/client-go/informers/policy/v1
    +k8s.io/client-go/informers/policy/v1beta1
    +k8s.io/client-go/informers/rbac
    +k8s.io/client-go/informers/rbac/v1
    +k8s.io/client-go/informers/rbac/v1alpha1
    +k8s.io/client-go/informers/rbac/v1beta1
    +k8s.io/client-go/informers/resource
    +k8s.io/client-go/informers/resource/v1alpha3
    +k8s.io/client-go/informers/scheduling
    +k8s.io/client-go/informers/scheduling/v1
    +k8s.io/client-go/informers/scheduling/v1alpha1
    +k8s.io/client-go/informers/scheduling/v1beta1
    +k8s.io/client-go/informers/storage
    +k8s.io/client-go/informers/storage/v1
    +k8s.io/client-go/informers/storage/v1alpha1
    +k8s.io/client-go/informers/storage/v1beta1
    +k8s.io/client-go/informers/storagemigration
    +k8s.io/client-go/informers/storagemigration/v1alpha1
     k8s.io/client-go/kubernetes
     k8s.io/client-go/kubernetes/fake
     k8s.io/client-go/kubernetes/scheme
    @@ -1077,6 +1193,8 @@ k8s.io/client-go/kubernetes/typed/certificates/v1beta1
     k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake
     k8s.io/client-go/kubernetes/typed/coordination/v1
     k8s.io/client-go/kubernetes/typed/coordination/v1/fake
    +k8s.io/client-go/kubernetes/typed/coordination/v1alpha1
    +k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake
     k8s.io/client-go/kubernetes/typed/coordination/v1beta1
     k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake
     k8s.io/client-go/kubernetes/typed/core/v1
    @@ -1121,8 +1239,8 @@ k8s.io/client-go/kubernetes/typed/rbac/v1alpha1
     k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake
     k8s.io/client-go/kubernetes/typed/rbac/v1beta1
     k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake
    -k8s.io/client-go/kubernetes/typed/resource/v1alpha2
    -k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake
    +k8s.io/client-go/kubernetes/typed/resource/v1alpha3
    +k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake
     k8s.io/client-go/kubernetes/typed/scheduling/v1
     k8s.io/client-go/kubernetes/typed/scheduling/v1/fake
     k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1
    @@ -1137,6 +1255,55 @@ k8s.io/client-go/kubernetes/typed/storage/v1beta1
     k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake
     k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1
     k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake
    +k8s.io/client-go/listers
    +k8s.io/client-go/listers/admissionregistration/v1
    +k8s.io/client-go/listers/admissionregistration/v1alpha1
    +k8s.io/client-go/listers/admissionregistration/v1beta1
    +k8s.io/client-go/listers/apiserverinternal/v1alpha1
    +k8s.io/client-go/listers/apps/v1
    +k8s.io/client-go/listers/apps/v1beta1
    +k8s.io/client-go/listers/apps/v1beta2
    +k8s.io/client-go/listers/autoscaling/v1
    +k8s.io/client-go/listers/autoscaling/v2
    +k8s.io/client-go/listers/autoscaling/v2beta1
    +k8s.io/client-go/listers/autoscaling/v2beta2
    +k8s.io/client-go/listers/batch/v1
    +k8s.io/client-go/listers/batch/v1beta1
    +k8s.io/client-go/listers/certificates/v1
    +k8s.io/client-go/listers/certificates/v1alpha1
    +k8s.io/client-go/listers/certificates/v1beta1
    +k8s.io/client-go/listers/coordination/v1
    +k8s.io/client-go/listers/coordination/v1alpha1
    +k8s.io/client-go/listers/coordination/v1beta1
    +k8s.io/client-go/listers/core/v1
    +k8s.io/client-go/listers/discovery/v1
    +k8s.io/client-go/listers/discovery/v1beta1
    +k8s.io/client-go/listers/events/v1
    +k8s.io/client-go/listers/events/v1beta1
    +k8s.io/client-go/listers/extensions/v1beta1
    +k8s.io/client-go/listers/flowcontrol/v1
    +k8s.io/client-go/listers/flowcontrol/v1beta1
    +k8s.io/client-go/listers/flowcontrol/v1beta2
    +k8s.io/client-go/listers/flowcontrol/v1beta3
    +k8s.io/client-go/listers/networking/v1
    +k8s.io/client-go/listers/networking/v1alpha1
    +k8s.io/client-go/listers/networking/v1beta1
    +k8s.io/client-go/listers/node/v1
    +k8s.io/client-go/listers/node/v1alpha1
    +k8s.io/client-go/listers/node/v1beta1
    +k8s.io/client-go/listers/policy/v1
    +k8s.io/client-go/listers/policy/v1beta1
    +k8s.io/client-go/listers/rbac/v1
    +k8s.io/client-go/listers/rbac/v1alpha1
    +k8s.io/client-go/listers/rbac/v1beta1
    +k8s.io/client-go/listers/resource/v1alpha3
    +k8s.io/client-go/listers/scheduling/v1
    +k8s.io/client-go/listers/scheduling/v1alpha1
    +k8s.io/client-go/listers/scheduling/v1beta1
    +k8s.io/client-go/listers/storage/v1
    +k8s.io/client-go/listers/storage/v1alpha1
    +k8s.io/client-go/listers/storage/v1beta1
    +k8s.io/client-go/listers/storagemigration/v1alpha1
     k8s.io/client-go/metadata
     k8s.io/client-go/openapi
     k8s.io/client-go/pkg/apis/clientauthentication
    @@ -1172,9 +1339,11 @@ k8s.io/client-go/transport/spdy
     k8s.io/client-go/transport/websocket
     k8s.io/client-go/util/cert
     k8s.io/client-go/util/connrotation
    +k8s.io/client-go/util/consistencydetector
     k8s.io/client-go/util/flowcontrol
     k8s.io/client-go/util/homedir
     k8s.io/client-go/util/keyutil
    +k8s.io/client-go/util/watchlist
     k8s.io/client-go/util/workqueue
     # k8s.io/code-generator v0.30.2
     ## explicit; go 1.22.0
    @@ -1219,7 +1388,7 @@ k8s.io/gengo/v2/generator
     k8s.io/gengo/v2/namer
     k8s.io/gengo/v2/parser
     k8s.io/gengo/v2/types
    -# k8s.io/klog/v2 v2.120.1
    +# k8s.io/klog/v2 v2.130.1
     ## explicit; go 1.18
     k8s.io/klog/v2
     k8s.io/klog/v2/internal/buffer
    @@ -1239,7 +1408,7 @@ k8s.io/kube-openapi/pkg/schemaconv
     k8s.io/kube-openapi/pkg/spec3
     k8s.io/kube-openapi/pkg/util/proto
     k8s.io/kube-openapi/pkg/validation/spec
    -# k8s.io/utils v0.0.0-20230726121419-3b25d923346b
    +# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
     ## explicit; go 1.18
     k8s.io/utils/buffer
     k8s.io/utils/clock